| id | text | dataset_id |
|---|---|---|
6575821 | import time
import os
import glob
import re
from json.decoder import JSONDecodeError
from rotkehlchen.exchange import data_up_todate
from rotkehlchen.kraken import kraken_to_world_pair
from rotkehlchen.bittrex import trade_from_bittrex
from rotkehlchen.bitmex import trade_from_bitmex
from rotkehlchen.binance import trade_from_binance
from rotkehlchen.transactions import query_etherscan_for_transactions, transactions_from_dictlist
from rotkehlchen.fval import FVal
from rotkehlchen.utils import (
createTimeStamp,
tsToDate,
get_pair_position,
get_jsonfile_contents_or_empty_dict,
rlk_jsonloads,
rlk_jsondumps,
convert_to_int,
ts_now,
request_get
)
from rotkehlchen.order_formatting import (
Trade,
trades_from_dictlist,
asset_movements_from_dictlist
)
from rotkehlchen.inquirer import FIAT_CURRENCIES
from rotkehlchen.errors import RemoteError
import logging
logger = logging.getLogger(__name__)
TRADES_HISTORYFILE = 'trades_history.json'
MARGIN_HISTORYFILE = 'margin_trades_history.json'
MANUAL_MARGINS_LOGFILE = 'manual_margin_positions_log.json'
LOANS_HISTORYFILE = 'loans_history.json'
ETHEREUM_TX_LOGFILE = 'ethereum_tx_log.json'
ASSETMOVEMENTS_HISTORYFILE = 'asset_movements_history.json'
class NoPriceForGivenTimestamp(Exception):
def __init__(self, from_asset, to_asset, timestamp):
super(NoPriceForGivenTimestamp, self).__init__(
'Unable to query a historical price for "{}" to "{}" at {}'.format(
from_asset, to_asset, timestamp
)
)
class PriceQueryUnknownFromAsset(Exception):
def __init__(self, from_asset):
super(PriceQueryUnknownFromAsset, self).__init__(
'Unable to query historical price for Unknown Asset: "{}"'.format(from_asset)
)
def include_external_trades(db, start_ts, end_ts, history):
external_trades = db.get_external_trades()
external_trades = trades_from_dictlist(external_trades, start_ts, end_ts)
history.extend(external_trades)
history.sort(key=lambda trade: trade.timestamp)
return history
def trade_from_kraken(kraken_trade):
"""Turn a kraken trade returned from kraken trade history to our common trade
history format"""
currency_pair = kraken_to_world_pair(kraken_trade['pair'])
quote_currency = get_pair_position(currency_pair, 'second')
return Trade(
        # Kraken timestamps have a floating point component
timestamp=convert_to_int(kraken_trade['time'], accept_only_exact=False),
pair=currency_pair,
type=kraken_trade['type'],
rate=FVal(kraken_trade['price']),
cost=FVal(kraken_trade['cost']),
cost_currency=quote_currency,
fee=FVal(kraken_trade['fee']),
fee_currency=quote_currency,
amount=FVal(kraken_trade['vol']),
location='kraken'
)
def trade_from_poloniex(poloniex_trade, pair):
"""Turn a poloniex trade returned from poloniex trade history to our common trade
history format"""
trade_type = poloniex_trade['type']
amount = FVal(poloniex_trade['amount'])
rate = FVal(poloniex_trade['rate'])
perc_fee = FVal(poloniex_trade['fee'])
base_currency = get_pair_position(pair, 'first')
quote_currency = get_pair_position(pair, 'second')
if trade_type == 'buy':
cost = rate * amount
cost_currency = base_currency
fee = amount * perc_fee
fee_currency = quote_currency
elif trade_type == 'sell':
cost = amount * rate
cost_currency = base_currency
fee = cost * perc_fee
fee_currency = base_currency
else:
raise ValueError('Got unexpected trade type "{}" for poloniex trade'.format(trade_type))
if poloniex_trade['category'] == 'settlement':
trade_type = "settlement_%s" % trade_type
return Trade(
timestamp=createTimeStamp(poloniex_trade['date'], formatstr="%Y-%m-%d %H:%M:%S"),
pair=pair,
type=trade_type,
rate=rate,
cost=cost,
cost_currency=cost_currency,
fee=fee,
fee_currency=fee_currency,
amount=amount,
location='poloniex'
)
def do_read_manual_margin_positions(data_directory):
manual_margin_path = os.path.join(data_directory, MANUAL_MARGINS_LOGFILE)
if os.path.isfile(manual_margin_path):
with open(manual_margin_path, 'r') as f:
margin_data = rlk_jsonloads(f.read())
else:
margin_data = []
logger.info(
'Could not find manual margins log file at {}'.format(manual_margin_path)
)
return margin_data
def write_history_data_in_file(data, filepath, start_ts, end_ts):
with open(filepath, 'w') as outfile:
history_dict = dict()
history_dict['data'] = data
history_dict['start_time'] = start_ts
history_dict['end_time'] = end_ts
outfile.write(rlk_jsondumps(history_dict))
def write_tupledata_history_in_file(history, filepath, start_ts, end_ts):
out_history = [tr._asdict() for tr in history]
write_history_data_in_file(out_history, filepath, start_ts, end_ts)
def limit_trade_list_to_period(trades_list, start_ts, end_ts):
"""Accepts a SORTED by timestamp trades_list and returns a shortened version
of that list limited to a specific time period"""
    start_idx = None
    end_idx = None
    for idx, trade in enumerate(trades_list):
        if start_idx is None and trade.timestamp >= start_ts:
            start_idx = idx
        if end_idx is None and trade.timestamp > end_ts:
            end_idx = idx
            break
    return trades_list[start_idx:end_idx] if start_idx is not None else list()
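# An illustrative walk-through with hypothetical timestamps: for trades at
# [10, 20, 30, 40] with start_ts=15 and end_ts=35, start_idx lands on the
# trade at 20 and end_idx on the trade at 40, so the slice keeps 20 and 30.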
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return zip(a, a)
def check_hourly_data_sanity(data, from_asset, to_asset):
"""Check that the hourly data is an array of objects having timestamps
increasing by 1 hour.
"""
index = 0
for n1, n2 in pairwise(data):
diff = n2['time'] - n1['time']
if diff != 3600:
print(
"Problem at indices {} and {} of {}_to_{} prices. Time difference is: {}".format(
index, index + 1, from_asset, to_asset, diff)
)
return False
index += 2
return True
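# A minimal sanity-check sketch with hypothetical entries: consecutive pairs
# exactly one hour apart pass, any other gap is reported and rejected.
# check_hourly_data_sanity([{'time': 0}, {'time': 3600}], 'BTC', 'USD')   # True
# check_hourly_data_sanity([{'time': 0}, {'time': 7200}], 'BTC', 'USD')   # False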
def process_polo_loans(data, start_ts, end_ts):
new_data = list()
for loan in reversed(data):
close_time = createTimeStamp(loan['close'], formatstr="%Y-%m-%d %H:%M:%S")
open_time = createTimeStamp(loan['open'], formatstr="%Y-%m-%d %H:%M:%S")
if open_time < start_ts:
continue
if close_time > end_ts:
break
new_data.append({
'open_time': open_time,
'close_time': close_time,
'currency': loan['currency'],
'fee': FVal(loan['fee']),
'earned': FVal(loan['earned']),
'amount_lent': FVal(loan['amount']),
})
new_data.sort(key=lambda loan: loan['open_time'])
return new_data
class PriceHistorian(object):
def __init__(self, data_directory, history_date_start):
self.data_directory = data_directory
# get the start date for historical data
self.historical_data_start = createTimeStamp(history_date_start, formatstr="%d/%m/%Y")
self.price_history = dict()
self.price_history_file = dict()
# Check the data folder and remember the filenames of any cached history
prefix = os.path.join(self.data_directory, 'price_history_')
prefix = prefix.replace('\\', '\\\\')
regex = re.compile(prefix + '(.*)\\.json')
files_list = glob.glob(prefix + '*.json')
for file_ in files_list:
match = regex.match(file_)
assert match
cache_key = match.group(1)
self.price_history_file[cache_key] = file_
# Get coin list of crypto compare
invalidate_cache = True
coinlist_cache_path = os.path.join(self.data_directory, 'cryptocompare_coinlist.json')
if os.path.isfile(coinlist_cache_path):
with open(coinlist_cache_path, 'rb') as f:
try:
data = rlk_jsonloads(f.read())
now = ts_now()
invalidate_cache = False
                    # If we have a cache and it's over a month old then requery cryptocompare
if data['time'] < now and now - data['time'] > 2629800:
invalidate_cache = True
data = data['data']
except JSONDecodeError:
invalidate_cache = True
if invalidate_cache:
query_string = 'https://www.cryptocompare.com/api/data/coinlist/'
resp = request_get(query_string)
if 'Response' not in resp or resp['Response'] != 'Success':
error_message = 'Failed to query cryptocompare for: "{}"'.format(query_string)
if 'Message' in resp:
error_message += ". Error: {}".format(resp['Message'])
raise ValueError(error_message)
data = resp['Data']
# Also save the cache
with open(coinlist_cache_path, 'w') as f:
write_data = {'time': ts_now(), 'data': data}
f.write(rlk_jsondumps(write_data))
else:
# in any case take the data
data = data['data']
self.cryptocompare_coin_list = data
        # For some reason, even though prices for the following assets are
        # returned, they are not in the coinlist, so add them here.
self.cryptocompare_coin_list['DAO'] = object()
self.cryptocompare_coin_list['USDT'] = object()
def got_cached_price(self, cache_key, timestamp):
"""Check if we got a price history for the timestamp cached"""
if cache_key in self.price_history_file:
if cache_key not in self.price_history:
try:
with open(self.price_history_file[cache_key], 'rb') as f:
data = rlk_jsonloads(f.read())
self.price_history[cache_key] = data
except (OSError, IOError, JSONDecodeError):
return False
in_range = (
self.price_history[cache_key]['start_time'] <= timestamp and
self.price_history[cache_key]['end_time'] > timestamp
)
if in_range:
return True
return False
def get_historical_data(self, from_asset, to_asset, timestamp):
"""Get historical price data from cryptocompare"""
if from_asset not in self.cryptocompare_coin_list:
raise ValueError(
'Attempted to query historical price data for '
'unknown asset "{}"'.format(from_asset)
)
if to_asset not in self.cryptocompare_coin_list and to_asset not in FIAT_CURRENCIES:
raise ValueError(
'Attempted to query historical price data for '
'unknown asset "{}"'.format(to_asset)
)
cache_key = from_asset + '_' + to_asset
got_cached_value = self.got_cached_price(cache_key, timestamp)
if got_cached_value:
return self.price_history[cache_key]['data']
now_ts = int(time.time())
cryptocompare_hourquerylimit = 2000
calculated_history = list()
if self.historical_data_start <= timestamp:
end_date = self.historical_data_start
else:
end_date = timestamp
while True:
pr_end_date = end_date
end_date = end_date + (cryptocompare_hourquerylimit) * 3600
query_string = (
'https://min-api.cryptocompare.com/data/histohour?'
'fsym={}&tsym={}&limit={}&toTs={}'.format(
from_asset, to_asset, cryptocompare_hourquerylimit, end_date
))
resp = request_get(query_string)
if 'Response' not in resp or resp['Response'] != 'Success':
error_message = 'Failed to query cryptocompare for: "{}"'.format(query_string)
if 'Message' in resp:
error_message += ". Error: {}".format(resp['Message'])
raise ValueError(error_message)
if pr_end_date != resp['TimeFrom']:
                # If we got more than we needed (since we are close to now_ts),
                # skip all the already included entries
diff = pr_end_date - resp['TimeFrom']
if resp['Data'][diff // 3600]['time'] != pr_end_date:
raise ValueError(
'Expected to find the previous date timestamp during '
'historical data fetching'
)
# just add only the part from the previous timestamp and on
resp['Data'] = resp['Data'][diff // 3600:]
if end_date < now_ts and resp['TimeTo'] != end_date:
                raise ValueError('End dates do not match')
# If last time slot and first new are the same, skip the first new slot
last_entry_equal_to_first = (
len(calculated_history) != 0 and
calculated_history[-1]['time'] == resp['Data'][0]['time']
)
if last_entry_equal_to_first:
resp['Data'] = resp['Data'][1:]
calculated_history += resp['Data']
if end_date >= now_ts:
break
# Let's always check for data sanity for the hourly prices.
assert check_hourly_data_sanity(calculated_history, from_asset, to_asset)
self.price_history[cache_key] = {
'data': calculated_history,
'start_time': self.historical_data_start,
'end_time': now_ts
}
# and now since we actually queried the data let's also save them locally
filename = os.path.join(self.data_directory, 'price_history_' + cache_key + '.json')
write_history_data_in_file(
calculated_history,
filename,
self.historical_data_start,
now_ts
)
self.price_history_file[cache_key] = filename
return calculated_history
def query_historical_price(self, from_asset, to_asset, timestamp):
"""
Query the historical price on `timestamp` for `from_asset` in `to_asset`.
So how much `to_asset` does 1 unit of `from_asset` cost.
Args:
from_asset (str): The ticker symbol of the asset for which we want to know
the price.
to_asset (str): The ticker symbol of the asset against which we want to
know the price.
timestamp (int): The timestamp at which to query the price
"""
if from_asset == to_asset:
return 1
if from_asset not in self.cryptocompare_coin_list:
raise PriceQueryUnknownFromAsset(from_asset)
data = self.get_historical_data(from_asset, to_asset, timestamp)
# all data are sorted and timestamps are always increasing by 1 hour
# find the closest entry to the provided timestamp
assert timestamp > data[0]['time']
index = convert_to_int((timestamp - data[0]['time']) / 3600, accept_only_exact=False)
# print("timestamp: {} index: {} data_length: {}".format(timestamp, index, len(data)))
diff = abs(data[index]['time'] - timestamp)
if index + 1 <= len(data) - 1:
diff_p1 = abs(data[index + 1]['time'] - timestamp)
if diff_p1 < diff:
index = index + 1
if data[index]['high'] is None or data[index]['low'] is None:
            # If we get a None in the hourly data, set price to 0 so that we
            # fall back to the daily price
price = FVal(0)
else:
price = FVal((data[index]['high'] + data[index]['low'])) / 2
if price == 0:
if from_asset != 'BTC' and to_asset != 'BTC':
# Just get the BTC price
asset_btc_price = self.query_historical_price(from_asset, 'BTC', timestamp)
btc_to_asset_price = self.query_historical_price('BTC', to_asset, timestamp)
price = asset_btc_price * btc_to_asset_price
else:
# attempt to get the daily price by timestamp
query_string = (
'https://min-api.cryptocompare.com/data/pricehistorical?'
'fsym={}&tsyms={}&ts={}'.format(
from_asset, to_asset, timestamp
))
if to_asset == 'BTC':
query_string += '&tryConversion=false'
resp = request_get(query_string)
if from_asset not in resp:
error_message = 'Failed to query cryptocompare for: "{}"'.format(query_string)
raise ValueError(error_message)
price = FVal(resp[from_asset][to_asset])
if price == 0:
raise NoPriceForGivenTimestamp(
from_asset,
to_asset,
tsToDate(timestamp, formatstr='%d/%m/%Y, %H:%M:%S')
)
return price
class TradesHistorian(object):
def __init__(
self,
data_directory,
db,
eth_accounts,
historical_data_start,
):
self.poloniex = None
self.kraken = None
self.bittrex = None
self.bitmex = None
self.binance = None
self.data_directory = data_directory
self.db = db
self.eth_accounts = eth_accounts
# get the start date for historical data
self.historical_data_start = createTimeStamp(historical_data_start, formatstr="%d/%m/%Y")
# If this flag is true we attempt to read from the manually logged margin positions file
self.read_manual_margin_positions = True
def set_exchange(self, name, exchange_obj):
if getattr(self, name) is None or exchange_obj is None:
setattr(self, name, exchange_obj)
elif exchange_obj:
raise ValueError(
'Attempted to set {} exchange in TradesHistorian while it was '
'already set'.format(name)
)
def query_poloniex_history(self, history, asset_movements, start_ts, end_ts, end_at_least_ts):
poloniex_margin_trades = list()
polo_loans = list()
if self.poloniex is not None:
polo_history = self.poloniex.query_trade_history(
start_ts=start_ts,
end_ts=end_ts,
end_at_least_ts=end_at_least_ts
)
for pair, trades in polo_history.items():
for trade in trades:
category = trade['category']
if category == 'exchange' or category == 'settlement':
history.append(trade_from_poloniex(trade, pair))
elif category == 'marginTrade':
if not self.read_manual_margin_positions:
poloniex_margin_trades.append(trade_from_poloniex(trade, pair))
else:
raise ValueError("Unexpected poloniex trade category: {}".format(category))
if self.read_manual_margin_positions:
# Just read the manual positions log and make virtual trades that
# correspond to the profits
assert poloniex_margin_trades == list(), (
"poloniex margin trades list should be empty here"
)
poloniex_margin_trades = do_read_manual_margin_positions(
self.data_directory
)
else:
poloniex_margin_trades.sort(key=lambda trade: trade.timestamp)
poloniex_margin_trades = limit_trade_list_to_period(
poloniex_margin_trades,
start_ts,
end_ts
)
polo_loans = self.poloniex.query_loan_history(
start_ts=start_ts,
end_ts=end_ts,
end_at_least_ts=end_at_least_ts,
from_csv=True
)
polo_loans = process_polo_loans(polo_loans, start_ts, end_ts)
polo_asset_movements = self.poloniex.query_deposits_withdrawals(
start_ts=start_ts,
end_ts=end_ts,
end_at_least_ts=end_at_least_ts,
)
asset_movements.extend(polo_asset_movements)
return history, asset_movements, poloniex_margin_trades, polo_loans
def create_history(self, start_ts, end_ts, end_at_least_ts):
"""Creates trades and loans history from start_ts to end_ts or if
`end_at_least` is given and we have a cache history for that particular source
which satisfies it we return the cache
"""
# start creating the all trades history list
history = list()
asset_movements = list()
empty_or_error = ''
if self.kraken is not None:
try:
kraken_history = self.kraken.query_trade_history(
start_ts=start_ts,
end_ts=end_ts,
end_at_least_ts=end_at_least_ts
)
for trade in kraken_history:
history.append(trade_from_kraken(trade))
kraken_asset_movements = self.kraken.query_deposits_withdrawals(
start_ts=start_ts,
end_ts=end_ts,
end_at_least_ts=end_at_least_ts,
)
asset_movements.extend(kraken_asset_movements)
except RemoteError as e:
empty_or_error += '\n' + str(e)
        poloniex_margin_trades = list()
        polo_loans = list()
        try:
(
history,
asset_movements,
poloniex_margin_trades,
polo_loans,
) = self.query_poloniex_history(
history,
asset_movements,
start_ts,
end_ts,
end_at_least_ts,
)
except RemoteError as e:
empty_or_error += '\n' + str(e)
if self.bittrex is not None:
try:
bittrex_history = self.bittrex.query_trade_history(
start_ts=start_ts,
end_ts=end_ts,
end_at_least_ts=end_at_least_ts,
)
for trade in bittrex_history:
history.append(trade_from_bittrex(trade))
except RemoteError as e:
empty_or_error += '\n' + str(e)
if self.bitmex is not None:
try:
bitmex_history = self.bitmex.query_trade_history(
start_ts=start_ts,
end_ts=end_ts,
end_at_least_ts=end_at_least_ts,
)
for trade in bitmex_history:
history.append(trade_from_bitmex(trade))
except RemoteError as e:
empty_or_error += '\n' + str(e)
if self.binance is not None:
try:
binance_history = self.binance.query_trade_history(
start_ts=start_ts,
end_ts=end_ts,
end_at_least_ts=end_at_least_ts
)
for trade in binance_history:
history.append(trade_from_binance(trade, self.binance.symbols_to_pair))
except RemoteError as e:
empty_or_error += '\n' + str(e)
        eth_transactions = list()
        try:
            eth_transactions = query_etherscan_for_transactions(self.eth_accounts)
except RemoteError as e:
empty_or_error += '\n' + str(e)
# We sort it here ... but when accounting runs through the entire actions list,
# it resorts, so unless the fact that we sort is used somewhere else too, perhaps
# we can skip it?
history.sort(key=lambda trade: trade.timestamp)
history = limit_trade_list_to_period(history, start_ts, end_ts)
# Write to files
historyfile_path = os.path.join(self.data_directory, TRADES_HISTORYFILE)
write_tupledata_history_in_file(history, historyfile_path, start_ts, end_ts)
if self.poloniex is not None and not self.read_manual_margin_positions:
marginfile_path = os.path.join(self.data_directory, MARGIN_HISTORYFILE)
write_tupledata_history_in_file(
poloniex_margin_trades,
marginfile_path,
start_ts,
end_ts
)
if self.poloniex is not None:
loansfile_path = os.path.join(self.data_directory, LOANS_HISTORYFILE)
write_history_data_in_file(polo_loans, loansfile_path, start_ts, end_ts)
assetmovementsfile_path = os.path.join(self.data_directory, ASSETMOVEMENTS_HISTORYFILE)
write_tupledata_history_in_file(asset_movements, assetmovementsfile_path, start_ts, end_ts)
eth_tx_log_path = os.path.join(self.data_directory, ETHEREUM_TX_LOGFILE)
write_tupledata_history_in_file(eth_transactions, eth_tx_log_path, start_ts, end_ts)
        # After writing everything to files, include the external trades in the history
history = include_external_trades(self.db, start_ts, end_ts, history)
return (
empty_or_error,
history,
poloniex_margin_trades,
polo_loans,
asset_movements,
eth_transactions,
)
def get_history(self, start_ts, end_ts, end_at_least_ts=None):
"""Gets or creates trades and loans history from start_ts to end_ts or if
`end_at_least` is given and we have a cache history which satisfies it we
return the cache
"""
if end_at_least_ts is None:
end_at_least_ts = end_ts
historyfile_path = os.path.join(self.data_directory, TRADES_HISTORYFILE)
if os.path.isfile(historyfile_path):
with open(historyfile_path, 'r') as infile:
try:
history_json_data = rlk_jsonloads(infile.read())
                except JSONDecodeError:
                    history_json_data = dict()
all_history_okay = data_up_todate(history_json_data, start_ts, end_at_least_ts)
poloniex_history_okay = True
if self.poloniex is not None:
poloniex_history_okay = self.poloniex.check_trades_cache(
start_ts, end_at_least_ts
) is not None
kraken_history_okay = True
if self.kraken is not None:
kraken_history_okay = self.kraken.check_trades_cache(
start_ts, end_at_least_ts
) is not None
bittrex_history_okay = True
if self.bittrex is not None:
bittrex_history_okay = self.bittrex.check_trades_cache(
start_ts, end_at_least_ts
) is not None
bitmex_history_okay = True
if self.bitmex is not None:
bitmex_history_okay = self.bitmex.check_trades_cache(
start_ts, end_at_least_ts
) is not None
binance_history_okay = True
if self.binance is not None:
binance_history_okay = self.binance.check_trades_cache(
start_ts, end_at_least_ts
) is not None
if not self.read_manual_margin_positions:
marginfile_path = os.path.join(self.data_directory, MARGIN_HISTORYFILE)
margin_file_contents = get_jsonfile_contents_or_empty_dict(marginfile_path)
margin_history_is_okay = data_up_todate(
margin_file_contents,
start_ts,
end_at_least_ts
)
else:
margin_history_is_okay = True
margin_file_contents = do_read_manual_margin_positions(
self.data_directory
)
loansfile_path = os.path.join(self.data_directory, LOANS_HISTORYFILE)
loan_file_contents = get_jsonfile_contents_or_empty_dict(loansfile_path)
loan_history_is_okay = data_up_todate(
loan_file_contents,
start_ts,
end_at_least_ts
)
assetmovementsfile_path = os.path.join(
self.data_directory,
ASSETMOVEMENTS_HISTORYFILE
)
asset_movements_contents = get_jsonfile_contents_or_empty_dict(
assetmovementsfile_path
)
asset_movements_history_is_okay = data_up_todate(
asset_movements_contents,
start_ts,
end_at_least_ts
)
eth_tx_log_path = os.path.join(self.data_directory, ETHEREUM_TX_LOGFILE)
eth_tx_log_contents = get_jsonfile_contents_or_empty_dict(eth_tx_log_path)
eth_tx_log_history_history_is_okay = data_up_todate(
eth_tx_log_contents,
start_ts,
end_at_least_ts
)
if (
all_history_okay and
poloniex_history_okay and
kraken_history_okay and
bittrex_history_okay and
bitmex_history_okay and
binance_history_okay and
margin_history_is_okay and
loan_history_is_okay and
asset_movements_history_is_okay and
eth_tx_log_history_history_is_okay):
history_trades = trades_from_dictlist(
history_json_data['data'],
start_ts,
end_ts
)
if not self.read_manual_margin_positions:
margin_trades = trades_from_dictlist(
margin_file_contents['data'],
start_ts,
end_ts
)
else:
margin_trades = margin_file_contents
eth_transactions = transactions_from_dictlist(
eth_tx_log_contents['data'],
start_ts,
end_ts
)
asset_movements = asset_movements_from_dictlist(
asset_movements_contents['data'],
start_ts,
end_ts
)
history_trades = include_external_trades(
self.db,
start_ts,
end_ts,
history_trades
)
# make sure that this is the same as what is returned
# from create_history
return (
'',
history_trades,
margin_trades,
loan_file_contents['data'],
asset_movements,
eth_transactions
)
return self.create_history(start_ts, end_ts, end_at_least_ts)
| StarcoderdataPython |
3265610 | <reponame>osthafen/spruned
from typing import Dict
from spruned.daemon.bitcoin_p2p import utils
from spruned.daemon.bitcoin_p2p.p2p_connection import P2PConnectionPool
from spruned.daemon.bitcoin_p2p.p2p_interface import P2PInterface
def build(network: Dict):
assert isinstance(network, dict), network
pool = P2PConnectionPool(connections=8, batcher_timeout=30, network=network['pycoin'])
interface = P2PInterface(pool, network=network['pycoin'])
return pool, interface
| StarcoderdataPython |
3487563 | <filename>old/dronekit-python/dronekit/util.py<gh_stars>0
from __future__ import print_function
import sys
def errprinter(*args):
logger(*args)
def logger(*args):
print(*args, file=sys.stderr)
sys.stderr.flush()
| StarcoderdataPython |
3371168 | import json
from requests import post,get
import time
from PIL import Image, ImageChops
import io
state = []
data = []
url = 'https://data.mongodb-api.com/app/xmastree-lpeci/endpoint/lights'
def brightest_point(image):
# Find the brightest pixel
max_brightness = 0
x, y = 0, 0
for i in range(image.size[0]):
for j in range(image.size[1]):
if sum(image.getpixel((i,j))) > max_brightness:
max_brightness = sum(image.getpixel((i,j)))
x, y = i, j
return x, y
def process_image(n,i):
image = Image.open(io.BytesIO(i.content))
difference = ImageChops.difference(image, off_image)
difference.save(F"lamp_{n}.png")
x, y = brightest_point(difference)
data.append({"n":n,"x":x,"y":y})
print(f"{n} {x} {y}")
# Get base image
state = [{"r":0,"g":0,"b":0}]*50
body = {"state": state}
headers = {'content-type': 'application/json'}
r = post(url, data=json.dumps(body), headers=headers)
time.sleep(3)  # Wait for camera to get it
i = get('https://data.mongodb-api.com/app/xmastree-lpeci/endpoint/image')
off_image = Image.open(io.BytesIO(i.content))
off_image.save("off.png")
for x in range(50):
state = [{"r":0,"g":0,"b":0}]*50
state[x] = {"r":255,"g":255,"b":255}
body = {"state": state}
headers = {'content-type': 'application/json'}
r = post(url, data=json.dumps(body), headers=headers)
if r.status_code == 200:
print('John\'s Xmas tree was successfully randomized!')
print(x)
time.sleep(4)
i = get('https://data.mongodb-api.com/app/xmastree-lpeci/endpoint/image')
process_image(x,i)
else:
print(f"Nice try! {r.status_code}")
        time.sleep(1)
with open('data.txt', 'w') as outfile:
json.dump(data, outfile)
| StarcoderdataPython |
1702291 | import matplotlib.pyplot as plt
import numpy as np
import pathlib
import f16
import casadi as ca
import pytest
from casadi.tools.graph import graph
import os
TRIM_TOL = 1e-5
def plot_table2D(title, path, x_grid, y_grid, x_label, y_label, f_table):
X, Y = np.meshgrid(x_grid, y_grid)
Z = np.zeros((len(x_grid), len(y_grid)))
for i, x in enumerate(x_grid):
for j, y in enumerate(y_grid):
Z[i, j] = f_table(x, y)
plt.figure()
plt.contourf(X, Y, Z.T, levels=20)
plt.colorbar()
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.savefig(path.joinpath('{:s}.png'.format(title)))
plt.close()
def test_tables():
alpha_deg_grid = np.linspace(-15, 50, 20)
beta_deg_grid = np.linspace(-35, 35, 20)
elev_deg_grid = np.linspace(-30, 30, 20)
ail_deg_grid = np.linspace(-30, 30, 20)
mach_grid = np.linspace(0, 1.1, 20)
alt_grid = np.linspace(-1e4, 6e4, 20)
path = pathlib.Path('results')
path.mkdir(parents=True, exist_ok=True)
tables = f16.tables
plot_table2D('Cl', path, alpha_deg_grid, beta_deg_grid, 'alpha_deg', 'beta_deg', tables['Cl'])
plot_table2D('Cm', path, alpha_deg_grid, elev_deg_grid, 'alpha_deg', 'elev_deg', tables['Cm'])
plot_table2D('Cn', path, alpha_deg_grid, beta_deg_grid, 'alpha_deg', 'beta_deg', tables['Cn'])
plot_table2D('Cx', path, alpha_deg_grid, elev_deg_grid, 'alpha_deg', 'elev_deg', tables['Cx'])
plot_table2D('Cy', path, beta_deg_grid, ail_deg_grid, 'beta_deg', 'ail_deg',
lambda x, y: tables['Cy'](x, y, 0))
plot_table2D('Cz', path, alpha_deg_grid, beta_deg_grid, 'alpha_deg', 'beta_deg',
lambda x, y: tables['Cz'](x, y, 0))
plot_table2D('thrust_idle', path, alt_grid, mach_grid, 'alt, ft', 'mach', tables['thrust_idle'])
plot_table2D('thrust_mil', path, alt_grid, mach_grid, 'alt, ft', 'mach', tables['thrust_mil'])
plot_table2D('thrust_max', path, alt_grid, mach_grid, 'alt, ft', 'mach', tables['thrust_max'])
plt.figure()
lift = []
for alpha in alpha_deg_grid:
lift.append(-tables['Cz'](alpha, 0, 0))
plt.plot(alpha_deg_grid, lift)
plt.xlabel('alpha, deg')
plt.ylabel('CL')
plt.savefig(path.joinpath('CL.png'))
plt.close()
plt.figure()
plot_table2D('amach', path, np.linspace(0, 1000), np.linspace(0, 60000), 'VT, ft/s', 'alt, ft', tables['amach'])
plt.close()
names = ['CXq', 'CYr', 'CYp', 'CZq', 'Clr', 'Clp', 'Cmq', 'Cnr', 'Cnp']
for name in names:
plt.figure()
data = [tables[name](alpha) for alpha in alpha_deg_grid]
plt.plot(alpha_deg_grid, data)
plt.xlabel('alpha, deg')
plt.ylabel(name)
plt.savefig(path.joinpath('damp_{:s}.png'.format(name)))
plt.close()
def test_jacobian():
x_sym = ca.MX.sym('x', 16)
u_sym = ca.MX.sym('u', 4)
x = f16.State.from_casadi(x_sym)
u = f16.Control.from_casadi(u_sym)
p = f16.Parameters()
dx = f16.dynamics(x, u, p)
A = ca.jacobian(dx.to_casadi(), x_sym)
B = ca.jacobian(dx.to_casadi(), u_sym)
f_A = ca.Function('A', [x_sym, u_sym], [A])
f_B = ca.Function('B', [x_sym, u_sym], [B])
print('A', f_A(np.ones(16), np.ones(4)))
print('B', f_B(np.ones(16), np.ones(4)))
def test_trim1():
# pg 197
p = f16.Parameters()
x = f16.State(VT=502, alpha=0.03691, theta=0.03691)
u = f16.Control(thtl=0.1385, elv_cmd_deg=-0.7588)
x = f16.trim_actuators(x, u)
dx = f16.dynamics(x, u, p)
print(dx)
assert f16.trim_cost(dx) < TRIM_TOL
def test_trim2():
# pg 197
p = f16.Parameters(xcg=0.3)
x = f16.State(VT=502, alpha=0.03936, theta=0.03936)
u = f16.Control(thtl=0.1485, elv_cmd_deg=-1.931)
x = f16.trim_actuators(x, u)
x.power = f16.tables['tgear'](u.thtl)
dx = f16.dynamics(x, u, p)
print(dx)
assert f16.trim_cost(dx) < TRIM_TOL
def test_trim3():
# pg 197
p = f16.Parameters(xcg=0.38)
x = f16.State(VT=502, alpha=0.03544, theta=0.03544)
u = f16.Control(thtl=0.1325, elv_cmd_deg=-0.0559)
x = f16.trim_actuators(x, u)
dx = f16.dynamics(x, u, p)
assert f16.trim_cost(dx) < TRIM_TOL
def test_trim4():
# pg 197
p = f16.Parameters(xcg=0.3)
# psi_dot = 0.3
x = f16.State(VT=502, alpha=0.2485, beta=4.8e-4, phi=1.367, theta=0.05185,
P=-0.0155, Q=0.2934, R=0.06071)
u = f16.Control(
thtl=0.8499, elv_cmd_deg=-6.256,
ail_cmd_deg=0.09891, rdr_cmd_deg=-0.4218)
x = f16.trim_actuators(x, u)
dx = f16.dynamics(x, u, p)
print(dx)
assert f16.trim_cost(dx) < TRIM_TOL
def test_trim5():
# pg 197
p = f16.Parameters(xcg=0.3) # listed as -0.3, must be typo
# theta_dot = 0.3
x = f16.State(VT=502, alpha=0.3006, beta=4.1e-5, theta=0.3006, Q=0.3)
u = f16.Control(
thtl=1.023, elv_cmd_deg=-7.082,
ail_cmd_deg=-6.2e-4, rdr_cmd_deg=0.01655)
x = f16.trim_actuators(x, u)
dx = f16.dynamics(x, u, p)
print(dx)
assert f16.trim_cost(dx) < 2e-2 # doesn't converge as close
def test_trim6():
# pg 195
p = f16.Parameters()
x = f16.State(VT=502, alpha=2.392628e-1, beta=5.061803e-4,
phi=1.366289, theta=5.000808e-2, psi=2.340769e-1,
P=-1.499617e-2, Q=2.933811e-1, R=6.084932e-2,
p_N=0, p_E=0, alt=0, power=6.412363e1)
u = f16.Control(thtl=8.349601e-1, elv_cmd_deg=-1.481766,
ail_cmd_deg=9.553108e-2, rdr_cmd_deg=-4.118124e-1)
x = f16.trim_actuators(x, u)
dx = f16.dynamics(x, u, p)
print(dx)
assert f16.trim_cost(dx) < TRIM_TOL
def test_trim_and_linearize():
p = f16.Parameters()
x = f16.State(VT=502)
x0, u0 = f16.trim(x=x, p=p, phi_dot=0, theta_dot=0, psi_dot=0, gam=0)
dx = f16.dynamics(x0, u0, p)
assert f16.trim_cost(dx) < TRIM_TOL
print(dx)
sys = f16.linearize(x0, u0, p)
sys.sub_system(['VT', 'elv_deg', 'alpha', 'Q'], ['elv_cmd_deg'], ['alpha', 'Q'])
print(sys)
ss = sys.to_control()
def test_table_3_5_2():
# pg 187
p = f16.Parameters(xcg=0.4)
x = f16.State(
VT=500, alpha=0.5, beta=-0.2,
phi=-1, theta=1, psi=-1,
P=0.7, Q=-0.8, R=0.9,
p_N=1000, p_E=900, alt=10000)
u = f16.Control(
thtl=0.9, elv_cmd_deg=20,
ail_cmd_deg=-15, rdr_cmd_deg=-20)
x = f16.trim_actuators(x, u)
x.power = 90
dx = f16.dynamics(x, u, p)
dx_compute = np.array(dx.to_casadi())[:, 0]
dx_check = np.array([
-75.23724, -0.8813491, -0.4759990,
2.505734, 0.3250820, 2.145926,
12.62679, 0.9649671, 0.5809759,
342.4439, -266.7707, 248.1241, -58.68999, 0, 0, 0
])
print('\nexpected:\n\t', dx_check)
print('\nactual:\n\t', dx_compute)
print('\nerror:\n\t', dx_check - dx_compute)
assert np.allclose(dx_compute, dx_check, 1e-3)
def test_simulate():
f_control = lambda t, x: f16.Control()
    f16.simulate(x0=f16.State(VT=502), f_control=f_control,
                 p=f16.Parameters(), t0=0, tf=10, dt=0.01)
| StarcoderdataPython
1643171 | <reponame>eshatro/warehouse
from typing import List
from .article import Article
class Product:
def __init__(
self,
name: str,
articles: List[Article],
price: float = None,
possible_quantity: int = None,
):
self.name = name
self.price = price
self.articles = articles
self.possible_quantity = possible_quantity
def __str__(self):
return f"{self.name} ({self.possible_quantity})"
@property
def product_articles(self) -> set:
return {f.id for f in self.articles}
def calculate_possible_quantity(self, available_articles):
possible_quantity = 99999999999999
if not available_articles:
self.possible_quantity = 0
return self.possible_quantity
for product_article in self.articles:
stock_article = available_articles.get(product_article.id)
times = int(stock_article.available_stock / product_article.quantity)
if times <= 0:
self.possible_quantity = 0
return self.possible_quantity
if times < possible_quantity:
possible_quantity = times
self.possible_quantity = possible_quantity
return possible_quantity
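    # Illustrative (hypothetical stock levels): a product needing 2 units of
    # article A and 1 unit of article B, with 5 A and 4 B available, can be
    # assembled min(5 // 2, 4 // 1) = 2 times.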
def are_product_articles_in_stock(self, available_articles):
return (
self.product_articles.issubset(available_articles)
and self.calculate_possible_quantity(available_articles) > 0
)
| StarcoderdataPython |
3399620 | <reponame>doitintl/elastic-event-store<gh_stars>10-100
import pytest
import uuid
from unittest import TestCase
from tests.integration.api_test_client import ApiTestClient
@pytest.mark.slow
class TestFetchingEvents(TestCase):
api = None
def setUp(self) -> None:
self.api = self.api or ApiTestClient()
return super().setUp()
def test_fetch_events(self):
stream_id = str(uuid.uuid4())
self.api.commit(
stream_id=stream_id,
last_changeset_id=0,
metadata=self.api.some_metadata,
events=[
{ "type": "init", "foo": "bar" },
{ "type": "update", "foo": "baz" },
{ "type": "switch", "baz": "foo" },
{ "type": "modify", "baz": "bar" },
]
)
response = self.api.query_events(stream_id)
self.assertDictEqual(response.json(), {
"stream_id": stream_id,
"events": [
{ "id": 1, "data": { "type": "init", "foo": "bar" } },
{ "id": 2, "data": { "type": "update", "foo": "baz" } },
{ "id": 3, "data": { "type": "switch", "baz": "foo" } },
{ "id": 4, "data": { "type": "modify", "baz": "bar" } }
]
})
def test_fetch_single_event(self):
stream_id = str(uuid.uuid4())
self.api.commit(
stream_id=stream_id,
last_changeset_id=0,
metadata=self.api.some_metadata,
events=[
{ "type": "init", "foo": "bar" },
{ "type": "update", "foo": "baz" },
{ "type": "switch", "baz": "foo" },
{ "type": "modify", "baz": "bar" },
]
)
response = self.api.query_events(stream_id, from_event=3, to_event=3)
self.assertDictEqual(response.json(), {
"stream_id": stream_id,
"events": [ { "id": 3, "data": { "type": "switch", "baz": "foo" } } ]
})
def test_fetch_events_from_number(self):
stream_id = str(uuid.uuid4())
self.api.commit(
stream_id=stream_id,
last_changeset_id=0,
metadata=self.api.some_metadata,
events=[
{ "type": "init", "foo": "bar" },
{ "type": "update", "foo": "baz" },
{ "type": "switch", "baz": "foo" },
{ "type": "modify", "baz": "bar" },
]
)
response = self.api.query_events(stream_id, from_event=3)
self.assertDictEqual(response.json(), {
"stream_id": stream_id,
"events": [
{ "id": 3, "data": { "type": "switch", "baz": "foo" } },
{ "id": 4, "data": { "type": "modify", "baz": "bar" } }
]
})
def test_fetch_events_to_number(self):
stream_id = str(uuid.uuid4())
self.api.commit(
stream_id=stream_id,
last_changeset_id=0,
metadata=self.api.some_metadata,
events=[
{ "type": "init", "foo": "bar" },
{ "type": "update", "foo": "baz" },
{ "type": "switch", "baz": "foo" },
{ "type": "modify", "baz": "bar" },
]
)
response = self.api.query_events(stream_id, to_event=3)
self.assertDictEqual(response.json(), {
"stream_id": stream_id,
"events": [
{ "id": 1, "data": { "type": "init", "foo": "bar" } },
{ "id": 2, "data": { "type": "update", "foo": "baz" } },
{ "id": 3, "data": { "type": "switch", "baz": "foo" } },
]
})
def test_fetch_events_from_and_to_numbers(self):
stream_id = str(uuid.uuid4())
self.api.commit(
stream_id=stream_id,
last_changeset_id=0,
metadata=self.api.some_metadata,
events=[
{ "type": "init", "foo": "bar" },
{ "type": "update", "foo": "baz" },
{ "type": "switch", "baz": "foo" },
{ "type": "modify", "baz": "bar" },
]
)
response = self.api.query_events(stream_id, from_event=2, to_event=3)
self.assertDictEqual(response.json(), {
"stream_id": stream_id,
"events": [
{ "id": 2, "data": { "type": "update", "foo": "baz" } },
{ "id": 3, "data": { "type": "switch", "baz": "foo" } },
]
})
def test_fetch_events_from_and_to_numbers_across_multiple_commits(self):
stream_id = str(uuid.uuid4())
self.api.commit(
stream_id=stream_id,
last_changeset_id=0,
metadata=self.api.some_metadata,
events=[
{ "type": "init", "foo": "bar" },
{ "type": "update", "foo": "baz" },
{ "type": "switch", "baz": "foo" },
{ "type": "modify", "baz": "bar" },
]
)
self.api.commit(
stream_id=stream_id,
last_changeset_id=1,
metadata=self.api.some_metadata,
events=[
{ "type": "update", "baz": "bar" },
{ "type": "switch", "foo": "baz" },
{ "type": "modify", "bar": "foo" },
]
)
response = self.api.query_events(stream_id, from_event=3, to_event=6)
self.assertDictEqual(response.json(), {
"stream_id": stream_id,
"events": [
{ "id": 3, "data": { "type": "switch", "baz": "foo" } },
{ "id": 4, "data": { "type": "modify", "baz": "bar" }, },
{ "id": 5, "data": { "type": "update", "baz": "bar" }, },
{ "id": 6, "data": { "type": "switch", "foo": "baz" }, },
]
})
def test_invalid_querying_params1(self):
stream_id = str(uuid.uuid4())
response = self.api.query_events(stream_id, from_event=4, to_event=3)
assert response.status_code == 400
self.assertDictEqual(response.json(), {
"stream_id": stream_id,
"error": "INVALID_EVENT_FILTERING_PARAMS",
"message": 'The higher boundary cannot be lower than the lower boundary: 4(from) > 3(to)'
})
def test_invalid_querying_params2(self):
stream_id = str(uuid.uuid4())
response = self.api.query_events(stream_id, from_event="test")
assert response.status_code == 400
self.assertDictEqual(response.json(), {
"stream_id": stream_id,
"error": "INVALID_EVENT_FILTERING_PARAMS",
"message": 'The filtering params(from, to) have to be positive integer values'
})
def test_invalid_querying_params3(self):
stream_id = str(uuid.uuid4())
response = self.api.query_events(stream_id, to_event="test")
assert response.status_code == 400
self.assertDictEqual(response.json(), {
"stream_id": stream_id,
"error": "INVALID_EVENT_FILTERING_PARAMS",
"message": 'The filtering params(from, to) have to be positive integer values'
})
def test_no_stream_id(self):
response = self.api.query_events("")
assert response.status_code == 400
self.assertDictEqual(response.json(), {
"error": "MISSING_STREAM_ID",
"message": 'stream_id is a required value'
})
def test_fetching_unexisting_stream(self):
response = self.api.query_events("abcd")
assert response.status_code == 404
self.assertDictEqual(response.json(), {
"stream_id": "abcd",
"error": "STREAM_NOT_FOUND",
"message": f'The specified stream(abcd) doesn\'t exist'
})
def test_fetch_unexisting_events(self):
stream_id = str(uuid.uuid4())
self.api.commit(
stream_id=stream_id,
last_changeset_id=0,
metadata=self.api.some_metadata,
events=self.api.some_events
)
response = self.api.query_events(stream_id, from_event=200)
self.assertDictEqual(response.json(), {
"stream_id": stream_id,
"events": [ ]
        })
| StarcoderdataPython
5062048 | import logging
import unittest
from riboplot import ribocore, riboplot, ribocount
# use testing configuration
CFG = ribocount.CONFIG = riboplot.config.TestingConfig()
logging.disable(logging.CRITICAL)
class RiboCountTestCase(unittest.TestCase):
def test_unrelated_fasta_file(self):
"""If an unrelated fasta file is used, raise an error"""
parser = ribocount.create_parser()
args = parser.parse_args(['-b', CFG.RIBO_FILE, '-f', CFG.UNRELATED_FASTA])
self.assertRaises(ribocore.ArgumentError, ribocount.main, args)
def test_get_ribo_counts(self):
"""Get read counts upstream of the longest ORF"""
parser = ribocount.create_parser()
args = parser.parse_args(['-b', CFG.RIBO_FILE, '-f', CFG.UNRELATED_FASTA, '-l', '0', '-s', '0'])
with ribocore.open_pysam_file(args.ribo_file, ftype='bam') as f:
# pass transcript name explicitly as it is not one of the command
# line options
counts, reads = ribocore.get_ribo_counts(ribo_fileobj=f, transcript_name=CFG.TRANSCRIPT_NAME,
read_lengths=args.read_lengths, read_offsets=args.read_offsets)
# 529 is the start position of the longest orf for the test transcript
counts_upstream, reads_upstream = ribocore.filter_ribo_counts(counts=counts, orf_start=529)
            print('\nTotal read counts: {}\nUpstream read counts: {}'.format(reads, reads_upstream))
self.assertTrue(reads_upstream > 1, msg='There should be reads upstream for this BAM file')
self.assertTrue(reads > 1, msg='There should be Ribo-Seq reads for this BAM file')
self.assertTrue(len(counts) > len(counts_upstream), msg='Total read counts should be higher than upstream read counts')
| StarcoderdataPython |
3595367 | <reponame>enricobacis/bigstack
import sys as _sys
import types as _types
import functools as _functools
import threading as _threading
import multiprocessing.pool as _mpool
_k = (2 ** 10)
_M = (2 ** 10) * _k
_G = (2 ** 10) * _M
_lock = _threading.Lock()
def bigstack(*args, **kwargs):
'''Decorator that increases the stack size of a function and the recursion
    limit. The function runs in a separate thread with a stack size specified
    by the 'stacksize' parameter (default: 128MiB). Also, the recursion limit can
    be modified by the 'recursionlimit' parameter (default: 1M), but be aware
    that this is a variable shared by the whole Python environment, so a
subsequent invocation of a decorated function may change it.'''
stacksize = kwargs.get('stacksize', 128 * _M)
recursionlimit = kwargs.get('recursionlimit', _M)
def _decorator(fn):
'''This is the bigstack decorator itself.'''
@_functools.wraps(fn)
def _fn(*args, **kwargs):
# no two functions can change the stack size
with _lock:
_threading.stack_size(stacksize)
_sys.setrecursionlimit(recursionlimit)
# only new threads get the redefined stack size
pool = _mpool.ThreadPool(processes=1)
async_result = pool.apply_async(fn, args, kwargs)
return async_result.get()
return _fn
if not args:
return _decorator
# return the decorated function when used without keyword arguments
if not isinstance(args[0], _types.FunctionType):
raise ValueError('use keyword argument as bigstack parameters')
return _decorator(args[0])
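# A minimal usage sketch (the recursive function below is hypothetical): a
# recursion this deep would hit the default interpreter limits without the
# decorator.
#
# @bigstack(stacksize=256 * _M, recursionlimit=2 * _M)
# def depth(n):
#     return 0 if n == 0 else 1 + depth(n - 1)
#
# depth(1000000)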
| StarcoderdataPython |
6700307 | <filename>pooling/settings/__init__.py
import os
from .base import *
# you need to set "DJANGO_SETTINGS_MODULE = 'prod'" as an environment variable
# in your OS (on which your website is hosted)
if os.environ['DJANGO_SETTINGS_MODULE'] == 'prod':
from .prod import *
elif os.environ['DJANGO_SETTINGS_MODULE'] == 'stage':
from .stage import *
else:
    from .dev import *
| StarcoderdataPython
182189 | <reponame>IASA-SA-2020/normalize
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from konlpy import utils
import os
class CorpusLoader():
"""Loader for corpora.
For a complete list of corpora available in KoNLPy,
refer to :ref:`corpora`.
.. code-block:: python
>>> from konlpy.corpus import kolaw
>>> fids = kolaw.fileids()
>>> fobj = kolaw.open(fids[0])
>>> print fobj.read(140)
대한민국헌법
유구한 역사와 전통에 빛나는 우리 대한국민은 3·1운동으로 건립된 대한민국임시정부의 법통과 불의에 항거한 4·19민주이념을 계승하고, 조국의 민주개혁과 평화적 통일의 사명에 입각하여 정의·인도와 동포애로써 민족의 단결을 공고히 하고, 모든 사회적 폐습과 불의를 타파하며, 자율과 조화를 바 바
"""
def abspath(self, filename=None):
"""Absolute path of corpus file.
If ``filename`` is *None*, returns absolute path of corpus.
:param filename: Name of a particular file in the corpus.
"""
basedir = '%s/data/corpus/%s' % (utils.installpath, self.name)
if filename:
return '%s/%s' % (basedir, filename)
else:
return '%s/' % basedir
def fileids(self):
"""List of file IDs in the corpus."""
return os.listdir(self.abspath())
def open(self, filename):
"""Method to open a file in the corpus.
Returns a file object.
:param filename: Name of a particular file in the corpus.
"""
return utils.load_txt(self.abspath(filename))
def __init__(self, name=None):
if not name:
raise Exception("You need to input the name of the corpus")
else:
self.name = name
kolaw = CorpusLoader('kolaw')
kobill = CorpusLoader('kobill')
| StarcoderdataPython |
11325250 | <reponame>tina300399/torchgeometry<gh_stars>0
from typing import Tuple, Optional
import torch
import torch.nn.functional as F
from torchgeometry.core.conversions import deg2rad
from torchgeometry.core.homography_warper import homography_warp
__all__ = [
"warp_perspective",
"warp_affine",
"get_perspective_transform",
"get_rotation_matrix2d",
"normal_transform_pixel",
]
def normal_transform_pixel(height, width):
tr_mat = torch.Tensor([[1.0, 0.0, -1.0],
[0.0, 1.0, -1.0],
                           [0.0, 0.0, 1.0]])  # 3x3; unsqueezed to 1x3x3 below
tr_mat[0, 0] = tr_mat[0, 0] * 2.0 / (width - 1.0)
tr_mat[1, 1] = tr_mat[1, 1] * 2.0 / (height - 1.0)
tr_mat = tr_mat.unsqueeze(0)
return tr_mat
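# Illustrative mapping: for an image of width w and height h, the matrix sends
# pixel (0, 0) to (-1, -1) and pixel (w - 1, h - 1) to (1, 1), i.e. it rescales
# pixel coordinates into the [-1, 1] range expected by grid sampling.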
def dst_norm_to_dst_norm(dst_pix_trans_src_pix, dsize_src, dsize_dst):
# source and destination sizes
src_h, src_w = dsize_src
dst_h, dst_w = dsize_dst
# the devices and types
device = dst_pix_trans_src_pix.device
dtype = dst_pix_trans_src_pix.dtype
# compute the transformation pixel/norm for src/dst
src_norm_trans_src_pix = normal_transform_pixel(
src_h, src_w).to(device).to(dtype)
src_pix_trans_src_norm = torch.inverse(src_norm_trans_src_pix)
dst_norm_trans_dst_pix = normal_transform_pixel(
dst_h, dst_w).to(device).to(dtype)
# compute chain transformations
dst_norm_trans_src_norm = torch.matmul(
dst_norm_trans_dst_pix, torch.matmul(
dst_pix_trans_src_pix, src_pix_trans_src_norm))
return dst_norm_trans_src_norm
def transform_warp_impl(src, dst_pix_trans_src_pix, dsize_src, dsize_dst):
"""Compute the transform in normalized cooridnates and perform the warping.
"""
dst_norm_trans_dst_norm = dst_norm_to_dst_norm(
dst_pix_trans_src_pix, dsize_src, dsize_dst)
return homography_warp(src, torch.inverse(
dst_norm_trans_dst_norm), dsize_dst)
def warp_perspective(src, M, dsize, flags='bilinear', border_mode=None,
border_value=0):
r"""Applies a perspective transformation to an image.
The function warp_perspective transforms the source image using
the specified matrix:
.. math::
\text{dst} (x, y) = \text{src} \left(
\frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
\frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}}
\right )
Args:
src (torch.Tensor): input image.
M (Tensor): transformation matrix.
dsize (tuple): size of the output image (height, width).
Returns:
Tensor: the warped input image.
Shape:
- Input: :math:`(B, C, H, W)` and :math:`(B, 3, 3)`
- Output: :math:`(B, C, H, W)`
.. note::
See a working example `here <https://github.com/arraiy/torchgeometry/
blob/master/examples/warp_perspective.ipynb>`_.
"""
if not torch.is_tensor(src):
raise TypeError("Input src type is not a torch.Tensor. Got {}"
.format(type(src)))
if not torch.is_tensor(M):
raise TypeError("Input M type is not a torch.Tensor. Got {}"
.format(type(M)))
if not len(src.shape) == 4:
raise ValueError("Input src must be a BxCxHxW tensor. Got {}"
.format(src.shape))
    if not (len(M.shape) == 3 and M.shape[-2:] == (3, 3)):
        raise ValueError("Input M must be a Bx3x3 tensor. Got {}"
                         .format(M.shape))
# launches the warper
return transform_warp_impl(src, M, (src.shape[-2:]), dsize)
def warp_affine(src: torch.Tensor,
M: torch.Tensor,
dsize: Tuple[int,
int],
flags: Optional[str] = 'bilinear',
padding_mode: Optional[str] = 'zeros') -> torch.Tensor:
r"""Applies an affine transformation to a tensor.
The function warp_affine transforms the source tensor using
the specified matrix:
.. math::
\text{dst}(x, y) = \text{src} \left( M_{11} x + M_{12} y + M_{13} ,
M_{21} x + M_{22} y + M_{23} \right )
Args:
src (torch.Tensor): input tensor of shape :math:`(B, C, H, W)`.
M (torch.Tensor): affine transformation of shape :math:`(B, 2, 3)`.
dsize (Tuple[int, int]): size of the output image (height, width).
mode (Optional[str]): interpolation mode to calculate output values
'bilinear' | 'nearest'. Default: 'bilinear'.
padding_mode (Optional[str]): padding mode for outside grid values
'zeros' | 'border' | 'reflection'. Default: 'zeros'.
Returns:
torch.Tensor: the warped tensor.
Shape:
- Output: :math:`(B, C, H, W)`
.. note::
See a working example `here <https://github.com/arraiyopensource/
torchgeometry/blob/master/docs/source/warp_affine.ipynb>`__.
"""
if not torch.is_tensor(src):
raise TypeError("Input src type is not a torch.Tensor. Got {}"
.format(type(src)))
if not torch.is_tensor(M):
raise TypeError("Input M type is not a torch.Tensor. Got {}"
.format(type(M)))
if not len(src.shape) == 4:
raise ValueError("Input src must be a BxCxHxW tensor. Got {}"
.format(src.shape))
    if not (len(M.shape) == 3 and M.shape[-2:] == (2, 3)):
        raise ValueError("Input M must be a Bx2x3 tensor. Got {}"
                         .format(M.shape))
# we generate a 3x3 transformation matrix from 2x3 affine
M_3x3: torch.Tensor = F.pad(M, (0, 0, 0, 1, 0, 0),
mode="constant", value=0)
M_3x3[:, 2, 2] += 1.0
# launches the warper
return transform_warp_impl(src, M_3x3, (src.shape[-2:]), dsize)
def get_perspective_transform(src, dst):
r"""Calculates a perspective transform from four pairs of the corresponding
points.
The function calculates the matrix of a perspective transform so that:
.. math ::
\begin{bmatrix}
t_{i}x_{i}^{'} \\
t_{i}y_{i}^{'} \\
t_{i} \\
\end{bmatrix}
=
    \textbf{map\_matrix} \cdot
\begin{bmatrix}
x_{i} \\
y_{i} \\
1 \\
\end{bmatrix}
where
.. math ::
dst(i) = (x_{i}^{'},y_{i}^{'}), src(i) = (x_{i}, y_{i}), i = 0,1,2,3
Args:
src (Tensor): coordinates of quadrangle vertices in the source image.
dst (Tensor): coordinates of the corresponding quadrangle vertices in
the destination image.
Returns:
Tensor: the perspective transformation.
Shape:
- Input: :math:`(B, 4, 2)` and :math:`(B, 4, 2)`
- Output: :math:`(B, 3, 3)`
"""
if not torch.is_tensor(src):
raise TypeError("Input type is not a torch.Tensor. Got {}"
.format(type(src)))
if not torch.is_tensor(dst):
raise TypeError("Input type is not a torch.Tensor. Got {}"
.format(type(dst)))
if not src.shape[-2:] == (4, 2):
raise ValueError("Inputs must be a Bx4x2 tensor. Got {}"
.format(src.shape))
if not src.shape == dst.shape:
raise ValueError("Inputs must have the same shape. Got {}"
.format(dst.shape))
if not (src.shape[0] == dst.shape[0]):
raise ValueError("Inputs must have same batch size dimension. Got {}"
.format(src.shape, dst.shape))
def ax(p, q):
ones = torch.ones_like(p)[..., 0:1]
zeros = torch.zeros_like(p)[..., 0:1]
return torch.cat(
[p[:, 0:1], p[:, 1:2], ones, zeros, zeros, zeros,
-p[:, 0:1] * q[:, 0:1], -p[:, 1:2] * q[:, 0:1]
], dim=1)
def ay(p, q):
ones = torch.ones_like(p)[..., 0:1]
zeros = torch.zeros_like(p)[..., 0:1]
return torch.cat(
[zeros, zeros, zeros, p[:, 0:1], p[:, 1:2], ones,
-p[:, 0:1] * q[:, 1:2], -p[:, 1:2] * q[:, 1:2]], dim=1)
# we build matrix A by using only 4 point correspondence. The linear
# system is solved with the least square method, so here
# we could even pass more correspondence
p = []
p.append(ax(src[:, 0], dst[:, 0]))
p.append(ay(src[:, 0], dst[:, 0]))
p.append(ax(src[:, 1], dst[:, 1]))
p.append(ay(src[:, 1], dst[:, 1]))
p.append(ax(src[:, 2], dst[:, 2]))
p.append(ay(src[:, 2], dst[:, 2]))
p.append(ax(src[:, 3], dst[:, 3]))
p.append(ay(src[:, 3], dst[:, 3]))
# A is Bx8x8
A = torch.stack(p, dim=1)
# b is a Bx8x1
b = torch.stack([
dst[:, 0:1, 0], dst[:, 0:1, 1],
dst[:, 1:2, 0], dst[:, 1:2, 1],
dst[:, 2:3, 0], dst[:, 2:3, 1],
dst[:, 3:4, 0], dst[:, 3:4, 1],
], dim=1)
# solve the system Ax = b
X, LU = torch.gesv(b, A)
# create variable to return
batch_size = src.shape[0]
M = torch.ones(batch_size, 9, device=src.device, dtype=src.dtype)
M[..., :8] = torch.squeeze(X, dim=-1)
return M.view(-1, 3, 3) # Bx3x3
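# A usage sketch with hypothetical corner points: map the unit square onto a
# square twice its size.
#
# src = torch.tensor([[[0., 0.], [1., 0.], [1., 1.], [0., 1.]]])  # Bx4x2
# dst = 2. * src                                                  # Bx4x2
# M = get_perspective_transform(src, dst)                         # Bx3x3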
def get_rotation_matrix2d(center, angle, scale):
r"""Calculates an affine matrix of 2D rotation.
The function calculates the following matrix:
.. math::
\begin{bmatrix}
\alpha & \beta & (1 - \alpha) \cdot \text{x}
- \beta \cdot \text{y} \\
-\beta & \alpha & \beta \cdot \text{x}
+ (1 - \alpha) \cdot \text{y}
\end{bmatrix}
where
.. math::
\alpha = \text{scale} \cdot cos(\text{angle}) \\
\beta = \text{scale} \cdot sin(\text{angle})
The transformation maps the rotation center to itself
If this is not the target, adjust the shift.
Args:
center (Tensor): center of the rotation in the source image.
angle (Tensor): rotation angle in degrees. Positive values mean
counter-clockwise rotation (the coordinate origin is assumed to
be the top-left corner).
scale (Tensor): isotropic scale factor.
Returns:
Tensor: the affine matrix of 2D rotation.
Shape:
- Input: :math:`(B, 2)`, :math:`(B)` and :math:`(B)`
- Output: :math:`(B, 2, 3)`
Example:
>>> center = torch.zeros(1, 2)
>>> scale = torch.ones(1)
>>> angle = 45. * torch.ones(1)
>>> M = tgm.get_rotation_matrix2d(center, angle, scale)
tensor([[[ 0.7071, 0.7071, 0.0000],
[-0.7071, 0.7071, 0.0000]]])
"""
if not torch.is_tensor(center):
raise TypeError("Input center type is not a torch.Tensor. Got {}"
.format(type(center)))
if not torch.is_tensor(angle):
raise TypeError("Input angle type is not a torch.Tensor. Got {}"
.format(type(angle)))
if not torch.is_tensor(scale):
raise TypeError("Input scale type is not a torch.Tensor. Got {}"
.format(type(scale)))
if not (len(center.shape) == 2 and center.shape[1] == 2):
raise ValueError("Input center must be a Bx2 tensor. Got {}"
.format(center.shape))
if not len(angle.shape) == 1:
raise ValueError("Input angle must be a B tensor. Got {}"
.format(angle.shape))
if not len(scale.shape) == 1:
raise ValueError("Input scale must be a B tensor. Got {}"
.format(scale.shape))
if not (center.shape[0] == angle.shape[0] == scale.shape[0]):
raise ValueError("Inputs must have same batch size dimension. Got {}"
.format(center.shape, angle.shape, scale.shape))
# convert angle and apply scale
angle_rad = deg2rad(angle)
alpha = torch.cos(angle_rad) * scale
beta = torch.sin(angle_rad) * scale
# unpack the center to x, y coordinates
x, y = center[..., 0], center[..., 1]
# create output tensor
batch_size, _ = center.shape
M = torch.zeros(batch_size, 2, 3, device=center.device, dtype=center.dtype)
M[..., 0, 0] = alpha
M[..., 0, 1] = beta
M[..., 0, 2] = (1. - alpha) * x - beta * y
M[..., 1, 0] = -beta
M[..., 1, 1] = alpha
M[..., 1, 2] = beta * x + (1. - alpha) * y
return M
| StarcoderdataPython |
5050065 | from django.urls import reverse
from seahub.test_utils import BaseTestCase
class ConvertCmmtDescLinkTest(BaseTestCase):
def setUp(self):
self.login_as(self.user)
def test_can_render(self):
pass
# resp = self.client.get(reverse('convert_cmmt_desc_link') + '?repo_id=' + self.repo.id + '&cmmt_id=xxx' + '&nm=foo')
# self.assertEqual(200, resp.status_code)
| StarcoderdataPython |
49924 | from .client import SocketClient
from .server import (
SocketServer,
SocketServerHandler,
)
| StarcoderdataPython |
197669 | <filename>cvv/methods/grades.py<gh_stars>0
"""grades"""
from datetime import datetime
import numpy as np
from requests import get
from lxml import html
from ..data_types import Grade
class Grades:
"""grades parsing class"""
def __init__(self, cvv):
self.cvv = cvv
if not self.cvv.grades:
self.retrieve_grades()
def _do_grades(self):
grades = get(self.cvv.endpoint +
'/cvv/app/default/genitori_voti.php',
cookies=self.cvv.cookies,
headers=self.cvv.headers)
if grades.status_code != 200:
raise self.cvv.GenericError
return grades.text
@classmethod
def sanitize_grade(cls, grade):
"""sanitize grade"""
bad_words = ['+', '-', '½']
irc = {
'o': 10,
'd': 8,
'b': 7,
's': 6,
'ns': 5
}
if grade in irc:
return irc[grade]
grade_new = int(grade.strip().rstrip(''.join(bad_words)))
if bad_words[0] in grade:
grade_new += 0.25
if bad_words[1] in grade:
grade_new -= 0.25
if bad_words[2] in grade:
grade_new += 0.5
return grade_new
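    # Illustrative conversions performed above (hypothetical inputs):
    # '7+' -> 7.25, '8-' -> 7.75, '6½' -> 6.5, and the letter grade 'ns' -> 5.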
@classmethod
def _trend(cls, grades_list):
coeffs = np.polyfit(range(1, len(grades_list)+1), grades_list, 1)
slope = coeffs[-2]
return float(slope)
def retrieve_grades(self):
"""parse grades"""
tree = html.fromstring(self._do_grades())
school_terms = tree.xpath('//*[@class="outer"]/@id')
for term in school_terms:
grades_dict = {}
trs = tree.xpath(
f"//*[@id=\"{term}\"]//"
f"table[@sessione=\"{term}\"]//"
f"tr[contains(@sessione, \"{term}\") and "
f"contains(@class, \"riga_materia_componente\")]"
)
for xpath_tr in trs:
subject = xpath_tr.xpath(
'td')[0].text_content().strip().capitalize()
grades_dict[subject] = []
voti = xpath_tr.xpath(
'td[@class="registro cella_voto :sfondocella:"]')
for voto in voti:
grade_date = voto.xpath('span')[0].text_content()
if int(grade_date.split('/')[1]) > 8:
grade_year = datetime.now().year-1
else:
grade_year = datetime.now().year
grades_dict[subject].append(Grade(
voto.xpath(
'div/p')[0].text_content(),
f'{grade_date}/{grade_year}'))
if len(grades_dict):
self.cvv.grades[term] = grades_dict
return self.cvv.grades
def get_grades(self):
"""get grades"""
return self.cvv.grades
def get_terms_keys(self):
"""get school terms keys"""
return list(self.get_grades().keys())
def get_subject_keys(self, index):
"""get subject keys"""
return list(self.get_grades()[index].keys())
    def _get_all_grades(self, index):
        """collect every sanitized grade for a school term"""
        grades = []
        for subject in self.get_grades()[index]:
            for grade in self.get_grades()[index][subject]:
                grades.append(self.sanitize_grade(grade.grade))
        return grades
def get_subject_average(self, index, subject):
"""get subject average"""
if not self.get_grades()[index][subject]:
return 0.0
return np.average([self.sanitize_grade(grade.grade) for grade
in self.get_grades()[index][subject]])
    def get_average(self, index):
        """get average"""
        grades = self._get_all_grades(index)
        # _get_all_grades now always returns a list; keep the old 0.0
        # behaviour for empty terms instead of letting np.average warn.
        if not grades:
            return 0.0
        return np.average(grades)
def get_trend(self, index, subject):
"""get trend"""
grades = [self.sanitize_grade(x.grade)
for x in self.get_grades()[index][subject]]
if len(grades) <= 1 or len(set(grades)) == 1:
return None
        trend = self._trend(grades)
        # A positive slope means the grades are improving over time.
        return trend > 0
| StarcoderdataPython |
6505783 | <reponame>ecdavis/pants<gh_stars>10-100
###############################################################################
#
# Copyright 2011-2012 Pants Developers (see AUTHORS.txt)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
"""
This is the asynchronous request helper for the Application system, utilizing
generator coroutines for optimal performance and ease of development.
"""
# Warning: This whole file is pretty much weird magic.
import json
import traceback
import weakref
from functools import wraps
from types import GeneratorType
from pants.http.utils import HTTPHeaders
from pants.web.application import Application, error, Response, RequestContext
from pants.web.utils import HTTPException, HTTPTransparentRedirect, log
###############################################################################
# Constants
###############################################################################
Again = object()
Waiting = object()
Finished = object()
###############################################################################
# Storage
###############################################################################
receivers = {}
###############################################################################
# Exceptions
###############################################################################
class TimeoutError(Exception):
"""
Instances of TimeoutError are raised into an asynchronous request handler
when an :func:`async.wait` or :func:`async.receive` timeout.
"""
pass
class RequestClosed(Exception):
"""
An instance of RequestClosed is raised into an asynchronous request handler
when the connection for the request is closed.
"""
pass
###############################################################################
# Basic Asynchronous Requests
###############################################################################
def async(func):
"""
The ``@async`` decorator is used in conjunction with
:class:`pants.web.Application` to create asynchronous request handlers using
generators. This is useful for performing database lookups and doing other
I/O bound tasks without blocking the server. The following example performs
a simple database lookup with a `fork <https://github.com/stendec/asyncmongo>`_
of `asyncmongo <https://github.com/bitly/asyncmongo>`_ that adds support for
Pants. It then uses `jinja2 <http://jinja.pocoo.org/>`_ templates to render
the response.
.. code-block:: python
from pants.web import Application, async
import jinja2
import asyncmongo
database_options = {
'host': '127.0.0.1',
'port': 27017,
'dbname': 'test',
}
db = asyncmongo.Client(pool_id='web', backend='pants', **database_options)
app = Application()
env = jinja2.Environment(loader=jinja2.FileSystemLoader("templates"))
index_template = env.get_template("index.html")
@app.route("/")
@async
def index(request):
results = yield async.run(db.news.find, {'published': True})
yield index_template.render(data=results)
app.run()
Additionally, the @async decorator also allows for the easy implementation
of server-sent events, including support for the ``text/event-stream``
Content-Type used by HTML5 ```EventSource
<http://dev.w3.org/html5/eventsource/>`_``.
.. seealso::
:func:`async.stream`, :func:`async.event_stream`
"""
@wraps(func)
def wrapper(request, *args, **kwargs):
# Set a bit of state for the request.
request._writer = _async_finish
_init(request)
# Create the generator.
try:
request._gen = gen = func(request, *args, **kwargs)
except Exception:
_cleanup(request)
raise
# If we've not got a generator, return the output.
if not isinstance(gen, GeneratorType):
_cleanup(request)
return gen
# Set a flag on the request so Application won't finish processing it.
request.auto_finish = False
# Now let's run the generator for the first time. No input yet, for
# obvious reasons.
_do(request, None)
return wrapper
def _async_finish(request, output):
"""
Write the provided output to the request and finish the request.
"""
if request._started:
request.connection.close(False)
_cleanup(request)
return
# Do things App style.
with request._context as app:
request.auto_finish = True
try:
if output is Finished:
raise RuntimeError("Reached StopIteration in asynchronous "
"request handler.")
app.parse_output(output)
except Exception as err:
if request._started:
request.connection.close(False)
_cleanup(request)
return
request._tb = traceback.format_exc()
try:
body, status, headers = app.handle_500(request, err)
except Exception:
log.exception("There was a problem handling an asynchronous "
"request, and a problem running "
"Application.handle_500 for %r." % app)
body, status, headers = error(500, request=request)
request.send_status(status)
if not 'Content-Length' in headers:
headers['Content-Length'] = len(body)
request.send_headers(headers)
request.write(body)
request.finish()
# Finish cleanup.
_cleanup(request)
###############################################################################
# Asynchronous Streams
###############################################################################
def stream(func):
"""
The ``@async.stream`` decorator is used to create asynchronous request
handlers using generators. This can be used to begin writing a portion of
the response to the client before the entire response can be generated.
The very first yielded output is processed for a status code and headers
using the same logic that :class:`~pants.web.Application` uses for its
standard route functions.
Subsequently yielded values are *not* processed, so returning a status code
and/or headers in that situation will result in undesired output. You may
return an instance of :class:`pants.web.Response` *or* the bare value to
write out.
The following is, though not particularly useful, an example::
@app.route("/")
@async.stream
def index(request):
yield None, 200, {'X-Pizza': 'Yum'}
yield "This is an example.\n"
yield "It isn't particularly useful.\n"
yield ("This will be treated as a list and serialized with "
"JSON because you can't set the status code or provide "
"additional headers after the response has started."), 401
"""
@wraps(func)
def wrapped(request, *args, **kwargs):
# Set a bit of state for the request.
request._writer = _stream_output
_init(request)
# Create the generator.
try:
request._gen = gen = func(request, *args, **kwargs)
except Exception:
_cleanup(request)
raise
# If we've not got a generator, return the output.
if not isinstance(gen, GeneratorType):
_cleanup(request)
return gen
# Set a flag on the request so Application won't finish processing it.
request.auto_finish = False
# Now let's run the generator for the first time. No input yet, for
# obvious reasons.
_do(request, None)
return wrapped
async.stream = stream
def _stream_output(request, output):
"""
Write the provided chunk of data to the stream. This will automatically
encode the output as Transfer-Encoding: chunked if necessary.
"""
if request._started:
if not output or output is Finished:
# We're finished.
            if request._chunked:
                # A zero-length chunk followed by a blank line terminates a
                # chunked body.
                request.write("0\r\n\r\n")
request.finish()
_cleanup(request)
return
# Go ahead and cast the body and send it.
if isinstance(output, Response):
output = output.body
try:
output = _cast(request, output)
except Exception:
log.exception("Error casting output for asynchronous stream.")
request.connection.close(False)
_cleanup(request)
return
if request._chunked:
request.write("%x\r\n%s\r\n" % (len(output), output))
return Again
# Assume that the first message has status and a header.
if isinstance(output, Response):
output, status, headers = output.body, output.status, output.headers
elif isinstance(output, tuple):
if len(output) == 3:
output, status, headers = output
else:
output, status = output
headers = HTTPHeaders()
else:
status = 200
headers = HTTPHeaders()
# Use the rule headers stuff.
if request._rule_headers:
if isinstance(request._rule_headers, HTTPHeaders):
rule_headers = request._rule_headers.copy()
else:
rule_headers = HTTPHeaders(request._rule_headers)
if isinstance(headers, HTTPHeaders):
rule_headers._data.update(headers._data)
else:
rule_headers.update(headers)
headers = rule_headers
if request._rule_content_type and not 'Content-Type' in headers:
headers['Content-Type'] = request._rule_content_type
# Check for a character encoding.
content_type = headers.get('Content-Type', '')
if 'charset=' in content_type:
request._charset = content_type.split('charset=',1)[1].strip()
# Check the body to guess a Content-Type.
if not 'Content-Type' in headers:
if hasattr(output, "to_html") or (isinstance(output, basestring) and
output[:5].lower() in ('<html', '<!doc')):
headers['Content-Type'] = 'text/html; charset=%s' % request._charset
elif isinstance(output, (tuple, list, dict)):
headers['Content-Type'] = 'application/json'
else:
headers['Content-Type'] = 'text/plain; charset=%s' % request._charset
# Finally, cast the body.
errored = False
if output is not None:
try:
output = _cast(request, output)
except Exception as err:
errored = True
with request._context as app:
try:
output, status, headers = app.handle_500(request, err)
except Exception:
output, status, headers = error(500, request=request)
if not 'Content-Length' in headers:
headers['Content-Length'] = len(output)
# Make sure the client has some way of determining the length.
if not 'Content-Length' in headers and not 'Transfer-Encoding' in headers:
headers['Transfer-Encoding'] = 'chunked'
request._chunked = True
# Now, send it all out.
request.send_status(status)
request.send_headers(headers)
if request.method.upper() == 'HEAD':
request.finish()
_cleanup(request)
return
if output is not None:
if request._chunked:
request.write("%x\r\n%s\r\n" % (len(output), output))
else:
request.write(output)
if errored:
request.finish()
_cleanup(request)
return
return Again
###############################################################################
# Event Stream
###############################################################################
def event_stream(func):
"""
The ``@async.event_stream`` decorator allows you to easily push server-sent
events from Pants to your web clients using the new HTML5 `EventSource
<http://dev.w3.org/html5/eventsource/>`_ API. Example::
from pants.web import Application, async, TimeoutError
@app.route("/events")
@async.event_stream
def events(request):
try:
message = yield async.receive("events", 10)
except TimeoutError:
yield None
else:
yield message
# Elsewhere...
async.send("events", "Something happened!")
When you yield a value, there are a few ways it can be processed.
1. If the value is empty, None, etc. a single comment line will be sent to
the client to keep the connection alive.
2. If the value is a tuple, it will be separated into ``(output, headers)``
and the provided message headers will be prepended to the output before
it's sent to the client.
3. Any other values for the output will result in normal output processing
before the output is sent to the client as a message.
.. note::
``@async.event_stream`` automatically formats output messages, handling
line breaks for you.
"""
@wraps(func)
def wrapped(request, *args, **kwargs):
# Set a bit of state for the request.
request._writer = _event_stream_output
_init(request)
request._chunked = True
# Create the generator.
try:
request._gen = gen = func(request, *args, **kwargs)
except Exception:
_cleanup(request)
raise
# If we've not got a generator, return the output.
if not isinstance(gen, GeneratorType):
_cleanup(request)
return gen
# Set a flag on the request so Application won't finish processing it.
request.auto_finish = False
# Now let's run the generator for the first time. No input yet, for
# obvious reasons.
_do(request, None)
return wrapped
async.event_stream = event_stream
def _event_stream_output(request, output):
"""
Write a text/event-stream message to the client. If no data has been sent
yet, it writes a 200 OK response code and a Content-Type header.
"""
if not request._started:
request.send_status()
request.send_headers({'Content-Type': 'text/event-stream'})
if output is Finished:
# We're finished.
request.connection.close()
_cleanup(request)
return
if isinstance(output, tuple):
output, headers = output
else:
headers = {}
if not output and not headers:
# Send a simple comment line for keep-alive.
request.write(":\r\n")
return Again
if output is None:
output = ""
else:
# Cast the output into something usable.
output = _cast(request, output)
# Split up output, adding "data:" field names, and then prepend the
# provided headers, if there are any.
output = "\r\n".join("data: %s" % x for x in output.splitlines())
for key, value in headers.iteritems():
output = "%s: %s\r\n%s" % (key, _cast(request, value), output)
# Write it out, with an extra blank line so that the client will read
# the message.
request.write("%s\r\n\r\n" % output)
return Again
###############################################################################
# Asynchronous _Sleeper
###############################################################################
class _Sleeper(tuple):
def __repr__(self):
return "_Sleeper(%r)" % self[0]
def sleep(time):
"""
Sleep for *time* seconds, doing nothing else during that period.
"""
return _Sleeper((time,))
async.sleep = sleep
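# A hedged usage sketch (route and handler names are illustrative):
#
#   @app.route("/delayed")
#   @async
#   def delayed(request):
#       yield async.sleep(5)  # resume after roughly five seconds
#       yield "Hello, eventually!"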
###############################################################################
# Asynchronous Caller
###############################################################################
def run(function, *args, **kwargs):
"""
Run *function* with the provided *args* and *kwargs*.
This works for any function that supports the ``callback`` keyword argument
by inserting a callback object into the keyword arguments before calling
the function.
If you need to asynchronously call a function that *doesn't* use
``callback``, please use :func:`async.callback`.
Here is a brief example using an `asyncmongo
<https://github.com/bitly/asyncmongo>`_ Client named ``db``::
@app.route("/count")
@async
def count(request):
results = yield async.run(db.news.find, {'published': True})
yield len(results)
.. note::
``async.run`` does *not* process keyword arguments passed to the
callback. If you require the keyword arguments, you must use
:func:`async.callback` manually.
Calling ``async.run`` returns the instance of
:class:`pants.web.Callback` used. Yielding that instance will
wait until the callback is triggered and return the value passed to
the callback.
"""
# Create a callback and set the callback keyword argument.
kwargs['callback'] = cb = Callback()
# Ignore the return value.
function(*args, **kwargs)
# Return the callback.
return cb
async.run = run
class Callback(object):
"""
Return an instance of :class:`pants.web.Callback` that can be used as a
callback with other asynchronous code to capture output and return it to
an asynchronous request handler.
Yielding an instance of Callback will wait until the callback has been
triggered, and then return values that were sent to the callback so that
they may be used by the asynchronous request handler.
It's easy::
@app.route("/")
@async
def index(request):
callback = async.callback()
do_something_crazy(request, on_complete=callback)
result = yield callback
if not result:
abort(403)
yield result
"""
__slots__ = ("request", "use_kwargs")
def __init__(self, use_kwargs=False):
# Store this callback.
self.use_kwargs = use_kwargs
self.request = request = Application.current_app.request
request._callbacks[self] = Waiting
request._unhandled.append(self)
def __call__(self, *args, **kwargs):
request = self.request
if hasattr(request, "_callbacks"):
if self.use_kwargs:
args = (args, kwargs)
elif len(args) == 1:
args = args[0]
request._callbacks[self] = args
# Now, see if we're finished waiting.
_check_waiting(request, self)
async.callback = Callback
###############################################################################
# Waiting
###############################################################################
class _WaitList(list):
timeout = None
def wait(timeout=None):
"""
Wait for all asynchronous callbacks to return, and return a list of those
    values. If a *timeout* is provided, wait up to that many seconds for the
callbacks to return before raising a TimeoutError containing a list of
the results that *did* complete.
"""
request = Application.current_app.request
top, request._unhandled = _WaitList(request._unhandled), []
top.timeout = timeout
return top
async.wait = wait
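# A hedged usage sketch (the asyncmongo-style ``db`` client mirrors the
# module docstring and is illustrative):
#
#   @app.route("/gather")
#   @async
#   def gather(request):
#       async.run(db.users.find)   # each async.run registers a Callback
#       async.run(db.posts.find)
#       users, posts = yield async.wait(timeout=5)
#       yield {'users': len(users), 'posts': len(posts)}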
def _wait_timeout(request):
"""
Handle a timed-out async.wait().
"""
if not hasattr(request, "_in_do"):
# Don't deal with requests that were closed. Just don't.
return
# Get the item off the top of the waiting stack, and make sure it's
# something we can work with.
if not request._waiting or not isinstance(request._waiting[-1], _WaitList):
return
# Build the input list.
input = []
for key in request._waiting.pop():
value = request._callbacks.pop(key)
input.append(value if value is not Waiting else None)
# Now, pass it along to _do. Note the as_exception=True.
_do(request, TimeoutError(input), as_exception=True)
def _check_waiting(request, trigger=None):
"""
Check the waiting list for the provided request to determine if we should
be taking action. If we should, pop the top item from the waiting list and
send the input we've gathered into _do.
"""
if not hasattr(request, "_in_do"):
# If this happens, the request was *probably* closed. There's nothing
# to do, so just get out of here.
return
# Get the item off the top of the waiting stack, and make sure it's
# something we can work with.
top = request._waiting[-1] if request._waiting else None
if not isinstance(top, (_WaitList, Callback)):
return
# If a trigger was provided, check to see if the top *is* that trigger. If
# this is the case, we can just return the result for that specific item.
if top is trigger:
# It is. We can pop off the top and send the input now.
request._waiting.pop()
_do(request, request._callbacks.pop(trigger))
return
# If we're still here, then we've got a list of callbacks to wait on. If
# any of those are still Waiting, we're not done yet, so return early.
if any(request._callbacks[key] is Waiting for key in top):
return
# Check the _WaitList's timeout, and clear it if we find one.
if callable(top.timeout):
top.timeout()
# We're finished, so build the list and send it on to _do.
input = [request._callbacks.pop(key) for key in request._waiting.pop()]
_do(request, input)
###############################################################################
# Message Sending
###############################################################################
class _Receiver(tuple):
timeout = None
ref = None
def send(key, *args):
"""
Send a message with the provided ``*args`` to all asynchronous requests
listening for *key*.
"""
# Get the list of requests listening for key. If there aren't any, return.
recv = receivers.pop(key, None)
if not recv:
return
# If we only have one argument, pop it out of its tuple.
if len(args) == 1:
args = args[0]
# Now, for each listening request, make sure it's still alive before
# sending the arguments its way.
for ref in recv:
request = ref()
if not request:
continue
# Check for the _in_do attribute, to make sure the request is still
# working asynchronously.
if not hasattr(request, "_in_do"):
continue
# Get the top of the request's wait list and make sure it's what
# we expect.
if not request._waiting or not isinstance(request._waiting[-1], _Receiver):
continue
# Pop the top item off the wait list and clear any timeout.
top = request._waiting.pop()
if callable(top.timeout):
top.timeout()
# Now, send the message.
_do(request, args)
async.send = send
def receive(key, timeout=None):
"""
Listen for messages with the key *key*. If *timeout* is specified, wait
up to that many seconds before raising a TimeoutError.
"""
out = _Receiver((key, timeout))
return out
async.receive = receive
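# A hedged usage sketch (the "chat" key is illustrative; see also the
# event_stream docstring above, which pairs receive with async.send):
#
#   @app.route("/poll")
#   @async
#   def poll(request):
#       try:
#           message = yield async.receive("chat", timeout=30)
#       except TimeoutError:
#           message = None
#       yield {'message': message}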
def _receive_timeout(request):
if not hasattr(request, "_in_do"):
return
# Make sure the top of the wait list is a _Receiver.
if not request._waiting or not isinstance(request._waiting[-1], _Receiver):
return
# Remove this request from the receivers list so we don't get any
# unexpected input later on.
top = request._waiting.pop()
if top[0] in receivers and top.ref in receivers[top[0]]:
receivers[top[0]].remove(top.ref)
# Now, send along a TimeoutError.
_do(request, TimeoutError(), as_exception=True)
###############################################################################
# Asynchronous Internals
###############################################################################
def _init(request):
"""
Set a bit of state for the request.
"""
request._in_do = False
request._chunked = False
request._charset = "utf-8"
request._tb = None
request._callbacks = {}
request._waiting = []
request._unhandled = []
# Create a RequestContext.
request._context = RequestContext()
def _cast(request, output):
"""
Convert an output object into something we can send over a connection.
"""
if hasattr(output, "to_html"):
output = output.to_html()
if isinstance(output, (tuple, list, dict)):
with request._context as app:
return json.dumps(output, cls=app.json_encoder)
elif isinstance(output, unicode):
return output.encode(request._charset)
elif not isinstance(output, str):
with request._context:
return str(output)
return output
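# In short, _cast resolves output as follows (a summary of the code above):
#   objects with a to_html() method -> their rendered HTML
#   tuple / list / dict             -> JSON via the app's json_encoder
#   unicode                         -> bytes in the request's charset
#   any other non-str object        -> str(output)
#   str                             -> returned unchanged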
def _cleanup(request):
"""
Delete the context manager and everything else.
"""
del request._in_do
del request._chunked
del request._charset
del request._unhandled
del request._context
try:
del request._gen
except AttributeError:
del request._callbacks
del request._waiting
return
# Cleanup any timers.
for item in request._waiting:
timer = getattr(item, "timeout", None)
if timer and callable(timer):
try:
timer()
except Exception:
# Who knows what could happen here.
pass
    # Clear out the remaining callbacks and the waiting list.
request._callbacks.clear()
del request._callbacks
del request._waiting
def _do(request, input, as_exception=False):
"""
Send the provided input to the asynchronous request handler for *request*.
If ``as_exception`` is truthy, throw it into the generator as an exception,
otherwise it's just sent.
"""
if request._in_do:
# Let's not enter some bizarre stack recursion that can cause all sorts
# of badness today, shall we? Put off the next _do till the next
# engine cycle.
request.connection.engine.callback(_do, request, input, as_exception)
return
try:
request._in_do = True
while True:
errored = False
with request._context as app:
# Make sure we're connected.
if not request.connection.connected:
try:
# Bubble up an error so the user's code can do something
# about this.
request._gen.throw(RequestClosed())
except RequestClosed:
# Don't react at all to our own exception.
pass
except Exception:
# Just log any other exception. The request is already
# closed, so there's not a lot *else* to do.
log.exception("Error while cleaning up closed "
"asynchronous request: %s %s" %
(request.method, request.url))
finally:
_cleanup(request)
return
try:
if as_exception:
output = request._gen.throw(input)
else:
output = request._gen.send(input)
except StopIteration:
# We've run out of content. Setting output to Finished
# tells the output handler to close up and go home.
output = Finished
except HTTPException as err:
if request._started:
log.exception("Error while handling asynchronous "
"request: %s %s" % (request.method,
request.url))
request.connection.close(False)
_cleanup(request)
return
errored = True
request._tb = traceback.format_exc()
err_handler = getattr(app, "handle_%d" % err.status, None)
if err_handler:
output = err_handler(request, err)
else:
output = error(err.message, err.status, err.headers,
request=request)
except HTTPTransparentRedirect as err:
if request._started:
log.exception("HTTPTransparentRedirect sent to already "
"started request: %s %s" %
(request.method, request.url))
request.connection.close(False)
_cleanup(request)
return
errored = True
output = err
request._tb = traceback.format_exc()
except Exception as err:
if request._started:
log.exception("Error while handling asynchronous "
"request: %s %s" % (request.method,
request.url))
request.connection.close(False)
_cleanup(request)
return
errored = True
request._tb = traceback.format_exc()
try:
output = app.handle_500(request, err)
except Exception:
# There's an error with the handle_500 function.
log.exception("There was a problem handling a request, and a "
"problem running Application.handle_500 for %r."
% app)
output = error(500, request=request)
# Did we error?
if errored:
# Clear the rule data, because errors don't care about it.
request._rule_content_type = None
request._rule_headers = None
_async_finish(request, output)
return
# Returning a list of Callback instances is the only way to control
# exactly what you're waiting for.
if not isinstance(output, _WaitList) and \
isinstance(output, (tuple, list)) and \
all(isinstance(x, Callback) for x in output):
output = _WaitList(output)
# Now that we're out of the request context, let's see what we've got to
# work with.
if isinstance(output, _Sleeper):
# Just sleep.
request.connection.engine.defer(output[0], _do, request, None)
elif isinstance(output, Callback):
# Shove the callback onto its own waiting list.
request._unhandled.remove(output)
request._waiting.append(output)
elif isinstance(output, _WaitList):
# Push the WaitList onto the waiting list.
if output.timeout:
output.timeout = request.connection.engine.defer(output.timeout, _wait_timeout, request)
request._waiting.append(output)
elif isinstance(output, _Receiver):
# Push the Receiver onto the waiting list.
if output[1]:
output.timeout = request.connection.engine.defer(output[1], _receive_timeout, request)
output.ref = ref = weakref.ref(request)
receivers.setdefault(output[0], []).append(ref)
request._waiting.append(output)
else:
# We've received some content, so write it out.
if request._writer(request, output) is Again:
input = None
as_exception = False
continue
# We *have* to continue if we don't want to break.
break
finally:
if hasattr(request, "_in_do"):
request._in_do = False
| StarcoderdataPython |
1834119 | # Copyright (c) 2017 <NAME>
""" from https://github.com/keithito/tacotron """
from . import cleaners
def _clean_text(text, cleaner_names, *args):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text, *args)
return text
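# A hedged usage sketch (cleaner names such as "english_cleaners" follow the
# keithito/tacotron convention and must exist in the sibling cleaners module):
#
#   text = _clean_text("Dr. Smith paid $5.", ["english_cleaners"])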
| StarcoderdataPython |
6544342 | <filename>parse.py
# <NAME>, Rutgers University
# Created on 30 June 2019
# Modified on 2 July 2019
# This simple scripts reads all emoji counts from emojitracker and saves
# them into a csv file. Simply save emojitracker.com as a html file and
# pass the file name to this parser.
#
# How to run: python parse.py
#
# Requirements: pip install BeautifulSoup
from BeautifulSoup import BeautifulSoup as BSHTML
# Sample inputs for a quick smoke test; the assignments below override them.
INPUT_FILE = 'emojitracker-sample.html'
OUTPUT_FILE = 'output-sample.csv'
INPUT_FILE = 'emojitracker-2-july-2019.html'
OUTPUT_FILE = 'output.csv'
f = open(OUTPUT_FILE,'w')
f.write('unicode\tname\tcount\n') # write headers
with open(INPUT_FILE) as texts:
soup = BSHTML(texts)
lis = soup.findAll('li', attrs = {'class' : 'emoji_char'})
for li in lis:
emoji = li['id'].lower()
name = li['data-title'].lower()
count = li.find('span', attrs = {'class' : 'score'}).text
f.write(emoji+'\t"'+name+'"\t'+count+'\n') # write to file
f.close()
| StarcoderdataPython |
3548288 | # Blueprint for admin page with PR management
from tv import login_manager, db, config, app
import os
import uuid
from datetime import datetime, timedelta
from flask_login import LoginManager, current_user, login_user, logout_user, login_required
from flask import Blueprint, flash, redirect, render_template, request
from data import User, PR, add_pr, fix_date
from forms import PRForm, ModifyPRForm
from werkzeug.utils import secure_filename
admin_page = Blueprint("admin", __name__)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in config.ALLOWED_EXTENSIONS
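# Sketch, assuming config.ALLOWED_EXTENSIONS = {'png', 'mp4'} (the actual set
# lives in the project's config module):
#   allowed_file('promo.mp4')    -> True
#   allowed_file('promo.exe')    -> False
#   allowed_file('no_extension') -> False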
def check_priority(start, end, priority):
start, end = fix_date(start, end, priority)
ps = PR.query.filter_by(priority=1).all()
    # Query.all() returns a (possibly empty) list, never None.
    if not ps:
return True, ""
# Check for overlapping prio PRs
for p in ps:
if (start.date() == p.start_date.date()):
return False, "You priority PR overlaps with another priority PR. \
Please contact dHack to resolve this issue."
return True, ""
# Admin page with PR list, upload and deletion
@admin_page.route("/admin", methods=['GET', 'POST'])
@login_required
def admin():
form = PRForm()
if form.validate_on_submit():
filename = form.file.data.filename
if not filename or not allowed_file(filename):
flash("File type not supported")
return redirect("/admin")
# Check if start date is after end date
if (form.start_date.data > form.end_date.data):
flash("Start date is after end date.")
return redirect("/admin")
if form.priority.data > 0:
            check, msg = check_priority(form.start_date.data,
                                        form.end_date.data,
                                        form.priority.data)
if check == False:
flash(msg)
return redirect("/admin")
org_filename = secure_filename(filename)
# Generate random filename with correct extention
filename = str(uuid.uuid4()) + "." + \
org_filename.rsplit('.', 1)[1].lower()
form.file.data.save(os.path.join(
app.config['UPLOAD_FOLDER'], filename))
add_pr(file_name=filename,
desc=form.desc.data,
priority=form.priority.data,
start_date=form.start_date.data,
end_date=form.end_date.data,
user_id=current_user.id,
owner=current_user.username)
return redirect("/admin")
else:
# Change the default start and end dates
today = datetime.today()
form.start_date.data = today
form.end_date.data = today
if current_user.role == "admin":
pr = PR.query.all()
else:
pr = PR.query.filter_by(user_id=current_user.id)
return render_template("admin.html", user=current_user, pr_list=pr, form=form)
# Deletes a PR on request if the current user has the right permissions
# Takes PR id "id" as argument
@admin_page.route("/admin/delete")
@login_required
def delete():
id = request.args.get("id")
    if id is None:
flash("Invalid arguments")
return redirect("/admin")
pr = PR.query.filter_by(id=id).first()
    if pr is None:
flash("Id does not exist")
return redirect("/admin")
if current_user.role != "admin" and current_user.id != pr.user_id:
flash("You don't have permissions to delete this pr")
return redirect("/admin")
try:
os.remove(os.path.join(config.UPLOAD_FOLDER, pr.file_name))
except:
flash("PR wasn't found on disk but the database entry has been removed")
db.session.delete(pr)
db.session.commit()
flash("PR successfully deleted")
return redirect("/admin")
# PR modification page. Takes an PR id "id" as an argument.
@admin_page.route("/admin/modify_pr", methods=['GET', 'POST'])
@login_required
def modify():
id = request.args.get("id")
    if id is None:
flash("Invalid arguments")
return redirect("/admin")
pr = PR.query.filter_by(id=id).first()
    if pr is None:
flash("Invalid PR id")
return redirect("/admin")
if current_user.role != "admin" and current_user.id != pr.user_id:
flash("You don't have permissions to modify this PR")
redirect("/admin")
form = ModifyPRForm()
if form.validate_on_submit():
start, end = fix_date(form.start_date.data, form.end_date.data, form.priority.data)
pr.start_date = start
pr.end_date = end
pr.priority = form.priority.data
db.session.commit()
        flash('The PR has been successfully modified')
return redirect("/admin")
else:
# Change for fields to the PR's current values
form.start_date.data = pr.start_date
form.end_date.data = pr.end_date
form.priority.data = pr.priority
return render_template('modify_pr.html', form=form, pr=pr)
| StarcoderdataPython |
3265690 | <gh_stars>1000+
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Plugin registry configuration page."""
# Third party imports
from qtpy.QtWidgets import (QGroupBox, QVBoxLayout, QCheckBox,
QGridLayout, QLabel)
# Local imports
from spyder.api.plugins import SpyderPlugin
from spyder.api.preferences import PluginConfigPage
from spyder.config.base import _
from spyder.config.manager import CONF
class PluginsConfigPage(PluginConfigPage):
def setup_page(self):
newcb = self.create_checkbox
self.plugins_checkboxes = {}
header_label = QLabel(
_("Here you can turn on/off any internal or external Spyder plugin "
"to disable functionality that is not desired or to have a lighter "
"experience. Unchecked plugins in this page will be unloaded "
"immediately and will not be loaded the next time Spyder starts."))
header_label.setWordWrap(True)
# ------------------ Internal plugin status group ---------------------
internal_layout = QGridLayout()
self.internal_plugins_group = QGroupBox(_("Internal plugins"))
i = 0
for plugin_name in self.plugin.all_internal_plugins:
(conf_section_name,
PluginClass) = self.plugin.all_internal_plugins[plugin_name]
if not getattr(PluginClass, 'CAN_BE_DISABLED', True):
# Do not list core plugins that can not be disabled
continue
plugin_loc_name = None
if hasattr(PluginClass, 'get_name'):
plugin_loc_name = PluginClass.get_name()
elif hasattr(PluginClass, 'get_plugin_title'):
plugin_loc_name = PluginClass.get_plugin_title()
plugin_state = CONF.get(conf_section_name, 'enable', True)
cb = newcb(plugin_loc_name, 'enable', default=True,
section=conf_section_name, restart=True)
internal_layout.addWidget(cb, i // 2, i % 2)
self.plugins_checkboxes[plugin_name] = (cb, plugin_state)
i += 1
self.internal_plugins_group.setLayout(internal_layout)
# ------------------ External plugin status group ---------------------
external_layout = QGridLayout()
self.external_plugins_group = QGroupBox(_("External plugins"))
i = 0
for i, plugin_name in enumerate(self.plugin.all_external_plugins):
(conf_section_name,
PluginClass) = self.plugin.all_external_plugins[plugin_name]
plugin_loc_name = None
if hasattr(PluginClass, 'get_name'):
plugin_loc_name = PluginClass.get_name()
elif hasattr(PluginClass, 'get_plugin_title'):
plugin_loc_name = PluginClass.get_plugin_title()
            plugin_state = CONF.get(conf_section_name, 'enable', True)
            cb = newcb(plugin_loc_name, 'enable', default=True,
                       section=conf_section_name, restart=True)
            external_layout.addWidget(cb, i // 2, i % 2)
            # Store the same (checkbox, previous_state) tuple as for internal
            # plugins so apply_settings can unpack it uniformly.
            self.plugins_checkboxes[plugin_name] = (cb, plugin_state)
self.external_plugins_group.setLayout(external_layout)
layout = QVBoxLayout()
layout.addWidget(header_label)
layout.addWidget(self.internal_plugins_group)
if self.plugin.all_external_plugins:
layout.addWidget(self.external_plugins_group)
layout.addStretch(1)
self.setLayout(layout)
def apply_settings(self):
for plugin_name in self.plugins_checkboxes:
cb, previous_state = self.plugins_checkboxes[plugin_name]
if cb.isChecked() and not previous_state:
self.plugin.set_plugin_enabled(plugin_name)
PluginClass = None
external = False
if plugin_name in self.plugin.all_internal_plugins:
(__,
PluginClass) = self.plugin.all_internal_plugins[plugin_name]
elif plugin_name in self.plugin.all_external_plugins:
(__,
PluginClass) = self.plugin.all_external_plugins[plugin_name]
external = True
# TODO: Once we can test that all plugins can be restarted
# without problems during runtime, we can enable the
# autorestart feature provided by the plugin registry:
# self.plugin.register_plugin(self.main, PluginClass,
# external=external)
elif not cb.isChecked() and previous_state:
# TODO: Once we can test that all plugins can be restarted
# without problems during runtime, we can enable the
# autorestart feature provided by the plugin registry:
# self.plugin.delete_plugin(plugin_name)
pass
        return set()
| StarcoderdataPython |
6479342 | <filename>app/apps/order_item/migrations/0004_auto_20201206_1747.py<gh_stars>1-10
# Generated by Django 3.1.3 on 2020-12-06 14:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order_item', '0003_auto_20201206_1707'),
]
operations = [
migrations.AddField(
model_name='orderitem',
name='total_amount',
            # verbose_name is Turkish for "Total Amount".
            field=models.FloatField(default=0, verbose_name='Toplam Tutar'),
),
migrations.AddField(
model_name='orderitem',
name='total_amount_with_vat',
field=models.FloatField(default=0,
verbose_name='Toplam Tutar (KDVli)'),
),
]
| StarcoderdataPython |
3498624 | from flask import Flask
from .api.v1 import version1 as v1
from .api.v2 import version2 as v2
from .api.db_config import create_tables
def create_app():
app = Flask(__name__)
app.url_map.strict_slashes = False
create_tables()
app.register_blueprint(v1)
app.register_blueprint(v2)
    @app.errorhandler(404)
    def page_not_found(e):
        # Return the matching status code; a bare string would be sent as 200.
        return 'The requested page does not exist. Kindly check your url', 404
    @app.errorhandler(403)
    def forbidden_access(e):
        return 'Access forbidden', 403
    @app.errorhandler(500)
    def internal_server_error(e):
        return 'An error occurred.', 500
return app
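# A hedged usage sketch (the package path is illustrative):
#
#   from app import create_app
#   app = create_app()
#   app.run()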
| StarcoderdataPython |
12812417 | <reponame>minkione/mat
from mat.utils.utils import Utils, Issue
class Issue(Issue):
TITLE = 'Fragment Injection Check'
DESCRIPTION = 'Checks if the application is vulnerable to fragment injection'
ID = 'fragment-injection'
ISSUE_TITLE = 'Application Vulnerable To Fragment Injection'
FINDINGS = 'The Team found the application was vulnerable to fragment injection:\n'
def dependencies(self):
return self.ANALYSIS.UTILS.check_dependencies(['static'])
def run(self):
activities = Utils.grep(r'extends PreferenceActivity', self.ANALYSIS.LOCAL_SOURCE)
        # Compare SDK versions numerically: as strings, '9' < '18' is False.
        if activities and int(self.ANALYSIS.MANIFEST.get_sdk('min')) < 18:
self.REPORT = True
self.DETAILS = Utils.grep_details(activities, self.ANALYSIS.LOCAL_SOURCE)
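    # Background (a hedged note): PreferenceActivity instantiates fragments
    # named in Intent extras, and the isValidFragment() guard only arrived in
    # later Android releases, which is why the check keys off the manifest's
    # minimum SDK version.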
| StarcoderdataPython |
5177429 | <reponame>personalrobotics/herbpy
#!/usr/bin/env python
PKG = 'herbpy'
import roslib; roslib.load_manifest(PKG)
import numpy, unittest
import herbpy
env, robot = herbpy.initialize(sim=True)
class WamTest(unittest.TestCase):
def setUp(self):
self._env, self._robot = env, robot
self._wam = robot.right_arm
self._indices = self._wam.GetArmIndices()
self._num_dofs = len(self._indices)
def test_SetStiffness_DoesNotThrow(self):
self._wam.SetStiffness(0.0)
self._wam.SetStiffness(0.5)
self._wam.SetStiffness(1.0)
def test_SetStiffness_InvalidStiffnessThrows(self):
        self.assertRaises(Exception, self._wam.SetStiffness, -0.2)
        self.assertRaises(Exception, self._wam.SetStiffness, 1.2)
def test_Servo_DoesNotThrow(self):
self._wam.Servo(0.1 * numpy.ones(self._num_dofs))
def test_Servo_IncorrectSizeThrows(self):
velocity_small = 0.1 * numpy.ones(self._num_dofs - 1)
velocity_large = 0.1 * numpy.ones(self._num_dofs + 1)
        # Pass the arguments positionally; wrapping them in a tuple hands
        # Servo a single tuple argument instead of the velocity array.
        self.assertRaises(Exception, self._wam.Servo, velocity_small)
        self.assertRaises(Exception, self._wam.Servo, velocity_large)
def test_Servo_ExceedsVelocityLimitThrows(self):
velocity_limits = self._robot.GetDOFVelocityLimits(self._indices)
velocity_limits_small = -velocity_limits - 0.1 * numpy.ones(self._num_dofs)
velocity_limits_large = velocity_limits + 0.1 * numpy.ones(self._num_dofs)
        self.assertRaises(Exception, self._wam.Servo, velocity_limits_small, 0.3)
        self.assertRaises(Exception, self._wam.Servo, velocity_limits_large, 0.3)
def test_Servo_InvalidAccelTimeThrows(self):
velocity_limits = 0.1 * numpy.ones(self._num_dofs)
        self.assertRaises(Exception, self._wam.Servo, velocity_limits, -0.1)
        self.assertRaises(Exception, self._wam.Servo, velocity_limits, 0.0)
def test_SetVelocityLimits_SetsLimits(self):
velocity_limits = 0.1 * numpy.ones(self._num_dofs)
self._wam.SetVelocityLimits(velocity_limits, 0.3)
numpy.testing.assert_array_almost_equal(self._robot.GetDOFVelocityLimits(self._indices), velocity_limits)
def test_MoveUntilTouch_ZeroDirectionThrows(self):
        self.assertRaises(Exception, self._wam.MoveUntilTouch, numpy.zeros(3), 0.1)
def test_MoveUntilTouch_ZeroDistanceThrows(self):
        self.assertRaises(Exception, self._wam.MoveUntilTouch, numpy.array([1., 0., 0.]), 0.0)
def test_MoveUntilTouch_NonPositiveForceThrows(self):
        self.assertRaises(Exception, self._wam.MoveUntilTouch, numpy.array([1., 0., 0.]), 0.1, 0.)
if __name__ == '__main__':
import rosunit
rosunit.unitrun(PKG, 'test_wam', WamTest)
| StarcoderdataPython |
361872 | #
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Copyright (C) 2018-2021 UAVCAN Development Team <uavcan.org>
# This software is distributed under the terms of the MIT License.
#
"""
jinja-based :class:`~nunavut.generators.AbstractGenerator` implementation.
"""
import datetime
import io
import logging
import pathlib
import re
import shutil
import typing
import nunavut.generators
import nunavut.lang
import nunavut.postprocessors
import pydsdl
from nunavut._utilities import YesNoDefault
from yaml import Dumper as YamlDumper
from yaml import dump as yaml_dump
from .environment import CodeGenEnvironment
from .jinja2 import Template
from .loaders import DEFAULT_TEMPLATE_PATH, TEMPLATE_SUFFIX, DSDLTemplateLoader
logger = logging.getLogger(__name__)
# +---------------------------------------------------------------------------+
# | JINJA : CodeGenerator
# +---------------------------------------------------------------------------+
class CodeGenerator(nunavut.generators.AbstractGenerator):
"""
Abstract base class for all Generators that build source code using Jinja templates.
:param nunavut.Namespace namespace: The top-level namespace to generates code
at and from.
:param YesNoDefault generate_namespace_types: Set to YES to emit files for namespaces.
NO will suppress namespace file generation and DEFAULT will
use the language's preference.
:param templates_dir: Directories containing jinja templates. These will be available along
with any built-in templates provided by the target language. The templates
at these paths will take precedence masking any built-in templates
where the names are the same. See :class:`jinja2.ChoiceLoader` for rules
on the lookup hierarchy.
:type templates_dir: typing.Optional[typing.Union[pathlib.Path,typing.List[pathlib.Path]]]
:param bool followlinks: If True then symbolic links will be followed when
searching for templates.
:param bool trim_blocks: If this is set to True the first newline after a
block is removed (block, not variable tag!).
:param bool lstrip_blocks: If this is set to True leading spaces and tabs
are stripped from the start of a line to a block.
Defaults to False.
:param typing.Dict[str, typing.Callable] additional_filters: typing.Optional jinja filters to add to the
global environment using the key as the filter name
and the callable as the filter.
:param typing.Dict[str, typing.Callable] additional_tests: typing.Optional jinja tests to add to the
global environment using the key as the test name
and the callable as the test.
:param typing.Dict[str, typing.Any] additional_globals: typing.Optional objects to add to the template
environment globals collection.
:param post_processors: A list of :class:`nunavut.postprocessors.PostProcessor`
:type post_processors: typing.Optional[typing.List[nunavut.postprocessors.PostProcessor]]
:param builtin_template_path: If provided overrides the folder name under which built-in templates are loaded from
within a target language's package (i.e. ignored if no target language is
specified). For example, if the target language is ``c`` and this parameter
was set to ``foo`` then built-in templates would be loaded from
``nunavut.lang.c.foo``.
:raises RuntimeError: If any additional filter or test attempts to replace a built-in
or otherwise already defined filter or test.
"""
@staticmethod
def __augment_post_processors_with_ln_limit_empty_lines(
post_processors: typing.Optional[typing.List["nunavut.postprocessors.PostProcessor"]], limit_empty_lines: int
) -> typing.List["nunavut.postprocessors.PostProcessor"]:
"""
Subroutine of _handle_post_processors method.
"""
from nunavut.postprocessors import LimitEmptyLines
if post_processors is None:
post_processors = [LimitEmptyLines(limit_empty_lines)]
else:
found_pp = False
for pp in post_processors:
if isinstance(pp, LimitEmptyLines):
found_pp = True
break
if not found_pp:
post_processors.append(LimitEmptyLines(limit_empty_lines))
return post_processors
@staticmethod
def __augment_post_processors_with_ln_trim_trailing_whitespace(
post_processors: typing.Optional[typing.List["nunavut.postprocessors.PostProcessor"]],
) -> typing.List["nunavut.postprocessors.PostProcessor"]:
"""
Subroutine of _handle_post_processors method.
"""
from nunavut.postprocessors import TrimTrailingWhitespace
if post_processors is None:
post_processors = [TrimTrailingWhitespace()]
else:
found_pp = False
for pp in post_processors:
if isinstance(pp, TrimTrailingWhitespace):
found_pp = True
break
if not found_pp:
post_processors.append(TrimTrailingWhitespace())
return post_processors
@classmethod
def _handle_post_processors(
cls,
post_processors: typing.Optional[typing.List["nunavut.postprocessors.PostProcessor"]],
target_language: typing.Optional["nunavut.lang.Language"],
) -> typing.Optional[typing.List["nunavut.postprocessors.PostProcessor"]]:
"""
Used by constructor to process an optional list of post-processors and to augment or create this list
if needed to support language options.
"""
if target_language is not None:
try:
limit_empty_lines = target_language.get_config_value("limit_empty_lines")
post_processors = cls.__augment_post_processors_with_ln_limit_empty_lines(
post_processors, int(limit_empty_lines)
)
except KeyError:
pass
if target_language.get_config_value_as_bool("trim_trailing_whitespace"):
post_processors = cls.__augment_post_processors_with_ln_trim_trailing_whitespace(post_processors)
return post_processors
def __init__(
self,
namespace: nunavut.Namespace,
generate_namespace_types: YesNoDefault = YesNoDefault.DEFAULT,
templates_dir: typing.Optional[typing.Union[pathlib.Path, typing.List[pathlib.Path]]] = None,
followlinks: bool = False,
trim_blocks: bool = False,
lstrip_blocks: bool = False,
additional_filters: typing.Optional[typing.Dict[str, typing.Callable]] = None,
additional_tests: typing.Optional[typing.Dict[str, typing.Callable]] = None,
additional_globals: typing.Optional[typing.Dict[str, typing.Any]] = None,
post_processors: typing.Optional[typing.List["nunavut.postprocessors.PostProcessor"]] = None,
builtin_template_path: str = DEFAULT_TEMPLATE_PATH,
):
super().__init__(namespace, generate_namespace_types)
if templates_dir is not None and not isinstance(templates_dir, list):
templates_dir = [templates_dir]
language_context = self._namespace.get_language_context()
target_language = language_context.get_target_language()
self._dsdl_template_loader = DSDLTemplateLoader(
templates_dirs=templates_dir,
package_name_for_templates=(
None if target_language is None else target_language.get_templates_package_name()
),
followlinks=followlinks,
builtin_template_path=builtin_template_path,
)
self._post_processors = self._handle_post_processors(post_processors, target_language)
self._env = CodeGenEnvironment(
lctx=language_context,
loader=self._dsdl_template_loader,
lstrip_blocks=lstrip_blocks,
trim_blocks=trim_blocks,
additional_filters=additional_filters,
additional_tests=additional_tests,
additional_globals=additional_globals,
)
@property
def dsdl_loader(self) -> DSDLTemplateLoader:
return self._dsdl_template_loader
@property
def language_context(self) -> nunavut.lang.LanguageContext:
return self._namespace.get_language_context()
# +-----------------------------------------------------------------------+
# | PROTECTED
# +-----------------------------------------------------------------------+
def _handle_overwrite(self, output_path: pathlib.Path, allow_overwrite: bool) -> None:
if output_path.exists():
if allow_overwrite:
output_path.chmod(output_path.stat().st_mode | 0o220)
else:
raise PermissionError("{} exists and allow_overwrite is False.".format(output_path))
# +-----------------------------------------------------------------------+
# | AbstractGenerator
# +-----------------------------------------------------------------------+
def get_templates(self) -> typing.Iterable[pathlib.Path]:
"""
Enumerate all templates found in the templates path.
:data:`~TEMPLATE_SUFFIX` as the suffix for the filename.
:return: A list of paths to all templates found by this Generator object.
"""
return self._dsdl_template_loader.get_templates()
# +-----------------------------------------------------------------------+
# | PRIVATE
# +-----------------------------------------------------------------------+
@staticmethod
def _filter_and_write_line(
line_and_lineend: typing.Tuple[str, str],
output_file: typing.TextIO,
line_pps: typing.List["nunavut.postprocessors.LinePostProcessor"],
) -> None:
for line_pp in line_pps:
line_and_lineend = line_pp(line_and_lineend)
if line_and_lineend is None:
raise ValueError(
"line post processor must return a 2-tuple. To elide a line return a tuple of empty"
"strings. None is not a valid value."
)
output_file.write(line_and_lineend[0])
output_file.write(line_and_lineend[1])
@classmethod
def _generate_with_line_buffer(
cls,
output_file: typing.TextIO,
template_gen: typing.Generator[str, None, None],
line_pps: typing.List["nunavut.postprocessors.LinePostProcessor"],
) -> None:
newline_pattern = re.compile(r"\n|\r\n", flags=re.MULTILINE)
line_buffer = io.StringIO()
for part in template_gen:
search_pos = 0 # type: int
match_obj = newline_pattern.search(part, search_pos)
while True:
if search_pos < 0 or search_pos >= len(part):
break
if match_obj is None:
line_buffer.write(part[search_pos:])
break
# We have a newline
line_buffer.write(part[search_pos : match_obj.start()])
newline_chars = part[match_obj.start() : match_obj.end()]
line = line_buffer.getvalue() # type: str
line_buffer = io.StringIO()
cls._filter_and_write_line((line, newline_chars), output_file, line_pps)
search_pos = match_obj.end()
match_obj = newline_pattern.search(part, search_pos)
remainder = line_buffer.getvalue()
if len(remainder) > 0:
cls._filter_and_write_line((remainder, ""), output_file, line_pps)
def _generate_code(
self,
output_path: pathlib.Path,
template: Template,
template_gen: typing.Generator[str, None, None],
allow_overwrite: bool,
) -> None:
"""
Logic that should run from _generate_type iff is_dryrun is False.
"""
self._env.now_utc = datetime.datetime.utcnow()
from ..lang._common import UniqueNameGenerator
# reset the name generator state for this type
UniqueNameGenerator.reset()
# Predetermine the post processor types.
line_pps = [] # type: typing.List['nunavut.postprocessors.LinePostProcessor']
file_pps = [] # type: typing.List['nunavut.postprocessors.FilePostProcessor']
if self._post_processors is not None:
for pp in self._post_processors:
if isinstance(pp, nunavut.postprocessors.LinePostProcessor):
line_pps.append(pp)
elif isinstance(pp, nunavut.postprocessors.FilePostProcessor):
file_pps.append(pp)
else:
raise ValueError("PostProcessor type {} is unknown.".format(type(pp)))
logger.debug("Using post-processors: %r %r", line_pps, file_pps)
self._handle_overwrite(output_path, allow_overwrite)
output_path.parent.mkdir(parents=True, exist_ok=True)
with open(str(output_path), "w") as output_file:
if len(line_pps) > 0:
# The logic gets much more complex when doing line post-processing.
self._generate_with_line_buffer(output_file, template_gen, line_pps)
else:
for part in template_gen:
output_file.write(part)
for file_pp in file_pps:
output_path = file_pp(output_path)
# +---------------------------------------------------------------------------+
# | JINJA : DSDLCodeGenerator
# +---------------------------------------------------------------------------+
class DSDLCodeGenerator(CodeGenerator):
"""
:class:`~CodeGenerator` implementation that generates code for a given set
of DSDL types.
"""
# +-----------------------------------------------------------------------+
# | JINJA : filters
# +-----------------------------------------------------------------------+
@staticmethod
def filter_yamlfy(value: typing.Any) -> str:
"""
Filter to, optionally, emit a dump of the dsdl input as a yaml document.
Available as ``yamlfy`` in all template environments.
Example::
/*
{{ T | yamlfy }}
*/
Result Example (truncated for brevity)::
/*
!!python/object:pydsdl.StructureType
_attributes:
- !!python/object:pydsdl.Field
_serializable: !!python/object:pydsdl.UnsignedIntegerType
_bit_length: 16
_cast_mode: &id001 !!python/object/apply:pydsdl.CastMode
- 0
_name: value
*/
:param value: The input value to parse as yaml.
:return: If a yaml parser is available, a pretty dump of the given value as yaml.
If a yaml parser is not available then an empty string is returned.
"""
return str(yaml_dump(value, Dumper=YamlDumper))
def filter_type_to_template(self, value: typing.Any) -> str:
"""
Template for type resolution as a filter. Available as ``type_to_template``
in all template environments.
Example::
{%- for attribute in T.attributes %}
{%* include attribute.data_type | type_to_template %}
{%- if not loop.last %},{% endif %}
{%- endfor %}
:param value: The input value to change into a template include path.
:return: A path to a template named for the type with :any:`TEMPLATE_SUFFIX`
"""
result = self.dsdl_loader.type_to_template(type(value))
if result is None:
raise RuntimeError("No template found for type {}".format(type(value)))
return result.name
def filter_type_to_include_path(self, value: typing.Any, resolve: bool = False) -> str:
"""
Emits an include path to the output target for a given type.
Example::
# include "{{ T.my_type | type_to_include_path }}"
Result Example:
# include "foo/bar/my_type.h"
:param typing.Any value: The type to emit an include for.
:param bool resolve: If True the path returned will be absolute else the path will
be relative to the folder of the root namespace.
:return: A string path to output file for the type.
"""
include_path = self.namespace.find_output_path_for_type(value)
if resolve:
return include_path.resolve().as_posix()
else:
return include_path.relative_to(self.namespace.output_folder.parent).as_posix()
@staticmethod
def filter_typename(value: typing.Any) -> str:
"""
Filters a given token as its type name. Available as ``typename``
in all template environments.
This example supposes that ``T.some_value == "some string"``
Example::
{{ T.some_value | typename }}
Result Example::
str
:param value: The input value to filter into a type name.
:return: The ``__name__`` of the python type.
"""
return type(value).__name__
@staticmethod
def filter_alignment_prefix(offset: pydsdl.BitLengthSet) -> str:
"""
Provides a string prefix based on a given :class:`pydsdl.BitLengthSet`.
.. invisible-code-block: python
from nunavut.jinja import DSDLCodeGenerator
import pydsdl
.. code-block:: python
# Given
B = pydsdl.BitLengthSet(32)
# and
template = '{{ B | alignment_prefix }}'
# then ('str' is stropped to 'str_' before the version is suffixed)
rendered = 'aligned'
.. invisible-code-block: python
jinja_filter_tester(DSDLCodeGenerator.filter_alignment_prefix, template, rendered, 'py', B=B)
.. code-block:: python
# Given
B = pydsdl.BitLengthSet(32)
B += 1
# and
template = '{{ B | alignment_prefix }}'
# then ('str' is stropped to 'str_' before the version is suffixed)
rendered = 'unaligned'
.. invisible-code-block: python
jinja_filter_tester(DSDLCodeGenerator.filter_alignment_prefix, template, rendered, 'py', B=B)
:param pydsdl.BitLengthSet offset: A bit length set to test for alignment.
:return: 'aligned' or 'unaligned' based on the state of the ``offset`` argument.
"""
if isinstance(offset, pydsdl.BitLengthSet):
return "aligned" if offset.is_aligned_at_byte() else "unaligned"
else: # pragma: no cover
raise TypeError("Expected BitLengthSet, got {}".format(type(offset).__name__))
@staticmethod
def filter_bit_length_set(values: typing.Optional[typing.Union[typing.Iterable[int], int]]) -> pydsdl.BitLengthSet:
"""
Convert an integer or a list of integers into a :class:`pydsdl.BitLengthSet`.
.. invisible-code-block: python
from nunavut.jinja import DSDLCodeGenerator
import pydsdl
assert type(DSDLCodeGenerator.filter_bit_length_set(23)) == pydsdl.BitLengthSet
"""
return pydsdl.BitLengthSet(values)
@staticmethod
def filter_remove_blank_lines(text: str) -> str:
"""
Remove blank lines from the supplied string.
Lines that contain only whitespace characters are also considered blank.
.. invisible-code-block: python
from nunavut.jinja import DSDLCodeGenerator
import pydsdl
assert DSDLCodeGenerator.filter_remove_blank_lines('123\n \n\n456\n\t\n\v\f\n789') == '123\n456\n789'
"""
return re.sub(r"\n([ \t\f\v]*\n)+", r"\n", text)
@staticmethod
def filter_bits2bytes_ceil(n_bits: int) -> int:
"""
Implements ``int(ceil(x/8)) | x >= 0``.
.. invisible-code-block: python
from nunavut.jinja import DSDLCodeGenerator
assert DSDLCodeGenerator.filter_bits2bytes_ceil(50) == 7
assert DSDLCodeGenerator.filter_bits2bytes_ceil(8) == 1
assert DSDLCodeGenerator.filter_bits2bytes_ceil(7) == 1
assert DSDLCodeGenerator.filter_bits2bytes_ceil(1) == 1
assert DSDLCodeGenerator.filter_bits2bytes_ceil(0) == 0
"""
if n_bits < 0:
raise ValueError("The number of bits cannot be negative")
return (int(n_bits) + 7) // 8
# +-----------------------------------------------------------------------+
# | JINJA : tests
# +-----------------------------------------------------------------------+
@staticmethod
def is_None(value: typing.Any) -> bool:
"""
Tests if a value is ``None``.
.. invisible-code-block: python
from nunavut.jinja import DSDLCodeGenerator
assert DSDLCodeGenerator.is_None(None) is True
assert DSDLCodeGenerator.is_None(1) is False
"""
return value is None
@staticmethod
def is_saturated(t: pydsdl.PrimitiveType) -> bool:
"""
Tests if a type is a saturated type or not.
.. invisible-code-block: python
from nunavut.jinja import DSDLCodeGenerator
from unittest.mock import MagicMock
import pydsdl
import pytest
saturated_mock = MagicMock(spec=pydsdl.PrimitiveType)
saturated_mock.cast_mode = pydsdl.PrimitiveType.CastMode.SATURATED
assert DSDLCodeGenerator.is_saturated(saturated_mock) is True
truncated_mock = MagicMock(spec=pydsdl.PrimitiveType)
truncated_mock.cast_mode = pydsdl.PrimitiveType.CastMode.TRUNCATED
assert DSDLCodeGenerator.is_saturated(truncated_mock) is False
with pytest.raises(TypeError):
DSDLCodeGenerator.is_saturated(MagicMock(spec=pydsdl.SerializableType))
"""
if isinstance(t, pydsdl.PrimitiveType):
return {
pydsdl.PrimitiveType.CastMode.SATURATED: True,
pydsdl.PrimitiveType.CastMode.TRUNCATED: False,
}[t.cast_mode]
else:
raise TypeError("Cast mode is not defined for {}".format(type(t).__name__))
@staticmethod
def is_service_request(instance: pydsdl.Any) -> bool:
"""
Tests if a type is the request type of a service type.
.. invisible-code-block: python
from nunavut.jinja import DSDLCodeGenerator
from unittest.mock import MagicMock
import pydsdl
service_request_mock = MagicMock(spec=pydsdl.SerializableType)
service_request_mock.has_parent_service = True
service_request_mock.full_name = 'foo.bar.Service_1_0.Request'
assert DSDLCodeGenerator.is_service_request(service_request_mock) is True
service_request_mock.has_parent_service = False
assert DSDLCodeGenerator.is_service_request(service_request_mock) is False
service_request_mock.has_parent_service = True
service_request_mock.full_name = 'foo.bar.Service_1_0.Response'
assert DSDLCodeGenerator.is_service_request(service_request_mock) is False
"""
return instance.has_parent_service and instance.full_name.split(".")[-1] == "Request" # type: ignore
@staticmethod
def is_service_response(instance: pydsdl.Any) -> bool:
"""
Tests if a type is the response type of a service type.
.. invisible-code-block: python
from nunavut.jinja import DSDLCodeGenerator
from unittest.mock import MagicMock
import pydsdl
service_request_mock = MagicMock(spec=pydsdl.SerializableType)
service_request_mock.has_parent_service = True
service_request_mock.full_name = 'foo.bar.Service_1_0.Response'
assert DSDLCodeGenerator.is_service_response(service_request_mock) is True
service_request_mock.has_parent_service = False
assert DSDLCodeGenerator.is_service_response(service_request_mock) is False
service_request_mock.has_parent_service = True
service_request_mock.full_name = 'foo.bar.Service_1_0.Request'
assert DSDLCodeGenerator.is_service_response(service_request_mock) is False
"""
return instance.has_parent_service and instance.full_name.split(".")[-1] == "Response" # type: ignore
@staticmethod
def is_deprecated(instance: pydsdl.Any) -> bool:
"""
Tests if a type is marked as deprecated.
.. invisible-code-block: python
from nunavut.jinja import DSDLCodeGenerator
from unittest.mock import MagicMock
import pydsdl
composite_type_mock = MagicMock(spec=pydsdl.CompositeType)
composite_type_mock.deprecated = True
assert DSDLCodeGenerator.is_deprecated(composite_type_mock) is True
array_type_mock = MagicMock(spec=pydsdl.ArrayType)
array_type_mock.element_type = composite_type_mock
assert DSDLCodeGenerator.is_deprecated(array_type_mock) is True
other_type_mock = MagicMock(spec=pydsdl.SerializableType)
assert DSDLCodeGenerator.is_deprecated(other_type_mock) is False
"""
if isinstance(instance, pydsdl.CompositeType):
return instance.deprecated # type: ignore
elif isinstance(instance, pydsdl.ArrayType) and isinstance(instance.element_type, pydsdl.CompositeType):
return instance.element_type.deprecated # type: ignore
else:
return False
# +-----------------------------------------------------------------------+
def __init__(self, namespace: nunavut.Namespace, **kwargs: typing.Any):
super().__init__(namespace, **kwargs)
for test_name, test in self._create_all_dsdl_tests().items():
self._env.add_test(test_name, test)
self._env.add_conventional_methods_to_environment(self)
# +-----------------------------------------------------------------------+
# | AbstractGenerator
# +-----------------------------------------------------------------------+
def generate_all(self, is_dryrun: bool = False, allow_overwrite: bool = True) -> typing.Iterable[pathlib.Path]:
generated = [] # type: typing.List[pathlib.Path]
provider = self.namespace.get_all_types if self.generate_namespace_types else self.namespace.get_all_datatypes
for (parsed_type, output_path) in provider():
logger.info("Generating: %s", parsed_type)
generated.append(self._generate_type(parsed_type, output_path, is_dryrun, allow_overwrite))
return generated
# +-----------------------------------------------------------------------+
# | PRIVATE
# +-----------------------------------------------------------------------+
@classmethod
def _create_instance_tests_for_type(cls, root: pydsdl.Any) -> typing.Dict[str, typing.Callable]:
tests = dict()
def _field_is_instance(field_or_datatype: pydsdl.Any) -> bool:
if isinstance(field_or_datatype, pydsdl.Attribute):
return isinstance(field_or_datatype.data_type, root)
else:
return isinstance(field_or_datatype, root)
tests[root.__name__] = _field_is_instance
root_name_lower = root.__name__.lower()
if len(root_name_lower) > 4 and root_name_lower.endswith("type"):
tests[root_name_lower[:-4]] = _field_is_instance
elif len(root_name_lower) > 5 and root_name_lower.endswith("field"):
tests[root_name_lower[:-5]] = _field_is_instance
else:
tests[root_name_lower] = _field_is_instance
for derived in root.__subclasses__():
tests.update(cls._create_instance_tests_for_type(derived))
return tests
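# Illustrative sketch of the aliases this produces (relying on pydsdl's real
# class names, as the doctests below do; not executed here):
#
#     tests = DSDLCodeGenerator._create_instance_tests_for_type(pydsdl.IntegerType)
#     assert 'IntegerType' in tests and 'integer' in tests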
@classmethod
def _create_all_dsdl_tests(cls) -> typing.Mapping[str, typing.Callable]:
"""
Create a collection of jinja tests for all base dsdl types.
.. invisible-code-block: python
import pydsdl
from unittest.mock import MagicMock
from nunavut.jinja import DSDLCodeGenerator
test_set = DSDLCodeGenerator._create_all_dsdl_tests()
def _do_pydsdl_instance_test_test(pydsdl_obj, test_name):
if not test_set[test_name](pydsdl_obj):
raise AssertionError(test_name)
def _do_pydsdl_instance_test_tests(pydsdl_type):
mock_instance = MagicMock(spec=pydsdl_type)
_do_pydsdl_instance_test_test(mock_instance, pydsdl_type.__name__)
if pydsdl_type.__name__.endswith('Type'):
_do_pydsdl_instance_test_test(mock_instance, pydsdl_type.__name__[:-4].lower())
if pydsdl_type.__name__.endswith('Field'):
_do_pydsdl_instance_test_test(mock_instance, pydsdl_type.__name__[:-5].lower())
mock_attribute = MagicMock(spec=pydsdl.Attribute)
mock_attribute.data_type = mock_instance
_do_pydsdl_instance_test_test(mock_attribute, pydsdl_type.__name__)
_do_pydsdl_instance_test_tests(pydsdl.SerializableType)
_do_pydsdl_instance_test_tests(pydsdl.PrimitiveType)
_do_pydsdl_instance_test_tests(pydsdl.IntegerType)
_do_pydsdl_instance_test_tests(pydsdl.ServiceType)
"""
all_tests = dict()
all_tests.update(cls._create_instance_tests_for_type(pydsdl.SerializableType))
all_tests.update(cls._create_instance_tests_for_type(pydsdl.Attribute))
return all_tests
def _generate_type(
self, input_type: pydsdl.CompositeType, output_path: pathlib.Path, is_dryrun: bool, allow_overwrite: bool
) -> pathlib.Path:
template_name = self.filter_type_to_template(input_type)
template = self._env.get_template(template_name)
template_gen = template.generate(T=input_type)
if not is_dryrun:
self._generate_code(output_path, template, template_gen, allow_overwrite)
return output_path
# +---------------------------------------------------------------------------+
# | JINJA : SupportGenerator
# +---------------------------------------------------------------------------+
class SupportGenerator(CodeGenerator):
"""
Generates output files by copying non-template files from within the Nunavut
package itself, and uses Jinja to generate headers from templates with the
provided language environment but no ``T`` (DSDL type) global set.
This generator always copies files from those returned by the ``file_iterator``
to locations under :func:`nunavut.Namespace.get_support_output_folder()`
"""
def __init__(self, namespace: nunavut.Namespace, **kwargs: typing.Any):
super().__init__(namespace, builtin_template_path="support", **kwargs)
target_language = self.language_context.get_target_language()
self._sub_folders = None # type: typing.Optional[pathlib.Path]
self._support_enabled = False # If not enabled then we remove any support files found
if target_language is not None:
self._support_enabled = not target_language.omit_serialization_support
# Create the sub-folder to copy-to based on the support namespace.
self._sub_folders = pathlib.Path("")
for namespace_part in target_language.support_namespace:
self._sub_folders = self._sub_folders / pathlib.Path(namespace_part)
# +-----------------------------------------------------------------------+
# | AbstractGenerator
# +-----------------------------------------------------------------------+
def get_templates(self) -> typing.Iterable[pathlib.Path]:
files = []
target_language = self.language_context.get_target_language()
if target_language is not None:
for resource in target_language.support_files:
files.append(resource)
return files
def generate_all(self, is_dryrun: bool = False, allow_overwrite: bool = True) -> typing.Iterable[pathlib.Path]:
target_language = self.language_context.get_target_language()
if self._sub_folders is None or target_language is None:
logger.info("No target language, therefore, no support headers")
return []
else:
return self._generate_all(target_language, self._sub_folders, is_dryrun, allow_overwrite)
# +-----------------------------------------------------------------------+
# | Private
# +-----------------------------------------------------------------------+
def _generate_all(
self, target_language: nunavut.lang.Language, sub_folders: pathlib.Path, is_dryrun: bool, allow_overwrite: bool
) -> typing.Iterable[pathlib.Path]:
target_path = pathlib.Path(self.namespace.get_support_output_folder()) / sub_folders
line_pps = [] # type: typing.List['nunavut.postprocessors.LinePostProcessor']
file_pps = [] # type: typing.List['nunavut.postprocessors.FilePostProcessor']
if self._post_processors is not None:
for pp in self._post_processors:
if isinstance(pp, nunavut.postprocessors.LinePostProcessor):
line_pps.append(pp)
elif isinstance(pp, nunavut.postprocessors.FilePostProcessor):
file_pps.append(pp)
else:
raise ValueError("PostProcessor type {} is unknown.".format(type(pp)))
generated = [] # type: typing.List[pathlib.Path]
for resource in self.get_templates():
target = (target_path / resource.name).with_suffix(target_language.extension)
logger.info("Generating support file: %s", target)
if not self._support_enabled:
self._remove_header(target, is_dryrun, allow_overwrite)
elif resource.suffix == TEMPLATE_SUFFIX:
self._generate_header(resource, target, is_dryrun, allow_overwrite)
generated.append(target)
else:
self._copy_header(resource, target, is_dryrun, allow_overwrite, line_pps, file_pps)
generated.append(target)
return generated
def _remove_header(self, target: pathlib.Path, is_dryrun: bool, allow_overwrite: bool) -> None:
if not is_dryrun:
if not allow_overwrite and target.exists():
raise PermissionError("{} exists. Refusing to remove.".format(str(target)))
try:
target.unlink()
except FileNotFoundError:
# missing_ok was added in python 3.8 so this try/except statement will
# go away someday when python 3.7 support is dropped.
pass
def _generate_header(
self, template_path: pathlib.Path, output_path: pathlib.Path, is_dryrun: bool, allow_overwrite: bool
) -> pathlib.Path:
template = self._env.get_template(template_path.name)
template_gen = template.generate()
if not is_dryrun:
self._generate_code(output_path, template, template_gen, allow_overwrite)
return output_path
def _copy_header(
self,
resource: pathlib.Path,
target: pathlib.Path,
is_dryrun: bool,
allow_overwrite: bool,
line_pps: typing.List["nunavut.postprocessors.LinePostProcessor"],
file_pps: typing.List["nunavut.postprocessors.FilePostProcessor"],
) -> pathlib.Path:
if not is_dryrun:
self._handle_overwrite(target, allow_overwrite)
target.parent.mkdir(parents=True, exist_ok=True)
if len(line_pps) == 0:
shutil.copy(str(resource), str(target))
else:
self._copy_header_using_line_pps(resource, target, line_pps)
for file_pp in file_pps:
target = file_pp(target)
return target
def _copy_header_using_line_pps(
self,
resource: pathlib.Path,
target: pathlib.Path,
line_pps: typing.List["nunavut.postprocessors.LinePostProcessor"],
) -> None:
with open(str(target), "w") as target_file:
with open(str(resource), "r") as resource_file:
for resource_line in resource_file:
if len(resource_line) > 1 and resource_line[-2] == "\r":
resource_line_tuple = (resource_line[0:-2], "\r\n")
else:
resource_line_tuple = (resource_line[0:-1], "\n")
for line_pp in line_pps:
resource_line_tuple = line_pp(resource_line_tuple)
target_file.write(resource_line_tuple[0])
target_file.write(resource_line_tuple[1])
| StarcoderdataPython |
6624037 | <filename>extensions/games.py
'''
MIT License
Copyright (c) 2020 <NAME> & <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import discord
from discord.ext import commands
import random
import utils
WORDS = open("words.txt").read().split("\n")
class Games(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.ongoing_games = {}
@commands.command()
async def hangman(self, ctx):
if ctx.channel.id in self.ongoing_games.keys():
return await ctx.send(":x: **There is already an ongoing game in the current channel**")
word = random.choice(WORDS)
disp = ["\_" for _ in range(len(word))]
disp[0] = ">\_<"
self.ongoing_games[ctx.channel.id] = {
"game": "hangman",
"user": ctx.author,
"word": word,
"current_letter": 0,
"turnNo": 0,
"damage": 0,
"guessed_letters": [],
"display_string": " ".join(disp)
}
print(self.ongoing_games[ctx.channel.id])
await ctx.send(f":white_check_mark: **Hangman Started**\n"+self.ongoing_games[ctx.channel.id]["display_string"])
@commands.command()
async def stopgame(self, ctx):
if ctx.channel.id not in self.ongoing_games.keys():
return await ctx.send(":x: **There is not ongoing game in the current channel**")
self.ongoing_games.pop(ctx.channel.id)
await ctx.send("**:white_check_mark: Game successfully ended**")
@commands.Cog.listener()
async def on_message(self, message):
ctx = await self.bot.get_context(message)
if ctx.valid:
return
if ctx.author.bot:
return
if ctx.channel.id not in self.ongoing_games.keys():
return
gameData = self.ongoing_games[ctx.channel.id]
if ctx.author != gameData["user"]:
return
if gameData["game"] == "hangman":
guess = message.content.split(" ")[0]
if len(guess) > 1:
return await ctx.send("Please send only a single letter to guess the next letter in the word")
if not guess.isalpha():
return await ctx.send("Letters only please")
gameData["turnNo"] += 1
if message.content[0].lower() == gameData["word"][gameData["current_letter"]]:
if (gameData["current_letter"]+1) >= len(gameData["word"]):
await ctx.send(f"**Congratulations! You won! The word was ``{gameData['word']}``**")
del self.ongoing_games[ctx.channel.id]
else:
disp = gameData["display_string"].split(" ")
disp[gameData["current_letter"]] = "__" + guess + "__"
gameData["current_letter"] += 1
disp[gameData["current_letter"]] = ">\_<"
gameData["display_string"] = " ".join(disp)
await ctx.send(gameData["display_string"])
else:
gameData["damage"] += 1
if gameData["damage"] >= 7:
await ctx.send(f"**You died! The word was ``{gameData['word']}``**")
del self.ongoing_games[ctx.channel.id]
else:
gameData["guessed_letters"].append(guess)
await ctx.send(f"**Incorrect: ``{7-gameData['damage']}`` lives left**\nGuessed letters: {' '.join(gameData['guessed_letters'])}\n{gameData['display_string']}")
def setup(bot):
bot.add_cog(Games(bot)) | StarcoderdataPython |
6533169 | <reponame>XROBOTICS/web<gh_stars>0
from django.db import models
class Post(models.Model):
post = models.CharField(max_length=500)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True) | StarcoderdataPython |
11396503 | import numpy as np
import pandas as pd
# for size in [100,1000,10000,100000,1000000]:
time_loops = [["M.EGG", "M.Sensor", "M.Concentration"]]
import matplotlib.pyplot as plt
from yafs.stats import Stats
#### ANALYSE FILES YAFS
police = "cloud"
police = "edge"
# print depth
for idx1, depth in enumerate([16]):
# for idx1, depth in enumerate([2]):
# 1000, 10000,
for idx2, size in enumerate([10000]):
# size = 100000
print("DEPTH: %i | TIME: %s" % (depth, size))
path = "results/Results_%s_%s_%s" % (police, size, depth)
print(path)
s = Stats(path)
# Network
s.showResults2(size, time_loops=time_loops)
print("\t Bytes transmitted: ", s.bytes_transmitted())
print("\t Messages transmitted: ", s.count_messages())
print("\t- Network saturation -")
print("\t\tAverage waiting messages : %i" % s.average_messages_not_transmitted())
print("\t\tPeak of waiting messages : %i" % s.peak_messages_not_transmitted())
print("\t\tTOTAL messages not transmitted: %i" % s.messages_not_transmitted())
# LOOPS
# res = s.showLoops(time_loops)
# loopstime[depth][idx2]=res[0]
#
# #Print the execution delay
# print s.times("time_total_response")
#
# print "Latency Acc: ", s.df_link["latency"].sum()
print("*" * 40)
police = "edge"
stop_time = 10000
dep = 16
path = "results/Results_%s_%s_%s.csv" % (police, stop_time, dep)
print(path)
df = pd.read_csv(path)
df["time_latency"] = df["time_reception"] - df["time_emit"]
resp_msg = df.groupby("message").agg({"time_latency": ["mean", "count"]}) # Its not necessary to have "count"
resp_msg.columns = ['_'.join(col).strip() for col in resp_msg.columns.values]
results = []
for loop in time_loops:
total = 0.0
for msg in loop:
print('x' * 20)
print(msg)
print(resp_msg.index)
print(resp_msg.columns)
print('x' * 20)
try:
# the aggregated column is time_latency_mean; the original lookup
# used a non-existent time_total_response_mean attribute
total += resp_msg[resp_msg.index == msg].time_latency_mean[0]
except:
total += 0
results.append(total)
lat = df["time_latency"].describe()
df["time_latency"].plot()
df["date"] = df.time_in.astype('datetime64[s]')
df.index = df.date
# df = df.resample('1s').agg(dict(time_latency='mean'))
timeLatency = df.time_latency.values
ticks = range(len(timeLatency))
# OK
### Latency Time and Allocation replicas
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(lat, '-')
# ax1.set_ylim(timeLatency.min()-0.5,timeLatency.max()+0.5)
ax1.set_xlabel("Simulation time", fontsize=16)
ax1.set_ylabel("Latency time", fontsize=16)
| StarcoderdataPython |
6461028 | <filename>sanity/cast.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import date, datetime
import json
def to_ascii(s, replace=''):
"""
>>> to_ascii(None)
''
>>> to_ascii('hi there')
'hi there'
>>> to_ascii('hi €there')
'hi there'
Watch what happens here -- a reminder that unicode chars take multiple bytes!
>>> to_ascii('hi €there', replace='!')
'hi !!!there'
>>> to_ascii(u'hi there')
'hi there'
>>> to_ascii(u'hi—there')
'hithere'
>>> to_ascii(1)
'1'
>>> to_ascii(3.14)
'3.14'
"""
if s is None:
return ''
if not isinstance(s, (str, unicode)):
s = to_str(s)
letters = []
for c in s:
try:
c = str(c)
except:
letters.append(replace)
else:
if 0 <= ord(c) <= 128:
letters.append(c)
else:
letters.append(replace)
return ''.join(letters)
def to_bool(input):
"""
>>> to_bool('1')
True
>>> to_bool('True')
True
>>> to_bool('true')
True
>>> to_bool(True)
True
>>> to_bool(False)
False
>>> to_bool('False')
False
>>> to_bool('false')
False
>>> to_bool('0')
False
>>> to_bool(None)
False
>>> to_bool('on')
True
>>> to_bool('off')
False
>>> to_bool('yes')
True
>>> to_bool('no')
False
"""
if input is None:
return False
if input in (u'0', '0'):
return False
elif input in (u'False', 'False', u'false', 'false'):
return False
elif input in (u'off', 'off', u'no', 'no'):
return False
else:
if bool(input):
return True
else:
return False
def to_int(arg, default=0):
"""
>>> to_int('0')
0
>>> to_int('1')
1
>>> to_int('a')
0
>>> to_int('12.3')
0
>>> to_int('1a2b3c')
0
>>> to_int('<1a2b3c/>')
0
>>> to_int(None)
0
>>> to_int('None')
0
>>> to_int(1)
1
>>> to_int(u'')
0
>>> to_int(1, None)
1
>>> to_int('hi', 0)
0
>>> to_int(None, 0)
0
>>> to_int(None, None)
>>> to_int(u'', 0)
0
>>> to_int(u'-1')
-1
"""
try:
return int(arg)
except:
return default
def to_jsonable(d, date_format='%Y-%m-%d', datetime_format='%Y-%m-%dT%H-%M-%SZ'):
"""
>>> to_jsonable(None)
''
>>> to_jsonable('hi')
'hi'
>>> to_jsonable({'hi':'there'})
{'hi': 'there'}
>>> to_jsonable({'hi':'there', 'one': 1})
{'hi': 'there', 'one': 1}
>>> t = date.today()
>>> result = to_jsonable({'hi':'there', 'today': t})
>>> expected = {'hi': 'there', 'today': '{}'.format(t.strftime('%Y-%m-%d'))}
>>> result == expected
True
>>> result['today'] == '{}'.format(t.strftime('%Y-%m-%d'))
True
>>> to_jsonable({'hi':'there', 'one': [1, 2, 3, 4]})
{'hi': 'there', 'one': [1, 2, 3, 4]}
>>> to_jsonable({'hi':'there', 'one': ['1', '2', '3', '4']})
{'hi': 'there', 'one': ['1', '2', '3', '4']}
>>> f = TestObject()
>>> to_jsonable({'hi': f})
{'hi': 'TestObject'}
>>> to_jsonable({'hi':'there', 'one': [f, f, f]})
{'hi': 'there', 'one': ['TestObject', 'TestObject', 'TestObject']}
"""
if d is None:
return ''
elif isinstance(d, (dict)):
results = {}
for k, v in d.items():
results[k] = to_jsonable(v)
return results
elif isinstance(d, (str, unicode)):
return d
elif isinstance(d, (int, float)):
return d
elif isinstance(d, (date,)):
return '{}'.format(d.strftime(date_format))
elif isinstance(d, (datetime,)):
return '{}'.format(d.strftime(datetime_format))
elif isinstance(d, (list,)):
results = []
for item in d:
results.append(to_jsonable(item))
return results
elif isinstance(d, (object,)):
string_representation = str(d)
if string_representation.startswith('<') and string_representation.endswith('>'):
# Then it's probably a generic Type representation, and not something we want
# to send over JSON.
try:
# Extract the classname and use that
string_representation = str(d.__class__).split('.')[-1]
except:
pass
return string_representation
else:
return str(d)
def to_json(d, date_format='%Y-%m-%d', datetime_format='%Y-%m-%dT%H-%M-%SZ'):
"""
>>> to_json({'hi':'there'})
'{"hi": "there"}'
>>> to_json({'hi':'there', 'one': 1})
'{"hi": "there", "one": 1}'
>>> t = date.today()
>>> result = to_json({'hi':'there', 'today': t})
>>> expected = '{{"hi": "there", "today": "{}"}}'.format(t.strftime('%Y-%m-%d'))
>>> result == expected
True
>>> f = TestObject()
>>> to_json({'hi': f})
'{"hi": "TestObject"}'
"""
return json.dumps(to_jsonable(d))
def to_latin_one(s):
return to_str(s, encoding='latin-1', errors='ignore')
def to_str(s, encoding='utf-8', errors='strict'):
"""
MOSTLY FROM DJANGO 1.3 django.utils.encoding
Returns a bytestring version of 's', encoded as specified in 'encoding'.
>>> to_str('Hi There')
'Hi There'
"""
if not isinstance(s, basestring):
try:
return str(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
return ' '.join([to_str(arg, encoding, errors) for arg in s])
return unicode(s).encode(encoding, errors)
elif isinstance(s, unicode):
return s.encode(encoding, errors)
elif s and encoding != 'utf-8':
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s
def to_unicode(s, encoding='utf-8', errors='strict'):
"""
MOSTLY FROM DJANGO 1.3 django.utils.encoding
>>> to_unicode('Hi There')
u'Hi There'
"""
# Handle the common case first, saves 30-40% in performance when s
# is an instance of unicode. This function gets called often in that
# setting.
if isinstance(s, unicode):
return s
try:
if not isinstance(s, basestring):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
try:
s = unicode(str(s), encoding, errors)
except UnicodeEncodeError:
s = ' '.join([to_unicode(arg, encoding, errors) for arg in s])
elif not isinstance(s, unicode):
# Note: We use .decode() here, instead of unicode(s, encoding,
# errors), so that if s is a SafeString, it ends up being a
# SafeUnicode at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError:
s = ' '.join([to_unicode(arg, encoding, errors) for arg in s])
return s
## ---------------------
if __name__ == "__main__":
import doctest
print("[cast.py] Testing...")
class TestObject():
message = 'howdy'
doctest.testmod()
print("Done.")
| StarcoderdataPython |
3313933 | <filename>d3t/watcher.py
from contextlib import contextmanager
from django.template.base import Node, Template
from .rendering import Rendering
from .signals import node_rendered, template_rendered
__all__ = [
'watch_templates',
]
def wrap_template_render(render_function):
def wrapper(template, context):
result = render_function(template, context)
template_rendered.send(sender=None, template=template, context=context, result=result)
return result
return wrapper
def wrap_node_render(render_function):
def wrapper(node, context):
result = render_function(node, context)
node_rendered.send(sender=None, node=node, result=result)
return result
return wrapper
@contextmanager
def mock_template_render():
original_function = Template._render
Template._render = wrap_template_render(Template._render)
yield
Template._render = original_function
@contextmanager
def mock_node_render():
original_function = Node.render_annotated
Node.render_annotated = wrap_node_render(Node.render_annotated)
yield
Node.render_annotated = original_function
@contextmanager
def watch_templates():
rendering = Rendering()
template_rendered.connect(rendering.register_template)
node_rendered.connect(rendering.register_node)
with mock_template_render(), mock_node_render():
yield rendering
template_rendered.disconnect(rendering.register_template)
node_rendered.disconnect(rendering.register_node)
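# Minimal usage sketch; the template name and context below are assumptions,
# but any Django template render inside the block would be captured:
#
#     from django.template.loader import render_to_string
#
#     with watch_templates() as rendering:
#         render_to_string('example.html', {'name': 'world'})
#     # `rendering` now holds the registered templates and nodes.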
| StarcoderdataPython |
1705556 | #!/usr/bin/env python
'''
web
The Web application. You know, templates and stuff.
'''
from flask import Blueprint
api = Blueprint('web', __name__, template_folder='templates', static_folder='static', static_url_path='/summary/static')
# Exposed Endpoints
from web.controller import index
| StarcoderdataPython |
6409175 | import json
import logging
import os
from model import script_configs
from utils import os_utils, file_utils
LOGGER = logging.getLogger('config_service')
def _load_script_config(path, content_or_json_dict):
if isinstance(content_or_json_dict, str):
json_object = json.loads(content_or_json_dict)
else:
json_object = content_or_json_dict
return script_configs.from_json(path, json_object, os_utils.is_pty_supported())
class ConfigService:
def __init__(self, conf_folder) -> None:
self._script_configs_folder = os.path.join(conf_folder, 'runners')
file_utils.prepare_folder(self._script_configs_folder)
def list_configs(self):
def load_script(path, content):
try:
json_object = json.loads(content)
return _load_script_config(path, json_object)
except:
LOGGER.exception('Could not load script: ' + path)
return self.visit_script_configs(load_script)
def load_config(self, name):
def find_and_load(path, content):
try:
json_object = json.loads(content)
config_name = script_configs.read_name(path, json_object)
if config_name == name:
return _load_script_config(path, json_object)
except:
LOGGER.exception('Could not load script config: ' + path)
configs = self.visit_script_configs(find_and_load)
if configs:
return configs[0]
return None
def visit_script_configs(self, visitor):
configs_dir = self._script_configs_folder
files = os.listdir(configs_dir)
configs = [file for file in files if file.lower().endswith(".json")]
result = []
for config_path in configs:
path = os.path.join(configs_dir, config_path)
try:
content = file_utils.read_file(path)
visit_result = visitor(path, content)
if visit_result is not None:
result.append(visit_result)
except:
LOGGER.exception("Couldn't read the file: " + config_path)
return result
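# Usage sketch (the conf folder and script name are assumptions):
#
#     service = ConfigService('/etc/script-server/conf')
#     all_configs = service.list_configs()  # parsed script config objects
#     ping = service.load_config('ping')    # returns None if nothing matches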
| StarcoderdataPython |
1950757 | <gh_stars>1-10
"""
This file holds a tracker class, which itself calls other trackers,
such as the OpenCV or special trackers.
It is essentially a wrapper class that gives the main file a consistent API.
"""
import cv2
import numpy as np
from collections import defaultdict
from driver_risk_utils import general_utils, tracker_utils
import multi_trackers
class Tracker:
"""
Tracker class that serves as the interface between our system and openCV or
other trackers that we have implemented.
Main function is `update_one`:
It updates one of the trackers with the output from a network,
the input image queue, and an index for the image.
"""
def __init__(self,
args,
tracker_type,
image_height,
image_width,
category_index):
"""
Arguments
args, an argument_utils args object, containing:
det_thresh
tracker_refresh
track
tracker_type:
String. What type of tracker?
image_height:
Height of the image in pixels. Integer.
image_width:
Width of the image in pixels. Integer.
category_index:
Dictionary for what categories are which from the model.
"""
self.tracker_type = tracker_type
self.det_thresh = args.det_thresh
self.tracker_refresh = args.tracker_refresh
self.multi_tracker = None
self.use_tracker = args.track
self.args = args
self.init_tracker = True
self.labels = [] # i -> list of labels
self.horizon = args.horizon
self.category_index = category_index
self.timer = general_utils.Timing()
self.timer.update_start("Overall")
if self.use_tracker:
self._create_multi_tracker()
def __del__(self):
string = "\n=============== Ending Tracker =============="
self.timer.update_end("Overall")
string += "\nTiming:" + self.timer.print_stats(True)
string += "\n==============\n"
print(string)
def _create_multi_tracker(self):
"""
Set the internal self.multi_tracker variable to one of the wrappers
implemented in multi_trackers.py.
"""
self.timer.update_start("Initialization")
if self.tracker_type == "KCF":
self.multi_tracker = multi_trackers.OpenCVMultiTrackerWrapper(
self.tracker_type
)
elif self.tracker_type == "Particle":
self.multi_tracker = multi_trackers.ParticleTrackerWrapper(
self.args.num_tracker_particles,
self.args.num_trackers,
self.args.tracker_hold
)
# TODO update args
else:
tracker_utils.raise_undefined_tracker_type(self.tracker_type)
self.timer.update_end("Initialization")
def needs_boxes(self):
""" returns true if the tracker always needs object detections to run"""
return(self.tracker_type in {
"Particle"
})
def update_if_init(self, elapsed_frames):
"""
Update, based on the elapsed frames and tracker refresh rate initially
set, our internal flag on if we want to initialize the tracker.
"""
self.init_tracker = elapsed_frames % self.tracker_refresh == 1
def check_and_reset_multitracker(self, state_object):
"""
If we need to reset the tracker, do so by creating a new multi_tracker
(which will delete the old one), resetting the label list, and telling
the referenced state_object to clear its data.
"""
if self.use_tracker and self.init_tracker:
self.timer.update_start("Reset")
self._create_multi_tracker()
self.labels = []
state_object.clear()
self.timer.update_end("Reset")
def update_one(self, image_index, net_out, image, verbose=False):
"""
The main function that is called. Uses the image and object detections
to update the tracker given a single image.
Arguments
image_index: integer
The index into the object detection network's output, for the
current image.
net_out: dict<string: list < boxes, scores, or classes > >
The output from the object detection network.
image: np array
The image.
verbose: boolean.
True if we want to print extra logging info.
Returns:
boxes_with_labels: dictionary <int : tuple<box, str> >
dictionary of object key : tuple of box coordinates and class label
"""
self.timer.update_start("Update One")
boxes_with_labels = dict()
boxes = None
if net_out is not None:
boxes = net_out['detection_boxes'][image_index][np.where(\
net_out['detection_scores'][image_index] >= self.det_thresh)]
self.labels = [self.category_index[key]['name'] for key in \
net_out['detection_classes'][image_index][np.where(\
net_out['detection_scores'][image_index] >= self.det_thresh)]
]
'''
boxes, self.labels = general_utils.filter_boxes(
net_out,
self.det_thresh,
self.horizon,
self.category_index,
image_index
)
'''
if self.init_tracker:
self.init_tracker = False
self.multi_tracker.initialize_tracker(image, boxes, self.labels)
im_h, im_w, _ = image.shape
for i,b in enumerate(boxes):
if i >= len(self.labels):
self.labels.extend([""])
boxes_with_labels[i] = (
general_utils.convert(im_h, im_w, b),
self.labels[i]
)
else:
ok, boxes_with_labels = self.multi_tracker.update_all(
image, boxes, self.labels, verbose)
if ok is False: # lost tracking
self.init_tracker = True
self.timer.update_end("Update One")
return boxes_with_labels
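# Usage sketch; `args`, `category_index`, `net_out`, `frame` and `state` below
# are assumptions standing in for the real argument object, label map,
# detector output, video frame and state object:
#
#     tracker = Tracker(args, 'KCF', 720, 1280, category_index)
#     tracker.update_if_init(elapsed_frames)
#     tracker.check_and_reset_multitracker(state)
#     boxes_with_labels = tracker.update_one(0, net_out, frame)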
| StarcoderdataPython |
1848401 | #!/usr/bin/env python
# GIMP Plug-in for the Sega Dreamcast MR file format
# Copyright (C) 2019 by BBHoodsta
#
# Encoding based on logotools(pngtomr.c) source code by <NAME> (ADK)
# http://napalm-x.thegypsy.com/andrewk/dc/
#
# Decoding based on Selfboot Inducer (mrimage.pas) source code by SiZiOUS
# http://sizious.com/
import os, sys, struct
from gimpfu import *
from array import array
def to_bytes(n, length, endianess='big'):
if(sys.version_info[0] < 3):
h = '%x' % n
s = ('0'*(len(h) % 2) + h).zfill(length*2).decode('hex')
return s if endianess == 'big' else s[::-1]
else:
return n.to_bytes(length, byteorder=endianess)
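# Worked example: 258 == 0x0102, so padded to four bytes it becomes
# b'\x00\x00\x01\x02' big-endian and b'\x02\x01\x00\x00' little-endian.
#
#     assert to_bytes(258, 4, 'big') == b'\x00\x00\x01\x02'
#     assert to_bytes(258, 4, 'little') == b'\x02\x01\x00\x00'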
def mr_encode(input, output, size):
length = 0
position = 0
run = 0
while(position < size):
run = 1
while((run < 0x17f) and (position+run < size) and (input[position] == input[position+run])):
run += 1
if(run > 0xff):
output[length] = 0x82
length += 1
output[length] = 0x80 | (run - 0x100)
length += 1
output[length] = input[position]
length += 1
elif(run > 0x7f):
output[length] = 0x81
length += 1
output[length] = run
length += 1
output[length] = input[position]
length += 1
elif(run > 1):
output[length] = 0x80 | run
length += 1
output[length] = input[position]
length += 1
else:
output[length] = input[position]
length += 1
position += run
return length
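# Toy illustration of the run-length scheme above (a sketch, not part of the
# plug-in): a run of four identical bytes collapses to the tag 0x80|4 plus the
# value, while a lone byte is copied through unchanged.
#
#     src = array("B", [7, 7, 7, 7, 9])
#     dst = array("B", [0] * len(src))
#     n = mr_encode(src, dst, len(src))
#     assert list(dst[:n]) == [0x84, 7, 9]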
def save_mr(img, drawable, filename, raw_filename):
src_width = img.width # image width
src_height = img.height # image height
# Display error if image is bigger than 320x90
if(src_width > 320 or src_height > 90):
gimp.message("Your image should be 320x90 or smaller\n")
return
# Grab region of pixels
src_rgn = drawable.get_pixel_rgn(0, 0, src_width, src_height, False, False)
# Create a pixel array from the region
src_pixels = array("B", src_rgn[0:src_width, 0:src_height])
# Allocate raw and compressed outputs
raw_output = array("B", "\x00" * (src_width * src_height ))
compressed_output = array("B", "\x00" * (src_width * src_height))
psize = len(src_rgn[0,0]) # Should be 3
palette_count = 0
palette_colors = array("B", "\x00" * (128 * 3)) # 128 colors (RGB)
# Generate raw_output and create the palette of the image
for i in range(src_width*src_height):
found = False
palette_index = 0
pixel_index = i * psize
while(not found and palette_index < palette_count):
if(src_pixels[pixel_index:pixel_index+psize] == palette_colors[palette_index*3:palette_index*3+psize]):
found = True
else:
palette_index += 1
# Display error message if image has more than 128 colors
if(not found and palette_index == 128):
gimp.message("Reduce the number of colors to <= 128 and try again.\n")
return
if(not found):
palette_colors[palette_index*3:palette_index*3+psize] = src_pixels[pixel_index:pixel_index+psize]
palette_count += 1
raw_output[i] = palette_index
compressed_size = mr_encode(raw_output, compressed_output, src_width*src_height)
# Display warning if compressed image is bigger than 8192 bytes
if(compressed_size > 8192):
gimp.message("WARNING: This will NOT fit in a normal ip.bin - it is %d bytes too big!\n", compressed_size - 8192)
crap = 0
endianness = 'little'
offset = 30 + palette_count*4 # 30 byte header
size = offset + compressed_size
with open(filename, 'wb') as output:
output.write("MR") #
output.write(to_bytes(size, 4, endianness)) # Filesize
output.write(to_bytes(crap, 4, endianness)) #
output.write(to_bytes(offset, 4, endianness)) # Data offset
output.write(to_bytes(src_width, 4, endianness)) # Image width
output.write(to_bytes(src_height, 4, endianness)) # Image height
output.write(to_bytes(crap, 4, endianness)) #
output.write(to_bytes(palette_count, 4, endianness)) # Number of colors in palette
#output.write(struct.pack("<iiiiiii", size, crap, offset, src_width, src_height, crap, palette_count))
for i in range(palette_count):
palette_color = palette_colors[i*3:i*3+3]
# Write RGB => BGR
for x in reversed(range(3)):
output.write(to_bytes(palette_color[x], 1, endianness))
output.write(to_bytes(crap, 1, endianness)) # Unused alpha
for i in range(compressed_size):
output.write(to_bytes(compressed_output[i], 1, endianness))
def mr_decode(input, cdata_size, idata_size):
position = 0
idx_position = 0
run = 0
indexed_data = array("B", "\x00" * idata_size)
while(position < cdata_size):
first_byte = input[position]
if((position+1) < cdata_size):
second_byte = input[position+1]
# The bytes lower than 0x80 are recopied just as they are in the Bitmap
if(first_byte < 0x80):
run = 1
position += 1
# The tag 0x81 is followed by a byte giving directly the count of points
elif(first_byte == 0x81):
run = second_byte
first_byte = input[position+2]
position += 3
# The tag 0x82 is followed by the number of the points decoded in Run
# By retaining only the first byte for each point
elif(first_byte == 0x82 and second_byte >= 0x80):
run = second_byte - 0x80 + 0x100
first_byte = input[position+2]
position += 3
else:
run = first_byte - 0x80
first_byte = second_byte
position += 2
# Writing decompressed bytes
for i in range(run):
# The additional byte (+ 1) is useless, but it always present in MR files.
if(idx_position+i < idata_size):
indexed_data[idx_position+i] = first_byte
idx_position += run
return indexed_data
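# Round-trip sketch pairing with mr_encode above (same toy values):
#
#     decoded = mr_decode(array("B", [0x84, 7, 9]), 3, 5)
#     assert list(decoded) == [7, 7, 7, 7, 9]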
def load_mr(filename, raw_filename):
opacity = 100
file_content = ""
# Get content of file
with open(filename, 'rb') as input:
file_content = input.read()
# Parse header
header_blob = struct.unpack("<iiiiiii", file_content[2:30]) # Grab header ignoring 'MR'
filesize = header_blob[0]
dataoffset = header_blob[2]
img_width = header_blob[3]
img_height = header_blob[4]
num_colors = header_blob[6]
# Parse Palette
rgb_palette = array("B", "\x00" * (num_colors*3))
bgra_palette = struct.unpack("<" + ("B"*(num_colors*4)), file_content[30:30+num_colors*4])
for i in range(num_colors):
# Convert BGRA => RGB
for x in reversed(range(3)):
rgb_palette[i*3+(2-x)] = bgra_palette[i*4+x] # BGRA entries are 4 bytes wide
# Decode indexed data
cdata_size = filesize - dataoffset
compressed_data = struct.unpack("<" + ("B" * cdata_size), file_content[dataoffset:dataoffset + cdata_size])
indexed_data = mr_decode(compressed_data, cdata_size, img_width * img_height)
# Indexed data => RGB data
rgb_data = array("B", "\x00" * (img_width * img_height * 3))
for i in range(img_width * img_height):
index = indexed_data[i]
rgb_data[i*3:i*3+3] = rgb_palette[index*3:index*3+3]
# Create image
img = gimp.Image(img_width, img_height, RGB)
img.filename = filename
img_layer = gimp.Layer(img, filename, img_width, img_height, RGB_IMAGE, opacity, NORMAL_MODE)
img_layer_region = img_layer.get_pixel_rgn(0, 0, img_width, img_height, True)
img_layer_region[0:img_width, 0:img_height] = rgb_data.tostring()
img.add_layer(img_layer, 0)
gimp.displays_flush()
return img
def register_load_handlers():
gimp.register_load_handler('file-mr-load', 'mr', '')
pdb['gimp-register-file-handler-mime']('file-mr-load', 'image/mr')
def register_save_handlers():
gimp.register_save_handler('file-mr-save', 'mr', '')
register(
'file-mr-save', #name
'Save an MR (.mr) file', #description
'Save an MR (.mr) file',
'BBHoodsta', #author
'BBHoodsta', #copyright
'2019', #year
'Sega Dreamcast MR image',
'*',
[ #input args. Format (type, name, description, default [, extra])
(PF_IMAGE, "image", "Input image", None),
(PF_DRAWABLE, "drawable", "Input drawable", None),
(PF_STRING, "filename", "The name of the file", None),
(PF_STRING, "raw-filename", "The name of the file", None),
],
[], #results. Format (type, name, description)
save_mr, #callback
on_query = register_save_handlers,
menu = '<Save>'
)
register(
'file-mr-load', #name
'Load an MR (.mr) file', #description
'Load an MR (.mr) file',
'BBHoodsta', #author
'BBHoodsta', #copyright
'2019', #year
'Sega Dreamcast MR image',
None, #image type
[ #input args. Format (type, name, description, default [, extra])
(PF_STRING, 'filename', 'The name of the file to load', None),
(PF_STRING, 'raw-filename', 'The name entered', None),
],
[(PF_IMAGE, 'image', 'Output image')], #results. Format (type, name, description)
load_mr, #callback
on_query = register_load_handlers,
menu = "<Load>",
)
main()
| StarcoderdataPython |
111479 | <filename>fluent/cldr/__init__.py
""" CLDR/ICU pluralization support.
ICU/ARB plurals support
=======================
Our `Translation` objects have a JSON `plural_texts` field which holds the plural forms. There are
six case keywords which hold message versions depending on the number: zero, one, two, few, many, other.
If a value is missing for anyone of them `other` is used.
Additionally the JSON value may hold message versions for explicit cases (=1, =42).
#TODO (this is what the specs say, but it'll be done in ticket #952)
When we're looking up a number N, we check if `plural_texts` holds a specific =N case, and use it.
If it doesn't we pass N through the plural_rules code to determine the case keyword to use.
The specs additionally define how to substitute the `#` character for a locale-formatted N value, but
we're ignoring that part and simply expect a normal ARB curly braced placeholder for the value.
Singular translations
=====================
For singular translations we'll keep the translated text in `plural_texts[ONE]`, except for
Asian family of languages with no plurals, where the form is always OTHER. That's why
you should always lookup `get_plural_index()` for the given language. Translation
objects define a `text` property to get and assign the singular form transparently.
Examples and Docs
=================
How this actually works:
(http://icu-project.org/apiref/icu4j/com/ibm/icu/text/PluralFormat.html)
Pluralization rules:
http://www.unicode.org/cldr/charts/latest/supplemental/language_plural_rules.html
"{COMB_OF_GENDER_AND_PLURAL,
select,
female {{
NUM_VALUES, plural, =1 {Category} other {Categories}
}}
male {{
NUM_VALUES, plural, =1 {Category} other {Categories}
}}
other {{
NUM_VALUES, plural, =1 {Category} other {Categories}
}}
}",
"{NUM_EMAILS_TO_SEND, plural, =0 {unused plural form} =1 {One email will be sent.} other {# emails will be sent.}}"
Gettext support
===============
We're keeping the internal representation closer to the cldr rules because they're more complex and verbose
(explicit ONE, ZERO, FEW, MANY) forms. Gettext uses a simpler approach with an indexed list of forms and a simple
function for computing the index. There might be some convention to the order of plural forms for languages,
so instead of generating the indexed form from our representation we manually match a Gettext
plural form definition from here https://localization-guide.readthedocs.io/en/latest/l10n/pluralforms.html
to our cldr functions.
Using that we can match indexed ordered forms to codenamed cldr forms allowing for *.po import and export.
"""
import re
from decimal import Decimal, InvalidOperation
from collections import OrderedDict
from fluent.cldr.rules import get_plural_index, get_rules_for_language
# Trying to keep the the data small
_json_kw = ZERO, ONE, TWO, FEW, MANY, OTHER = 'zotfmh'
_icu_kw = 'zero', 'one', 'two', 'few', 'many', 'other'
ICU_KEYWORDS = OrderedDict(zip(_icu_kw, _json_kw))
#RE_FORMAT_SYMBOLS = re.compile(r'(?<!%)(?:%%)*%s')
RE_PYTHON_PLACEHOLDERS = re.compile(r'(?<!%)(?:%%)*%\(([^\)]+)\)s')
RE_ICU_PLACEHOLDERS = re.compile(r'{([^}]+)}')
# Just a regular message, a string with curly braced variables
RE_ICU_MSG = re.compile(r'(^[^{}]*(?:{[A-Za-z_]+}[^{}]*)*$)')
# Something, something, plural forms
RE_ICU_PLURAL_MSG = re.compile(r'^{\s*[A-Za-z_]+\s*,\s*plural\s*,\s*(?P<plurals>.+)}$')
def _icu_encode(text):
""" Changes placeholder representation from python to curly braces, and removes double percentages."""
return RE_PYTHON_PLACEHOLDERS.sub(r"{\1}", text).replace('%%', '%')
def _icu_decode(text):
""" Change placeholders into python's representation and double encode percentages."""
return RE_ICU_PLACEHOLDERS.sub(r"%(\1)s", text.replace('%', '%%'))
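# Round-trip sketch for the two helpers above:
#
#     assert _icu_encode('Hello %(name)s, 100%%') == 'Hello {name}, 100%'
#     assert _icu_decode('Hello {name}, 100%') == 'Hello %(name)s, 100%%'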
def _export_plurals(plurals):
""" Encode a plurals dict in ICU format.
First the explicit `=1` directives, followed by the six ICU keyword messages (whichever
of them exist in the data dictionry). """
parts = ["{NUM, plural,"]
keyword_parts = []
for icu_key, key in ICU_KEYWORDS.items():
if key in plurals:
message = _icu_encode(plurals.pop(key))
keyword_parts.append(" %s {%s}" % (icu_key, message))
# The remaining keys in the dictionary are numbers
for key in sorted(plurals.keys()):
assert isinstance(key, (int, long))
message = _icu_encode(plurals.pop(key))
parts.append(" =%s {%s}" % (key, message))
parts.extend(keyword_parts)
parts.append("}")
return "".join(parts)
def export_master_message(master):
if not master.plural_text:
return _icu_encode(master.text)
# Assuming master is English, populate the ONE and OTHER forms
return _export_plurals({ONE: master.text, OTHER: master.plural_text})
def export_translation_message(trans, only_used=False):
#FIXME: this is only needed as long as we keep both `plurals` and `translated_text`+`plural_texts`
if not trans.plurals:
return _icu_encode(trans.translated_text)
lookup_fun = get_rules_for_language(trans.language_code)
if not trans.master.plural_text:
singular_form = lookup_fun(1)
return _icu_encode(trans.plurals[singular_form])
if only_used:
plurals = dict((form, t) for (form, t) in trans.plurals.iteritems() if form in lookup_fun.plurals_used)
else:
plurals = dict(trans.plurals)
if len(plurals) == 1:
# if there's only one plural form it has to be the singular translation
return _icu_encode(plurals.values()[0])
return _export_plurals(plurals)
def _decode_icu_plurals(data):
""" Parse the ICU encoded plural options into a dictionary.
The _msg_generator is simple tokenizer. Since we know the input is going to be:
"keyword {blah {var} blah} ..." When we encounter an opening brace, we know we just
finished reading a keyword. Inside the translated messages it would be enough to
count opening and closing brackets, but since we know there can be at most two levels (msg + variables)
we do the extra error checking (too many curly brace levels).
Once the input is tokenized into keywords and translation messages, we validate the keywords.
They must either be in ICU_KEYWORDS, or "=" + <number>.
"""
result = {}
OUTER, TRANS, TRANS_VAR = 0,1,2 # the possible nested braces levels
def _msg_generator(chars):
brace_level = 0
buf = []
for x in chars:
buf.append(x)
if x == '{':
# start buffering the translation
brace_level += 1
if brace_level > TRANS_VAR:
raise ValueError('Too many curly brace levels')
if brace_level == TRANS:
# yield a keyword and start buffering a translation
buf.pop()
yield True, ''.join(buf).strip()
buf = []
if x == '}':
brace_level -= 1
if brace_level < OUTER:
raise ValueError('Unexpected %s' % x)
if brace_level == OUTER:
# A translation just ended, yield it and start buffering another keyword
buf.pop()
yield False, ''.join(buf)
buf = []
if brace_level != OUTER:
raise ValueError('Mismatched { } braces')
last_keyword = None
for is_keyword, token in _msg_generator(data):
if not is_keyword:
if last_keyword is None:
raise ValueError('Expected a keyword')
result[last_keyword] = _icu_decode(token)
last_keyword = None
else:
if token[0] == '=':
try:
# We attempt to parse as decimal to make sure it's a number
last_keyword = "=%s" % Decimal(token[1:])
except InvalidOperation:
raise ValueError('Expected keyword: "=<number>", got: %s' % token)
else:
if token not in ICU_KEYWORDS:
raise ValueError('Expected %s or "=<number", got: "%s"' % (', '.join(_icu_kw), token))
last_keyword = ICU_KEYWORDS[token]
return result
def import_icu_message(msg, language=None):
""" Decode the ICU message into a plurals dict. """
if RE_ICU_MSG.match(msg):
plural_form = get_plural_index(language, 1) if language else ONE
return {plural_form: _icu_decode(msg)}
# If the msg doesn't match a direct singular translation, attempt to decode as a plurals dict:
match = RE_ICU_PLURAL_MSG.match(msg)
data = match and match.group('plurals')
if not data:
raise ValueError('Incorrect ICU translation encoding')
return _decode_icu_plurals(data)
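# Decoding sketch (hypothetical message): plural forms come back keyed by the
# compact codes defined above, here ONE == 'o' and OTHER == 'h':
#
#     msg = '{NUM, plural, one {{count} apple} other {{count} apples}}'
#     assert import_icu_message(msg) == {'o': '%(count)s apple',
#                                        'h': '%(count)s apples'}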
| StarcoderdataPython |
12818092 | <reponame>zorache/ServiceX_App<gh_stars>0
# Copyright (c) 2019, IRIS-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from flask import current_app
from servicex.models import TransformRequest, db
from servicex.resources.servicex_resource import ServiceXResource
class TransformStart(ServiceXResource):
@classmethod
def make_api(cls, transformer_manager):
"""Initializes the transformer manage for this resource."""
cls.transformer_manager = transformer_manager
return cls
def post(self, request_id):
"""
Starts a transformation request, deploys transformers, and updates record.
:param request_id: UUID of transformation request.
"""
from servicex.kafka_topic_manager import KafkaTopicManager
submitted_request = TransformRequest.return_request(request_id)
submitted_request.status = 'Running'
submitted_request.save_to_db()
db.session.commit()
if current_app.config['TRANSFORMER_MANAGER_ENABLED']:
if submitted_request.result_destination == 'kafka':
# Setup the kafka topic with the correct number of partitions and max
# message size
max_message_size = 1920000
kafka = KafkaTopicManager(submitted_request.kafka_broker)
kafka.create_topic(request_id,
max_message_size=max_message_size,
num_partitions=100)
rabbitmq_uri = current_app.config['TRANSFORMER_RABBIT_MQ_URL']
namespace = current_app.config['TRANSFORMER_NAMESPACE']
x509_secret = current_app.config['TRANSFORMER_X509_SECRET']
generated_code_cm = submitted_request.generated_code_cm
self.transformer_manager.launch_transformer_jobs(
image=submitted_request.image, request_id=request_id,
workers=submitted_request.workers,
chunk_size=submitted_request.chunk_size, rabbitmq_uri=rabbitmq_uri,
namespace=namespace,
x509_secret=x509_secret,
generated_code_cm=generated_code_cm,
result_destination=submitted_request.result_destination,
result_format=submitted_request.result_format,
kafka_broker=submitted_request.kafka_broker)
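# Registration sketch; the flask_restful Api instance and the URL rule below
# are assumptions, not taken from this file:
#
#     api.add_resource(TransformStart.make_api(transformer_manager),
#                      '/servicex/transformation/<string:request_id>/start')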
| StarcoderdataPython |
5177054 | from mopy.impl.dvonn.state import Cell
from mopy.impl.dvonn.game import DvonnGame
import pytest
@pytest.fixture
def game(scope="module"):
return DvonnGame()
@pytest.fixture
def new_state(game):
return game.new_game()
@pytest.fixture
def full_state(new_state):
full_state = new_state
grid = full_state.board.grid
# Populate the board
for x, row in enumerate(grid):
for y, cell in enumerate(row):
cell.owner = Cell.Owner.WHITE if y % 2 == 0 else Cell.Owner.BLACK
if cell.is_owned_by(y % 2):
cell.num_white_rings = 1
else:
cell.num_black_rings = 1
# Place the red rings
for x, y in [(2, 0), (3, 5), (0, 9)]:
grid[x][y].owner = Cell.Owner.RED
grid[x][y].num_dvonn_rings = 1
for n in range(2):
full_state.players[n].num_player_rings = 0
full_state.players[n].num_dvonn_rings = 0
return full_state
@pytest.mark.parametrize("test_cell, test_dist, expected", [
(Cell(0, 0), 1, [(0, 3), (1, 2), (1, 1), (0, 1), (-1, 2), (-1, 3)]),
(Cell(2, 2), 1, [(1, 4), (1, 5), (2, 5), (3, 4), (3, 3), (2, 3)]),
(Cell(-1, 3), 2, [(1, 1), (1, 3), (3, 3), (5, 1), (5, -1), (3, -1)]),
(Cell(8, 0), 3, [(-3, 10), (-3, 13), (0, 13), (3, 10), (3, 7), (0, 7)])
])
def test_grid_neighbour_positions(test_cell, test_dist, expected):
neighbours = test_cell.grid_neighbour_positions(test_dist)
assert all(e in neighbours for e in expected)
def test_ring_removal(full_state):
board = full_state.board
# Remove rings to isolate lower right corner
for x, y in [(0, 10), (1, 9), (2, 8), (3, 7), (4, 6)]:
visited = [[False for cell in row] for row in board.grid]
assert not board._is_isolated_component(x, y, visited)
cell = board.grid[x][y]
cell.num_white_rings = 0
cell.num_black_rings = 0
cell.owner = Cell.Owner.EMPTY
cell.num_dvonn_rings = 0
# Remove the newly isolated group of rings
visited = [[False for cell in row] for row in board.grid]
assert(board._is_isolated_component(2, 9, visited))
board.remove_isolated_rings()
for x, y in [(1, 10), (2, 9), (3, 8), (4, 7), (2, 10), (3, 9), (4, 8)]:
cell = board.grid[x][y]
assert cell.owner == Cell.Owner.EMPTY
assert cell.num_white_rings == 0
assert cell.num_black_rings == 0
assert cell.num_dvonn_rings == 0
@pytest.mark.parametrize("test_cell, expected", [
(Cell(0, 0), False),
(Cell(0, 1), True),
(Cell(-2, 2), False),
(Cell(2, 4), False),
(Cell(4, 3), True),
(Cell(8, 0), False),
(Cell(2, 2), True),
(Cell(8, 2), False),
(Cell(2, 3), True)
])
def test_surround(full_state, test_cell, expected):
assert full_state.board.is_surrounded(test_cell) == expected
| StarcoderdataPython |
6692850 | <reponame>Unknoob/buck
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, print_function
import itertools
import os
import site
import sys
import uuid
from pkg_resources import (
DistributionNotFound,
Environment,
Requirement,
WorkingSet,
find_distributions
)
from .common import die, open_zip, safe_mkdir, safe_rmtree
from .interpreter import PythonInterpreter
from .package import distribution_compatible
from .pex_builder import PEXBuilder
from .pex_info import PexInfo
from .tracer import TRACER
from .util import CacheHelper, DistributionHelper
class PEXEnvironment(Environment):
@classmethod
def force_local(cls, pex, pex_info):
if pex_info.code_hash is None:
# Do not support force_local if code_hash is not set. (It should always be set.)
return pex
explode_dir = os.path.join(pex_info.zip_unsafe_cache, pex_info.code_hash)
TRACER.log('PEX is not zip safe, exploding to %s' % explode_dir)
if not os.path.exists(explode_dir):
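      # Illustrative note (as read from the code below): extraction goes into a
      # uniquely named temp dir first and is then renamed into place, so a
      # concurrent reader never observes a half-exploded PEX.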
explode_tmp = explode_dir + '.' + uuid.uuid4().hex
with TRACER.timed('Unzipping %s' % pex):
try:
safe_mkdir(explode_tmp)
with open_zip(pex) as pex_zip:
pex_files = (x for x in pex_zip.namelist()
if not x.startswith(PEXBuilder.BOOTSTRAP_DIR) and
not x.startswith(PexInfo.INTERNAL_CACHE))
pex_zip.extractall(explode_tmp, pex_files)
except: # noqa: T803
safe_rmtree(explode_tmp)
raise
TRACER.log('Renaming %s to %s' % (explode_tmp, explode_dir))
os.rename(explode_tmp, explode_dir)
return explode_dir
@classmethod
def update_module_paths(cls, new_code_path):
# Force subsequent imports to come from the .pex directory rather than the .pex file.
TRACER.log('Adding to the head of sys.path: %s' % new_code_path)
sys.path.insert(0, new_code_path)
for name, module in sys.modules.items():
if hasattr(module, "__path__"):
module_dir = os.path.join(new_code_path, *name.split("."))
TRACER.log('Adding to the head of %s.__path__: %s' % (module.__name__, module_dir))
module.__path__.insert(0, module_dir)
@classmethod
def write_zipped_internal_cache(cls, pex, pex_info):
prefix_length = len(pex_info.internal_cache) + 1
existing_cached_distributions = []
newly_cached_distributions = []
zip_safe_distributions = []
with open_zip(pex) as zf:
# Distribution names are the first element after ".deps/" and before the next "/"
distribution_names = set(filter(None, (filename[prefix_length:].split('/')[0]
for filename in zf.namelist() if filename.startswith(pex_info.internal_cache))))
# Create Distribution objects from these, and possibly write to disk if necessary.
for distribution_name in distribution_names:
internal_dist_path = '/'.join([pex_info.internal_cache, distribution_name])
# First check if this is already cached
dist_digest = pex_info.distributions.get(distribution_name) or CacheHelper.zip_hash(
zf, internal_dist_path)
cached_location = os.path.join(pex_info.install_cache, '%s.%s' % (
distribution_name, dist_digest))
if os.path.exists(cached_location):
dist = DistributionHelper.distribution_from_path(cached_location)
existing_cached_distributions.append(dist)
continue
else:
dist = DistributionHelper.distribution_from_path(os.path.join(pex, internal_dist_path))
if DistributionHelper.zipsafe(dist) and not pex_info.always_write_cache:
zip_safe_distributions.append(dist)
continue
with TRACER.timed('Caching %s' % dist):
newly_cached_distributions.append(
CacheHelper.cache_distribution(zf, internal_dist_path, cached_location))
return existing_cached_distributions, newly_cached_distributions, zip_safe_distributions
@classmethod
def load_internal_cache(cls, pex, pex_info):
"""Possibly cache out the internal cache."""
internal_cache = os.path.join(pex, pex_info.internal_cache)
with TRACER.timed('Searching dependency cache: %s' % internal_cache, V=2):
if os.path.isdir(pex):
for dist in find_distributions(internal_cache):
yield dist
else:
for dist in itertools.chain(*cls.write_zipped_internal_cache(pex, pex_info)):
yield dist
def __init__(self, pex, pex_info, interpreter=None, **kw):
self._internal_cache = os.path.join(pex, pex_info.internal_cache)
self._pex = pex
self._pex_info = pex_info
self._activated = False
self._working_set = None
self._interpreter = interpreter or PythonInterpreter.get()
super(PEXEnvironment, self).__init__(
search_path=sys.path if pex_info.inherit_path else [], **kw)
def update_candidate_distributions(self, distribution_iter):
for dist in distribution_iter:
if self.can_add(dist):
with TRACER.timed('Adding %s' % dist, V=2):
self.add(dist)
def can_add(self, dist):
return distribution_compatible(dist, self._interpreter, self.platform)
def activate(self):
if not self._activated:
with TRACER.timed('Activating PEX virtual environment from %s' % self._pex):
self._working_set = self._activate()
self._activated = True
return self._working_set
def _resolve(self, working_set, reqs):
reqs = reqs[:]
unresolved_reqs = set()
resolveds = set()
# Resolve them one at a time so that we can figure out which ones we need to elide should
# there be an interpreter incompatibility.
for req in reqs:
with TRACER.timed('Resolving %s' % req, V=2):
try:
resolveds.update(working_set.resolve([req], env=self))
except DistributionNotFound as e:
TRACER.log('Failed to resolve a requirement: %s' % e)
unresolved_reqs.add(e.args[0].project_name)
# Older versions of pkg_resources just call `DistributionNotFound(req)` instead of the
# modern `DistributionNotFound(req, requirers)` and so we may not have the 2nd requirers
# slot at all.
if len(e.args) >= 2 and e.args[1]:
unresolved_reqs.update(e.args[1])
unresolved_reqs = set([req.lower() for req in unresolved_reqs])
if unresolved_reqs:
TRACER.log('Unresolved requirements:')
for req in unresolved_reqs:
TRACER.log(' - %s' % req)
TRACER.log('Distributions contained within this pex:')
if not self._pex_info.distributions:
TRACER.log(' None')
else:
for dist in self._pex_info.distributions:
TRACER.log(' - %s' % dist)
if not self._pex_info.ignore_errors:
die('Failed to execute PEX file, missing compatible dependencies for:\n%s' % (
'\n'.join(map(str, unresolved_reqs))))
return resolveds
def _activate(self):
self.update_candidate_distributions(self.load_internal_cache(self._pex, self._pex_info))
if not self._pex_info.zip_safe and os.path.isfile(self._pex):
self.update_module_paths(self.force_local(self._pex, self._pex_info))
all_reqs = [Requirement.parse(req) for req in self._pex_info.requirements]
working_set = WorkingSet([])
resolved = self._resolve(working_set, all_reqs)
for dist in resolved:
with TRACER.timed('Activating %s' % dist, V=2):
working_set.add(dist)
if os.path.isdir(dist.location):
with TRACER.timed('Adding sitedir', V=2):
site.addsitedir(dist.location)
dist.activate()
return working_set
| StarcoderdataPython |
9700886 | <filename>server/database.py
import os
from datetime import datetime
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from dotenv import load_dotenv
dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
load_dotenv(dotenv_path)
DB_USER = os.getenv('DB_USER')
DB_PW = os.getenv('DB_PW')
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = f'postgresql://{DB_USER}:{DB_PW}@localhost:5432/hhdb'
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
def __repr__(self):
return f'User {self.username}'
class List(db.Model):
id = db.Column(db.Integer, primary_key=True, nullable=False)
name = db.Column(db.String(100), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
user = db.relationship('User', backref=db.backref('lists', lazy=True))
def __repr__(self):
return f'<List {self.name}>'
class Task(db.Model):
id = db.Column(db.Integer, primary_key=True, nullable=False)
title = db.Column(db.String(50), nullable=False)
description = db.Column(db.String(250))
completed = db.Column(db.Boolean)
due_date = db.Column(db.Date, default=datetime.utcnow)
list_id = db.Column(db.Integer, db.ForeignKey('list.id'), nullable=False)
list = db.relationship('List', backref=db.backref('tasks', lazy=True))
def __repr__(self):
return f'<Task {self.title}>'
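# Usage sketch (illustrative; assumes the hhdb database from the URI above exists):
#     db.create_all()
#     user = User(username='alice', email='alice@example.com')
#     db.session.add(user)
#     db.session.commit()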
| StarcoderdataPython |
8069333 | from __future__ import absolute_import
from __future__ import print_function
import argparse
from collections import defaultdict
import json
import requests
from memoized import memoized_property
from .utils import humansize, get_arg_parser, get_config_from_args, check_connection, \
get_db_list, get_db_metadata, get_shard_allocation, do_couch_request, put_shard_allocation
from .describe import print_shard_table
from .file_plan import read_plan_file
from .doc_models import ShardAllocationDoc, AllocationSpec
from six.moves import range
class _NodeAllocation(object):
def __init__(self, i, size, shards):
self.i = i
self.size = size
self.shards = shards
def as_tuple(self):
return self.i, self.size, self.shards
def __eq__(self, other):
return self.as_tuple() == other.as_tuple()
def __repr__(self):
return '_NodeAllocation({self.i!r}, {self.size!r}, {self.shards!r})'.format(self=self)
def suggest_shard_allocation(shard_sizes, n_nodes, n_copies, existing_allocation=None):
return Allocator(shard_sizes, n_nodes, n_copies, existing_allocation).suggest_shard_allocation()
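# Usage sketch (illustrative; sizes and shard ids below are made up):
#     nodes = suggest_shard_allocation(
#         [(300, ('shard-0', 'db1')), (100, ('shard-1', 'db1'))],
#         n_nodes=2, n_copies=1)
#     # -> one _NodeAllocation(i, size, shards) per node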
class Allocator(object):
def __init__(self, shard_sizes, n_nodes, n_copies, existing_allocation=None):
self.shard_sizes = shard_sizes
self.n_nodes = n_nodes
self.n_copies = n_copies
        # one independent set per node (not n references to the same set)
        self.existing_allocation = existing_allocation or [set() for _ in range(self.n_nodes)]
self.nodes = [_NodeAllocation(i, 0, []) for i in range(self.n_nodes)]
self._average_size = sum([size for size, _ in shard_sizes]) * n_copies * 1.0 / n_nodes
self._copies_still_in_original_location_by_shard = defaultdict(int)
for shards in self.existing_allocation:
for shard in shards:
self._copies_still_in_original_location_by_shard[shard] += 1
def suggest_shard_allocation(self):
# First distribute, preferring shards' current locations
for shard in self._get_shard_sizes_largest_to_smallest():
for node in self._select_shard_locations(shard):
self._add_shard_to_node(node, shard)
# Then rebalance
self._rebalance_nodes()
return self.nodes
def _get_shard_sizes_largest_to_smallest(self):
return [shard for _, shard in reversed(sorted(self.shard_sizes))]
def _select_shard_locations(self, shard):
"""
        Selects the best locations for n_copies of a given shard, based on the
        allocation so far, preferring the shard's existing locations.
        Returns a list of nodes (_NodeAllocation) of length n_copies.
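
        E.g. (illustrative): with n_copies=2 and node sizes [10, 30, 20], a shard
        whose only existing copy is on node 1 is placed on nodes 1 and 0.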
"""
return sorted(
self.nodes,
key=lambda node: (shard not in self.existing_allocation[node.i], node.size)
)[:self.n_copies]
@memoized_property
def _sizes_by_shard(self):
return {shard: size for size, shard in self.shard_sizes}
def _add_shard_to_node(self, node, shard):
node.shards.append(shard)
node.size += self._sizes_by_shard[shard]
def _rebalance_nodes(self):
larger_nodes, smaller_nodes = self._split_nodes_by_under_allocated()
if not smaller_nodes:
return
while True:
# Move copies from larger_nodes to smaller_nodes
# until doing so would make a larger node smaller than average_size
# Never move more than half - 1 copies of a shard from their original location
# (as given by existing_allocation)---these are the shard's "pivot locations"
larger_nodes.sort(key=lambda node: node.size, reverse=True)
smallest_node = min(smaller_nodes, key=lambda node: node.size)
if smallest_node.size >= self._average_size:
break
try:
large_node, shard = self._find_shard_to_move(larger_nodes, smallest_node)
except self.NoEligibleMove:
break
else:
self._move_shard(shard, large_node, smallest_node)
def _move_shard(self, shard, node1, node2):
if self._is_original_location(node1, shard):
self._copies_still_in_original_location_by_shard[shard] -= 1
node1.shards.remove(shard)
node1.size -= self._sizes_by_shard[shard]
self._add_shard_to_node(node2, shard)
def _split_nodes_by_under_allocated(self):
"""
Split nodes into okay nodes and under-allocated nodes
Any node whose size is less than half the size of the largest node
is deemed under-allocated
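
        E.g. (illustrative): with node sizes [100, 60, 40] the threshold is 50,
        so the 40-node is under-allocated and the other two are okay.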
:return: (okay_nodes, under_allocated_nodes)
"""
threshold = max(self.nodes, key=lambda node: node.size).size / 2
return (
[node for node in self.nodes if node.size >= threshold],
[node for node in self.nodes if node.size < threshold]
)
class NoEligibleMove(Exception):
pass
def _find_shard_to_move(self, larger_nodes, smallest_node):
for large_node in larger_nodes:
for shard in large_node.shards:
if shard in smallest_node.shards:
# don't move a shard if a copy of it is already on the target node
continue
if large_node.size - self._sizes_by_shard[shard] < self._average_size:
# don't move a shard if it would make the source node smaller than average
continue
if self._is_original_location(large_node, shard) \
and not self._can_still_move_original_copies(shard):
# don't move a shard if that shard has already had
# the max number of its copies moved
# this is to make sure we have n/2+1 pivot locations for a shard
continue
return large_node, shard
raise self.NoEligibleMove()
def _is_original_location(self, node, shard):
return shard in self.existing_allocation[node.i]
def _can_still_move_original_copies(self, shard):
        # movable only while more than n_copies/2 + 1 copies remain in their original location
return self._copies_still_in_original_location_by_shard[shard] > (self.n_copies / 2 + 1)
def get_db_size(node_details, db_name):
return get_db_metadata(node_details, db_name)['sizes']['file']
def get_view_signature_and_size(node_details, db_name, view_name):
view_info = do_couch_request(
node_details,
'/{db_name}/_design/{view_name}/_info'.format(db_name=db_name, view_name=view_name)
)
return view_info['view_index']['signature'], view_info['view_index']['sizes']['file']
def get_views_list(node_details, db_name):
view_response = do_couch_request(
node_details,
'/{db_name}/_all_docs?startkey="_design%2F"&endkey="_design0"'.format(db_name=db_name)
)
return [row['id'][len('_design/'):] for row in view_response['rows'] if row['id'].startswith('_design/')]
def get_db_info(config):
import gevent
processes = []
node_details = config.get_control_node()
db_names = get_db_list(node_details)
db_sizes = {}
db_shards = {}
shard_allocation_docs = {}
view_sizes = defaultdict(dict)
def _gather_db_size(db_name):
db_sizes[db_name] = get_db_size(node_details, db_name)
def _gather_db_shard_names(db_name):
doc = get_shard_allocation(config, db_name)
shard_allocation_docs[db_name] = doc
db_shards[db_name] = sorted(doc.by_range)
def _gather_view_size(db_name, view_name):
signature, size = get_view_signature_and_size(node_details, db_name, view_name)
view_sizes[db_name][signature] = (view_name, size)
def _gather_view_sizes(db_name):
subprocesses = []
for view_name in get_views_list(node_details, db_name):
# _gather_view_size(db_name, view_name)
subprocesses.append(gevent.spawn(_gather_view_size, db_name, view_name))
gevent.joinall(subprocesses, raise_error=True)
processes.extend([gevent.spawn(_gather_view_sizes, db_name) for db_name in db_names])
processes.extend([gevent.spawn(_gather_db_size, db_name) for db_name in db_names])
processes.extend([gevent.spawn(_gather_db_shard_names, db_name) for db_name in db_names])
gevent.joinall(processes, raise_error=True)
view_sizes = {db_name: {name: size for name, size in view_sizes[db_name].values()}
for db_name in db_names}
return [(db_name, db_sizes[db_name], view_sizes[db_name], db_shards[db_name], shard_allocation_docs[db_name])
for db_name in db_names]
def print_db_info(config):
"""
    Print a table of <db name> <data size on disk (excluding views)>
    <view size on disk> <number of shards>
"""
info = sorted(get_db_info(config))
row = u"{: <30}\t{: <20}\t{: <20}\t{: <20}"
print(row.format(u"Database", u"Data size on Disk", u"View size on Disk", u"Number of shards"))
for db_name, size, view_sizes, shards, _ in info:
print(row.format(
db_name,
humansize(size),
humansize(sum([view_size for view_name, view_size in view_sizes.items()])),
len(shards)
))
def get_shard_sizes(db_info, databases):
return [
(1.0 * sum([size] + list(views_size.values())) / len(shards), (shard_name, db_name))
for db_name, size, views_size, shards, _ in db_info
for shard_name in shards if db_name in databases
]
def normalize_allocation_specs(db_info, allocation_specs):
"""
    Modify allocation_specs in place to explicitly fill in databases.
An allocation spec without explicit databases is assigned all databases
not mentioned elsewhere.
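
    E.g. (illustrative): with databases {a, b, c} and specs [spec1(databases=[a]),
    spec2(databases=None)], spec2 is filled in with the unmentioned [b, c].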
"""
db_names = {db_name for db_name, _, _, _, _ in db_info}
mentioned_dbs = {db_name for allocation in allocation_specs
for db_name in (allocation.databases if allocation.databases else [])}
unmentioned_dbs = list(db_names - mentioned_dbs)
for allocation in allocation_specs:
if allocation.databases is None:
allocation.databases = list(unmentioned_dbs)
def get_existing_shard_allocation(db_info, databases, nodes):
return [
{
(shard_name, db_name)
for db_name, _, _, _, shard_allocation_doc in db_info if db_name in databases
for shard_name in shard_allocation_doc.by_node.get(node, [])
}
for node in nodes
]
def make_suggested_allocation_by_db(config, db_info, allocation_specs):
suggested_allocation_by_db = defaultdict(list)
normalize_allocation_specs(db_info, allocation_specs)
for allocation in allocation_specs:
existing_allocation = get_existing_shard_allocation(db_info, allocation.databases, allocation.nodes)
suggested_shard_allocation = suggest_shard_allocation(
get_shard_sizes(db_info, allocation.databases), len(allocation.nodes), allocation.copies,
existing_allocation=existing_allocation
)
for node_allocation in suggested_shard_allocation:
print("{}\t{}".format(config.format_node_name(allocation.nodes[node_allocation.i]), humansize(node_allocation.size)))
for shard_name, db_name in node_allocation.shards:
suggested_allocation_by_db[db_name].append((allocation.nodes[node_allocation.i], shard_name))
shard_allocations_docs = {
db_name: shard_allocation_doc
for db_name, _, _, _, shard_allocation_doc in db_info
}
suggested_allocation_docs_by_db = {}
for db_name, allocation in suggested_allocation_by_db.items():
by_range = defaultdict(list)
for node, shard in allocation:
by_range[shard].append(node)
doc = ShardAllocationDoc(_id=db_name, shard_suffix=shard_allocations_docs[db_name].shard_suffix)
doc.populate_from_range(by_range)
suggested_allocation_docs_by_db[db_name] = doc
return suggested_allocation_docs_by_db
def apply_suggested_allocation(shard_allocations, plan):
for shard_allocation_doc in shard_allocations:
# have both a set for set operations and a list to preserve order
# preserving order is useful for presenting things back to the user
# based on the order they gave them
db_name = shard_allocation_doc.db_name
suggested_allocation = plan[db_name]
assert suggested_allocation.validate_allocation()
suggested_allocation_set = {(node, shard)
for shard, nodes in suggested_allocation.by_range.items()
for node in nodes}
current_allocation_set = {(node, shard)
for shard, nodes in shard_allocation_doc.by_range.items()
for node in nodes}
shard_allocation_doc.by_range = suggested_allocation.by_range
shard_allocation_doc.by_node = suggested_allocation.by_node
shard_allocation_doc.changelog.extend([
["add", shard, node]
for node, shard in suggested_allocation_set - current_allocation_set
])
shard_allocation_doc.changelog.extend([
["delete", shard, node]
for node, shard in current_allocation_set - suggested_allocation_set
])
if shard_allocation_doc.shard_suffix:
assert shard_allocation_doc.shard_suffix == suggested_allocation.shard_suffix
else:
shard_allocation_doc.shard_suffix = suggested_allocation.shard_suffix
assert shard_allocation_doc.validate_allocation()
return shard_allocations
def main():
parser = get_arg_parser(u'Suggest shard allocation for a cluster')
group = parser.add_mutually_exclusive_group()
group.add_argument('--allocate', dest='allocation', nargs="+",
help='List of nodes and how many copies you want on them, '
'like node1,node2,node3:<ncopies>[:db1,db2] [...]')
group.add_argument('--from-plan', dest='plan_file',
help=u'Get target shard allocation from plan file.')
parser.add_argument('--save-plan', dest='save_to_plan_file', required=False,
help='Save this plan to a file for use later.')
parser.add_argument('--commit-to-couchdb', dest='commit', action='store_true', required=False,
help='Save the suggested allocation directly to couchdb, '
'changing the live shard allocation.')
parser.add_argument('--create-missing-databases', dest='create', action='store_true', required=False,
help="Create databases in the cluster if they don't exist.")
args = parser.parse_args()
config = get_config_from_args(args)
node_details = config.get_control_node()
check_connection(node_details)
if args.save_to_plan_file and args.plan_file:
# this probably isn't the intended use of this exception
# but makes it clear enough to the caller at this point.
raise argparse.ArgumentError(None, "You cannot use --save-plan with --from-plan.")
if args.allocation:
shard_allocations = generate_shard_allocation(config, args.allocation)
else:
plan = read_plan_file(args.plan_file)
create = args.create
shard_allocations = get_shard_allocation_from_plan(config, plan, create)
print_shard_table([shard_allocation_doc for shard_allocation_doc in shard_allocations])
if args.save_to_plan_file:
with open(args.save_to_plan_file, 'w') as f:
json.dump({shard_allocation_doc.db_name: shard_allocation_doc.to_plan_json()
for shard_allocation_doc in shard_allocations}, f)
if args.commit:
for shard_allocation_doc in shard_allocations:
db_name = shard_allocation_doc.db_name
try:
print(put_shard_allocation(config, shard_allocation_doc))
except requests.exceptions.HTTPError as e:
if db_name.startswith('_') and e.response.json().get('error') == 'illegal_docid':
print("Skipping {} (error response was {})".format(db_name, e.response.json()))
else:
raise
def get_shard_allocation_from_plan(config, plan, create=False):
shard_allocations_docs = [get_shard_allocation(config, db_name, create) for db_name in plan]
shard_allocations = apply_suggested_allocation(
shard_allocations_docs, plan
)
return shard_allocations
def parse_allocation_line(config, allocation_line):
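    """Parse one --allocate spec of the form 'node1,node2:<ncopies>[:db1,db2]'
    (format as given in the --allocate help text above) into an AllocationSpec."""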
try:
nodes, copies, databases = allocation_line.split(':')
except ValueError:
nodes, copies = allocation_line.split(':')
databases = None
nodes = [config.get_formal_node_name(node) for node in nodes.split(',')]
copies = int(copies)
if databases:
databases = databases.split(',')
return AllocationSpec(
nodes=nodes,
copies=copies,
databases=databases,
)
def generate_shard_allocation(config, allocation):
allocation = [
parse_allocation_line(config, allocation_line) for allocation_line in allocation
]
db_info = get_db_info(config)
shard_allocations_docs = [shard_allocation_doc
for _, _, _, _, shard_allocation_doc in db_info]
shard_allocations = apply_suggested_allocation(
shard_allocations_docs,
make_suggested_allocation_by_db(config, db_info, allocation)
)
return shard_allocations
if __name__ == '__main__':
from gevent import monkey; monkey.patch_all()
main()
| StarcoderdataPython |
6585996 | import math
import docplex.cp.model as cp
def simultaneousGroups(model, AAdict1, AAdict2):
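    """Constrain two compatible AAs (same week bounds and cursus set, equal and
    even division counts) so that division d of AA1 starts together with division
    n-1-d of AA2 (a reading of the constraint below; AA dict layout assumed)."""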
numberOfDivisions1 = len(AAdict1["divisions"])
numberOfDivisions2 = len(AAdict2["divisions"])
if AAdict1["weekBounds"] == AAdict2["weekBounds"] and numberOfDivisions1 == numberOfDivisions2 and set(
AAdict1["cursus"]) == set(AAdict2["cursus"]) and numberOfDivisions1%2 == 0:
for d in range(numberOfDivisions1):
for v in range(len(AAdict1["divisions"][0])):
model.add(cp.start_at_start(AAdict1["divisions"][d][v], AAdict2["divisions"][numberOfDivisions2 - 1 - d][v]))
else:
print("The 2 AAs don't match (number of divisions or week bounds or cursus).")
def fixedSlots(model, AAdict, fixedDay, fixedSlot, constants):
numberOfIntervalVariables = len(AAdict["divisions"][0])
startWeek = math.floor((AAdict["weekBounds"][0] - 1) / constants["segmentSize"])
endWeek = math.ceil(AAdict["weekBounds"][1] / constants["segmentSize"])
if endWeek - startWeek == numberOfIntervalVariables and 1 <= fixedDay <= constants["days"] and 1 <= fixedSlot <= constants["slots"]:
for index,intervalVariable in enumerate(AAdict["divisions"][0]):
model.add(cp.start_of(intervalVariable) == index * constants["days"] * constants["slots"] + (fixedDay - 1) * constants["slots"] + fixedSlot - 1)
else:
print("The AA doesn't match (incorrect number of interval variables or incorrect day/slot).") | StarcoderdataPython |
6531387 | from django.db import migrations
def operation_move_content_from_documents_to_ocr_app(apps, schema_editor):
DocumentPage = apps.get_model(
app_label='documents', model_name='DocumentPage'
)
DocumentPageContent = apps.get_model(
app_label='ocr', model_name='DocumentPageContent'
)
for document_page in DocumentPage.objects.using(alias=schema_editor.connection.alias).all():
DocumentPageContent.objects.using(alias=schema_editor.connection.alias).create(
document_page=document_page,
content=document_page.content_old or ''
)
class Migration(migrations.Migration):
dependencies = [
('ocr', '0002_documentpagecontent'),
]
operations = [
migrations.RunPython(
code=operation_move_content_from_documents_to_ocr_app
),
]
run_before = [
('documents', '0006_remove_documentpage_content_old'),
]
| StarcoderdataPython |
3516647 | from setuptools import setup, find_packages
from os import path
__version__ = '0.0.3'
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = "cvas",
version = __version__,
author = "<NAME>",
description ="Cross-validation framework",
long_description=long_description,
long_description_content_type='text/markdown',
license = "MIT",
packages=find_packages(exclude=['tests*']),
install_requires=[
'optuna',
'scikit-learn',
'pandas'
]
) | StarcoderdataPython |
8144638 | <gh_stars>0
import pytest
from graphql_relay import to_global_id
from rest_framework.utils import json
@pytest.mark.django_db()
def test_relay_mutation_create_resolver_throttle_classes(
graphql_throttle_resolver_four_client, user_factory
):
user = user_factory()
graphql_throttle_resolver_four_client.force_authenticate(user)
mutation = """
mutation CreateRelayBook($input: CreateRelayBookInput!) {
createRelayBook(input: $input) {
book {
title
}
errors {
field
messages
}
}
}
"""
# Request one, not throttled
response = graphql_throttle_resolver_four_client.execute(
mutation, {"input": {"title": ""}}
)
assert response.status_code == 200
assert json.loads(response.content) == {
"data": {
"createRelayBook": {
"errors": [
{"field": "title", "messages": ["This field may not be blank."]}
],
"book": None,
}
}
}
# Request two, throttled
response = graphql_throttle_resolver_four_client.execute(
mutation, {"input": {"title": ""}}
)
assert response.status_code == 200
assert json.loads(response.content) == {
"data": {"createRelayBook": None},
"errors": [
{
"locations": [{"column": 5, "line": 3}],
"message": "Request was throttled. Expected available in 86400 seconds.",
"path": ["createRelayBook"],
}
],
}
@pytest.mark.django_db()
def test_relay_mutation_update_resolver_throttle_classes(
graphql_throttle_resolver_five_client, user_factory
):
user = user_factory()
graphql_throttle_resolver_five_client.force_authenticate(user)
mutation = """
mutation UpdateRelayBook($input: UpdateRelayBookInput!) {
updateRelayBook(input: $input) {
errors {
field
messages
}
}
}
"""
# Request one, not throttled
response = graphql_throttle_resolver_five_client.execute(
mutation, {"input": {"id": to_global_id("BookType", 1), "title": ""}}
)
assert response.status_code == 200
assert json.loads(response.content) == {
"data": {"updateRelayBook": None},
"errors": [
{
"locations": [{"column": 5, "line": 3}],
"message": "No Book matches the given query.",
"path": ["updateRelayBook"],
}
],
}
# Request two, throttled
response = graphql_throttle_resolver_five_client.execute(
mutation, {"input": {"id": to_global_id("BookType", 1), "title": ""}}
)
assert response.status_code == 200
assert json.loads(response.content) == {
"data": {"updateRelayBook": None},
"errors": [
{
"locations": [{"column": 5, "line": 3}],
"message": "Request was throttled. Expected available in 86400 seconds.",
"path": ["updateRelayBook"],
}
],
}
| StarcoderdataPython |
6497234 | # -*- coding: utf-8 -*-
__copyright__ = "Copyright (c) 2014-2017 Agora.io, Inc."
import os
import sys
import time
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from src.RtmTokenBuilder import *
def main():
appID = "970CA35de60c44645bbae8a215061b33"
appCertificate = "5CFd2fd1755d40ecb72977518be15d3b"
user = "test_user_id"
expirationTimeInSeconds = 3600
currentTimestamp = int(time.time())
privilegeExpiredTs = currentTimestamp + expirationTimeInSeconds
token = RtmTokenBuilder.buildToken(appID, appCertificate, user, Role_Rtm_User, privilegeExpiredTs)
print("Rtm Token: {}".format(token))
if __name__ == "__main__":
main()
| StarcoderdataPython |
4882876 | <reponame>TomLXXVI/pyhc<gh_stars>1-10
"""Calculation of heat loss by transmission"""
import cmath
import numpy as np
from nummath import linearsystem
class BuildingElement:
def __init__(self, **kwargs):
"""
Configuration of building element.
Possible values for 'kwargs':
- A area of building element [m^2]
- t thickness of building element [m]
- k coefficient of conduction of building material [W/(m.K)]
- rho mass density of building material [kg/m^3]
- cm specific heat capacity of building material [J/(kg.K)]
- r specific thermal resistance of building element [(m^2.K)/W]
- ca specific thermal capacity [J/(m^2.K)]
- corr_r correction term to be added to 'r' [(m^2.K)/W]
"""
self._A = kwargs['A'] if 'A' in kwargs.keys() else 1.0
self._t = kwargs['t'] if 't' in kwargs.keys() else 0.0
self._k = kwargs['k'] if 'k' in kwargs.keys() else 0.0
self._rho = kwargs['rho'] if 'rho' in kwargs.keys() else 0.0
self._cm = kwargs['cm'] if 'cm' in kwargs.keys() else 0.0
self._corr_r = kwargs['corr_r'] if 'corr_r' in kwargs.keys() else 0.0
self._r = kwargs['r'] if 'r' in kwargs.keys() else 0.0
self._ca = kwargs['ca'] if 'ca' in kwargs.keys() else 0.0
# used for calculation of effective thermal capacity of BuildingPart: the number of layers the building element
# will be divided into
self.num_of_layers = 1.0
@property
def r(self):
"""Specific thermal resistance of building element [(m^2.K)/W]"""
if not self._r:
self._r = self._t / self._k + self._corr_r
return self._r
@property
def u(self):
"""Specific thermal conductance of building element [W/(m^2.K)]"""
return 1.0 / self.r
@property
def U(self):
"""Thermal conductance of building element [W/K]"""
return self.u * self._A
@property
def ca(self):
"""Specific thermal capacity of building element [J/(m^2.K)]"""
if not self._ca:
self._ca = self._rho * self._cm * self._t
return self._ca
@property
def C(self):
"""Thermal capacity of building element [J/K]"""
return self.ca * self._A
@property
def A(self):
"""Area of building element [m^2]"""
return self._A
@property
def thickness(self):
"""Thickness of building element [m]"""
return self._t
class BuildingCompositeElement:
"""Multiple building elements that are parallel connected (laying in the same plane)"""
def __init__(self, *building_elements):
self.building_elements = building_elements
self._r = 0.0
self._ca = 0.0
# used for calculation of effective thermal capacity of BuildingPart: the number of layers the building element
# will be divided into
self.num_of_layers = 1.0
@property
def r(self):
"""Specific thermal resistance of composite building element [(m^2.K)/W]"""
A = 0.0
U = 0.0
for elem in self.building_elements:
A += elem.A
U += elem.U
self._r = A / U
return self._r
@property
def ca(self):
"""Specific thermal capacity of composite building element [J/(m^2.K)]"""
A = 0.0
C = 0.0
for elem in self.building_elements:
A += elem.A
C += elem.C
self._ca = C / A
return self._ca
@property
def thickness(self):
"""
Thickness of the building element [m]
The thickness of the thickest building element is returned.
"""
return max([elem.thickness for elem in self.building_elements])
class BuildingPart:
"""Multiple building elements in series."""
def __init__(self, *building_elements, corr_u=0.0, r_conv_in=0.0, r_conv_out=0.0):
self.building_elements = building_elements
self._r_conv_in = r_conv_in # spec. convection resistance at inside surface of building part [(m^2.K)/W]
self._r_conv_out = r_conv_out # spec. convection resistance at outside surface of building part [(m^2.K)/W]
self._r = 0.0 # spec. thermal resistance of building part [(m^2.K)/W]
self._ca = 0.0 # spec. thermal capacity per unit area of building part [J/(m^2.K)]
self._ca_eff = None # spec. thermal effective capacity per unit area of building part [J/(m^2.K)]
self._t = 0.0 # total thickness of building part [m]
self._corr_u = corr_u # correction term for u-value
self.T_in = 0.0 # temperature at inside of building part [°C]
self.T_out = 0.0 # temperature at outside of building part [°C]
self.A_in = 0.0 # inside surface of building part [m^2]
self.A_out = 0.0 # outside surface of building part [m^2]
def set_temperatures(self, T_inside, T_outside):
"""Set inside and outside temperature at both sides of building part [°C]"""
self.T_in = T_inside
self.T_out = T_outside
def set_areas(self, A_inside, A_outside):
"""Set inside and outside area of building part [m^2]"""
self.A_in = A_inside
self.A_out = A_outside
@property
def r(self):
"""Specific thermal resistance of building part [(m^2.K)/W]"""
self._r = self._r_conv_in
for elem in self.building_elements:
self._r += elem.r
self._r = 1.0 / (1.0 / self._r + self._corr_u)
self._r += self._r_conv_out
return self._r
@property
def u(self):
"""Specific thermal conductance of building part [W/(m^2.K)]"""
return 1.0 / self.r
@property
def ca(self):
"""Specific thermal capacity of building part [J/(m^2.K)]"""
self._ca = 0.0
for elem in self.building_elements:
self._ca += elem.ca
return self._ca
def calculate_effective_capacity(self, T_out_ampl=5.0, T_out_period=24.0):
"""Calculate effective thermal capacity of building part"""
layers = []
for be in self.building_elements:
r_layer = be.r / be.num_of_layers
ca_layer = be.ca / be.num_of_layers
layers.append(BuildingElement(r=r_layer, ca=ca_layer))
r = [self._r_conv_out + 0.5 * layers[0].r]
        # interfaces between consecutive layers; starting at 1 yields
        # len(layers) + 1 resistances in total, matching the solver below
        for i in range(1, len(layers)):
r.append(0.5 * (layers[i - 1].r + layers[i].r))
r.append(0.5 * layers[-1].r + self._r_conv_in)
w = 2.0 * cmath.pi / (T_out_period * 3600.0)
xc = [1.0 / (w * layers[i].ca * 1.0j) for i in range(len(layers))]
To = cmath.rect(T_out_ampl, -cmath.pi / 2.0)
n = len(layers)
A = np.zeros((2 * n + 1, 2 * n + 1), dtype=complex)
for i, j in zip(range(0, n + 1), range(0, 2 * n + 1, 2)):
A[i, j] = -r[i]
if j < 2 * n:
A[i, j + 1] = -xc[i]
if i > 0 and j > 0:
A[i, j - 1] = xc[i - 1]
for i, j in zip(range(n + 1, 2 * n + 1), range(1, 2 * n, 2)):
A[i, j] = -1.0
A[i, j - 1] = 1.0
A[i, j + 1] = -1.0
B = np.zeros((2 * n + 1, 1), dtype=complex)
B[0] = -To
X = linearsystem.GaussElimin(A, B, pivot_on=True, dtype=complex).solve()
qr = X[-1]
qr_ampl = abs(qr)
qr_phi = cmath.phase(qr)
r_mr = 0.5 * self.r + self._r_conv_in
r_om = 0.5 * self.r + self._r_conv_out
qo_ampl = (abs(To) - r_mr * qr_ampl) / r_om
qo_phi = np.arctan2(
np.sin(np.pi / 2.0 + qr_phi) + qr_ampl * np.sin(qr_phi),
np.cos(np.pi / 2.0 + qr_phi) + qr_ampl * np.cos(qr_phi)
)
qm_ampl = np.sqrt(
(qo_ampl * np.cos(qo_phi) - qr_ampl * np.cos(qr_phi)) ** 2
+ (qo_ampl * np.sin(qo_phi) - qr_ampl * np.sin(qr_phi)) ** 2
)
self._ca_eff = qm_ampl / (w * r_mr * qr_ampl)
@property
def ca_eff(self):
"""
Specific thermal effective capacity of building part [J/(m^2.K)]
If None is returned, call 'calculate_effective_capacity(T_out_ampl=5.0, T_out_period=24.0)' first.
"""
return self._ca_eff
@property
def thickness(self):
"""Thickness of the building part [m]"""
return sum([elem.thickness for elem in self.building_elements])
class Space:
def __init__(self):
self.building_parts = None # a space is surrounded by building parts
self._T_in = 0.0 # inside temperature [°C]
self._T_out = 0.0 # outside temperature = temperature of building environment [°C]
self._Q_tr = 0.0 # transmission heat loss [W]
self._R_tr = 0.0 # global transmission resistance [K/W]
self._C_ra = 0.0 # room air thermal capacity [J/K]
self._C_bm_stat = 0.0 # building mass static thermal capacity [J/K]
self._C_bm_eff = 0.0 # building mass effective thermal capacity [J/K]
self._dim = {'l': 0.0, 'w': 0.0, 'h': 0.0} # room dimensions [m]
def set_temperatures(self, T_inside, T_outside):
"""Set space inside and outside temperature [°C]"""
self._T_in = T_inside
self._T_out = T_outside
def set_dimensions(self, length, width, height):
self._dim['l'] = length
self._dim['w'] = width
self._dim['h'] = height
def set_building_parts(self, *building_parts):
self.building_parts = building_parts
def Q_tr(self):
"""Calculate transmission heat loss of space [W]"""
self._Q_tr = 0.0 # transmission heat loss of space [W]
for bp in self.building_parts:
A_avg = (bp.A_in + bp.A_out) / 2.0
self._Q_tr += bp.u * A_avg * (bp.T_in - bp.T_out)
def R_tr(self):
"""Calculate global transmission heat resistance of space [K/W]"""
self._R_tr = (self._T_in - self._T_out) / self._Q_tr
def C_ra(self):
"""Calculate room air thermal capacity [J/K]"""
V = self._dim['l'] * self._dim['w'] * self._dim['h']
rho_air = 1.205 # [kg/m^3]
c_air = 1005.0 # [J/(kg.K)]
self._C_ra = rho_air * c_air * V
def C_bm_stat(self):
"""Calculate building mass static thermal capacity [J/K]"""
self._C_bm_stat = 0.0
for bp in self.building_parts:
A_avg = (bp.A_in + bp.A_out) / 2.0
self._C_bm_stat += bp.ca * A_avg
def C_bm_eff(self):
"""Calculate building mass effective thermal capacity [J/K]"""
self._C_bm_eff = 0.0
for bp in self.building_parts:
A_avg = (bp.A_in + bp.A_out) / 2.0
self._C_bm_eff += bp.ca_eff * A_avg
def calculate(self):
self.Q_tr()
self.R_tr()
self.C_ra()
self.C_bm_stat()
self.C_bm_eff()
def get_results(self):
return {
'Q_tr': self._Q_tr,
'R_tr': self._R_tr,
'C_ra': self._C_ra,
'C_bm_stat': self._C_bm_stat,
'C_bm_eff': self._C_bm_eff
}
| StarcoderdataPython |
1936737 | # -*- coding: utf-8 -*-
import unittest
import time
import six  # used in the __main__ block at the bottom
import threading
import logging
from click.testing import CliRunner
from nseta.cli.livecli import live_quote, scan, news, top_picks
from nseta.common import urls
from nseta.scanner.stockscanner import *
from baseUnitTest import baseUnitTest
from nseta.scanner.scannerFactory import *
from nseta.common.log import default_logger
class TestLivecli(baseUnitTest):
def setUp(self, redirect_logs=True):
super().setUp()
def test_live_quote(self):
runner = CliRunner()
result = runner.invoke(live_quote, args=['--symbol', 'BANDHANBNK', '-gowvb'])
self.assertEqual(result.exit_code , 0)
self.assertIn('Symbol | BANDHANBNK', result.output, str(result.output))
self.assertIn('Name | Bandhan Bank Limited', result.output, str(result.output))
self.assertIn('ISIN | INE545U01014', result.output, str(result.output))
self.assertIn('Last Updated |', result.output, str(result.output))
self.assertIn('Prev Close', result.output, str(result.output))
self.assertIn('Last Trade Price', result.output, str(result.output))
self.assertIn('Change', result.output, str(result.output))
self.assertIn('% Change', result.output, str(result.output))
self.assertIn('Avg. Price', result.output, str(result.output))
self.assertIn('Open', result.output, str(result.output))
self.assertIn('52 Wk High', result.output, str(result.output))
self.assertIn('Total Traded Volume', result.output, str(result.output))
self.assertIn('% Delivery', result.output, str(result.output))
self.assertIn('Bid Quantity | Bid Price | Offer_Quantity | Offer_Price', result.output, str(result.output))
def test_scan_intraday(self):
runner = CliRunner()
result = runner.invoke(scan, args=['--stocks', 'BANDHANBNK,HDFC', '--intraday', '--indicator', 'all', '--clear'])
self.assertEqual(result.exit_code , 0)
self.assertIn('Intraday scanning finished.', result.output, str(result.output))
def test_scan_intraday_background(self):
s = scannerFactory.scanner(ScannerType.Intraday, ['HDFC'], 'emac', True)
scannerinstance = scanner(indicator='rsi')
result = s.scan_background(scannerinstance, terminate_after_iter=2, wait_time=2)
self.assertEqual(result , 2)
def test_scan_live(self):
runner = CliRunner()
result = runner.invoke(scan, args=['--stocks', 'BANDHANBNK,HDFC', '--live', '--indicator', 'all', '--clear'])
self.assertEqual(result.exit_code , 0)
self.assertIn('Live scanning finished.', result.output, str(result.output))
def test_scan_live_background(self):
s = scannerFactory.scanner(ScannerType.Live, ['HDFC'], 'emac', True)
scannerinstance = scanner(indicator='rsi')
result = s.scan_background(scannerinstance, terminate_after_iter=2, wait_time=2)
self.assertEqual(result , 2)
def test_scan_swing(self):
runner = CliRunner()
result = runner.invoke(scan, args=['--stocks', 'BANDHANBNK,HDFC', '--swing', '--indicator', 'all', '--clear', '--analyse'])
self.assertEqual(result.exit_code , 0)
self.assertIn('Swing scanning finished.', result.output, str(result.output))
def test_scan_swing_background(self):
s = scannerFactory.scanner(ScannerType.Swing, ['HDFC'], 'emac', True)
scannerinstance = scanner(indicator='rsi')
result = s.scan_background(scannerinstance, terminate_after_iter=2, wait_time=0)
self.assertEqual(result , 0)
self.assertFalse(s.background)
def test_scan_volume(self):
runner = CliRunner()
result = runner.invoke(scan, args=['--stocks', 'BANDHANBNK', '--volume', '--clear', '--orderby', 'TDYVol(%)', '--analyse'])
self.assertEqual(result.exit_code , 0)
self.assertIn('Volume scanning finished.', result.output, str(result.output))
def test_scan_volume_intraday(self):
runner = CliRunner()
result = runner.invoke(scan, args=['--stocks', 'BANDHANBNK', '--volume', '--clear', '--orderby', 'TDYVol(%)', '--analyse'])
self.assertEqual(result.exit_code , 0)
self.assertIn('Volume scanning finished.', result.output, str(result.output))
def test_scan_volume_background(self):
s = scannerFactory.scanner(ScannerType.Volume, ['HDFC'], 'emac', True)
scannerinstance = scanner(indicator='rsi')
result = s.scan_background(scannerinstance, terminate_after_iter=2, wait_time=2)
self.assertEqual(result , 2)
def test_live_quote_inputs(self):
runner = CliRunner()
result = runner.invoke(live_quote, args=['-gowvb'])
self.assertEqual(result.exit_code , 0)
self.assertIn('Usage: [OPTIONS]', result.output, str(result.output))
def test_news(self):
runner = CliRunner()
result = runner.invoke(news, args=['--stocks', 'BANDHANBNK,ICICIBANK,ESCORTS,FSL,TCS,OIL,MOIL,ABB,ACC,DLF'])
self.assertEqual(result.exit_code , 0)
self.assertIn('News scanning finished', result.output, str(result.output))
def test_top_picks(self):
runner = CliRunner()
result = runner.invoke(top_picks, args=['--stocks', 'BANDHANBNK,ICICIBANK,ESCORTS,FSL,TCS,OIL,MOIL,ABB,ACC,DLF', '--intraday', '--indicator', 'macd', '--clear'])
self.assertEqual(result.exit_code , 0)
self.assertIn('TopPick scanning finished', result.output, str(result.output))
def test_scan_live_quote_background(self):
scanner = scannerFactory.scanner(ScannerType.Quote)
result = scanner.live_quote_background('HDFC', True, True, True, True, True, terminate_after_iter=2, wait_time=2)
self.assertEqual(result , 2)
def test_scan_inputs(self):
runner = CliRunner()
result = runner.invoke(scan, args=['--stocks', 'BANDHANBNK,HDFC', '--swing', '--intraday', '--indicator', 'all', '--clear'])
self.assertEqual(result.exit_code , 0)
self.assertIn('Choose only one of --live, --intraday, --swing or --volume options.', result.output, str(result.output))
self.assertIn('Usage: [OPTIONS]', result.output, str(result.output))
result = runner.invoke(scan, args=['--stocks', 'BANDHANBNK,HDFC', '--indicator', 'all', '--clear'])
self.assertEqual(result.exit_code , 0)
self.assertIn('Choose at least one of the --live, --intraday, --swing or --volume options.', result.output, str(result.output))
self.assertIn('Usage: [OPTIONS]', result.output, str(result.output))
result = runner.invoke(top_picks, args=['--stocks', 'BANDHANBNK,HDFC', '--indicator', 'all', '--clear'])
self.assertEqual(result.exit_code , 0)
self.assertIn('Choose at least one of the --intraday or --swing options.', result.output, str(result.output))
self.assertIn('Usage: [OPTIONS]', result.output, str(result.output))
result = runner.invoke(top_picks, args=['--stocks', 'BANDHANBNK,HDFC', '--swing', '--intraday', '--indicator', 'all', '--clear'])
self.assertEqual(result.exit_code , 0)
self.assertIn('Choose only one of --intraday or --swing options.', result.output, str(result.output))
self.assertIn('Usage: [OPTIONS]', result.output, str(result.output))
def test_scan_base_background(self):
scanner_type= ScannerType.Intraday
s = scannerFactory.scanner(scanner_type, ['HDFC'], 'rsi', True)
b = threading.Thread(name='scan_test_background',
target=s.scan, args=['Symbol'], daemon=True)
b.start()
time.sleep(0.1)
s.scan_background_interrupt()
b.join()
self.assertIn('This run of {} scan took'.format(scanner_type.name), self.capturedOutput.getvalue())
self.assertIn('Finished all iterations of scanning {}'.format(scanner_type.name), self.capturedOutput.getvalue())
def test_scan_background_None_instance(self):
scanner = scannerFactory.scanner(ScannerType.Intraday)
result = scanner.scan_background(None, terminate_after_iter=2, wait_time=2)
self.assertIn('Finished all iterations of scanning Intraday.', self.capturedOutput.getvalue())
def tearDown(self):
super().tearDown()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestLivecli)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if six.PY2:
if result.wasSuccessful():
print('tests OK')
for (test, error) in result.errors:
print('=========Error in: %s===========' % test)
print(error)
print('======================================')
for (test, failures) in result.failures:
print('=========Error in: %s===========' % test)
print(failures)
print('======================================')
| StarcoderdataPython |
9793168 | from transformer.helpers import zip_kv_pairs
from transformer.plugins import plugin, Contract
from transformer.task import Task2
@plugin(Contract.OnTask)
def plugin(task: Task2) -> Task2:
"""
Removes Chrome-specific, RFC-non-compliant headers starting with `:`.
Converts header names to lowercase to simplify further overriding.
Removes the cookie header as it is handled by Locust's HttpSession.
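
    E.g. (illustrative): headers {":authority": "example.com", "Cookie": "a=b",
    "Accept": "*/*"} become {"accept": "*/*"}.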
"""
headers = task.request.headers
if not isinstance(headers, dict):
headers = zip_kv_pairs(headers)
sanitized_headers = {
k.lower(): v
for (k, v) in headers.items()
if not k.startswith(":") and k.lower() != "cookie"
}
task.request = task.request._replace(headers=sanitized_headers)
return task
| StarcoderdataPython |
292026 | <gh_stars>1-10
# coding: utf-8
import requests
import datetime
import time
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import os
def plot_candlestick(df, df2):
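    """Plot OHLC candles from df (Time/Open/High/Low/Close columns) and overlay
    one trendline segment per row of df2, read positionally as (x0, y0, x1, y1)
    (column layout inferred from the code below)."""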
fig = go.Figure(data = [go.Candlestick(x = df['Time'],
open = df['Open'],
high = df['High'],
low = df['Low'],
close = df['Close'])])
for ind in df2.index:
x0 = df2.iloc[ind, 0]
y0 = df2.iloc[ind, 1]
x1 = df2.iloc[ind, 2]
y1 = df2.iloc[ind, 3]
fig.add_shape(type='line', x0= x0, x1 =x1, y0= y0, y1 = y1, line=dict(color='Blue',), xref='x',yref='y')
fig.show()
t = int(input("Enter number of test cases: "))
for i in range(0,t):
df = pd.read_csv("cryp" + str(i+1) + ".csv")
df = df.astype(float)
try:
df2 = pd.read_csv("output" + str(i+1) + ".txt", sep=" ", header=None)
df2 = df2.astype(float)
plot_candlestick(df, df2)
    except Exception:
        print("Sorry, no trendlines found for test case no. " + str(i + 1))
#print(df)
| StarcoderdataPython |
3426092 | <filename>atm90e26_SPI.py
import machine
import time
import ustruct as struct
SoftReset = 0x00 # Software Reset
SysStatus = 0x01 # System Status
FuncEn = 0x02 # Function Enable
SagTh = 0x03 # Voltage Sag Threshold
SmallPMod = 0x04 # Small-Power Mode
LastData = 0x06 # Last Read/Write SPI/UART Value
LSB = 0x08 # RMS/Power 16-bit LSB
CalStart = 0x20 # Calibration Start Command
PLconstH = 0x21 # High Word of PL_Constant
PLconstL = 0x22 # Low Word of PL_Constant
Lgain = 0x23 # L Line Calibration Gain
Lphi = 0x24 # L Line Calibration Angle
Ngain = 0x25 # N Line Calibration Gain
Nphi = 0x26 # N Line Calibration Angle
PStartTh = 0x27 # Active Startup Power Threshold
PNolTh = 0x28 # Active No-Load Power Threshold
QStartTh = 0x29 # Reactive Startup Power Threshold
QNolTh = 0x2A # Reactive No-Load Power Threshold
MMode = 0x2B # Metering Mode Configuration
CSOne = 0x2C # Checksum 1
AdjStart = 0x30 # Measurement Calibration Start Command
Ugain = 0x31 # Voltage rms Gain
IgainL = 0x32 # L Line Current rms Gain
IgainN = 0x33 # N Line Current rms Gain
Uoffset = 0x34 # Voltage Offset
IoffsetL = 0x35 # L Line Current Offset
IoffsetN = 0x36 # N Line Current Offset
PoffsetL = 0x37 # L Line Active Power Offset
QoffsetL = 0x38 # L Line Reactive Power Offset
PoffsetN = 0x39 # N Line Active Power Offset
QoffsetN = 0x3A # N Line Reactive Power Offset
CSTwo = 0x3B # Checksum 2
APenergy = 0x40 # Forward Active Energy
ANenergy = 0x41 # Reverse Active Energy
ATenergy = 0x42 # Absolute Active Energy
RPenergy = 0x43 # Forward (Inductive) Reactive Energy
RNenergy = 0x44 # Reverse (Capacitive) Reactive Energy
RTenergy = 0x45 # Absolute Reactive Energy
EnStatus = 0x46 # Metering Status
Irms = 0x48 # L Line Current rms
Urms = 0x49 # Voltage rms
Pmean = 0x4A # L Line Mean Active Power
Qmean = 0x4B # L Line Mean Reactive Power
Freq = 0x4C # Voltage Frequency
PowerF = 0x4D # L Line Power Factor
Pangle = 0x4E # Phase Angle between Voltage and L Line Current
Smean = 0x4F # L Line Mean Apparent Power
IrmsTwo = 0x68 # N Line Current rms
PmeanTwo = 0x6A # N Line Mean Active Power
QmeanTwo = 0x6B # N Line Mean Reactive Power
PowerFTwo = 0x6D # N Line Power Factor
PangleTwo = 0x6E # Phase Angle between Voltage and N Line Current
SmeanTwo = 0x6F # N Line Mean Apparent Power
class ATM90E26_SPI:
'''
spi - hardware or software SPI implementation
cs - Chip Select pin
'''
def __init__(self, spi, cs):
self.spi = spi
self.cs = cs
self.init_atm90()
'''
    RW - True for read, False for write
    address - register to operate on
val - value to write (if any)
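
    Example (illustrative; 'meter' is an ATM90E26_SPI instance):
        status = meter.comm_atm90(True, SysStatus, 0x0000)  # read system status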
'''
def comm_atm90(self, RW, address, val):
        # pack the value big-endian (MSB first) for the wire
buf = bytearray(2)
otw_val = struct.pack('>H', val)
# Set read write flag
address |= RW << 7
self.cs.value(False)
time.sleep_us(10)
self.spi.write(bytearray([address]))
        # Must wait 4 us for data to become valid
time.sleep_us(4)
# Read data
# Do for each byte in transfer
if(RW):
buf = self.spi.read(2)
else:
self.spi.write(otw_val) # write all the bytes
self.cs.value(1)
return int.from_bytes(buf, 'big')
def init_atm90(self):
self.comm_atm90(False, SoftReset, 0x789A) # Perform soft reset
# Voltage sag irq=1, report on warnout pin=1, energy dir change irq=0
self.comm_atm90(False, FuncEn, 0x0030)
self.comm_atm90(False, SagTh, 0x1F2F) # Voltage sag threshhold
# Set metering calibration values
# Metering calibration startup command. Register 21 to 2B need to be set
self.comm_atm90(False, CalStart, 0x5678)
self.comm_atm90(False, PLconstH, 0x00B9) # PL Constant MSB
self.comm_atm90(False, PLconstL, 0xC1F3) # PL Constant LSB
self.comm_atm90(False, Lgain, 0x1D39) # Line calibration gain
self.comm_atm90(False, Lphi, 0x0000) # Line calibration angle
# Active Startup Power Threshold
self.comm_atm90(False, PStartTh, 0x08BD)
# Active No-Load Power Threshold
self.comm_atm90(False, PNolTh, 0x0000)
# Reactive Startup Power Threshold
self.comm_atm90(False, QStartTh, 0x0AEC)
# Reactive No-Load Power Threshold
self.comm_atm90(False, QNolTh, 0x0000)
# Metering Mode Configuration. All defaults. See pg 31 of datasheet.
self.comm_atm90(False, MMode, 0x9422)
# Write CSOne, as self calculated
self.comm_atm90(False, CSOne, 0x4A34)
print("Checksum 1:")
# Checksum 1. Needs to be calculated based off the above values.
print(hex(self.comm_atm90(True, CSOne, 0x0000)))
# Set measurement calibration values
# Measurement calibration startup command, registers 31-3A
self.comm_atm90(False, AdjStart, 0x5678)
self.comm_atm90(False, Ugain, 0xD464) # Voltage rms gain
self.comm_atm90(False, IgainL, 0x6E49) # L line current gain
self.comm_atm90(False, Uoffset, 0x0000) # Voltage offset
self.comm_atm90(False, IoffsetL, 0x0000) # L line current offset
self.comm_atm90(False, PoffsetL, 0x0000) # L line active power offset
# L line reactive power offset
self.comm_atm90(False, QoffsetL, 0x0000)
# Write CSTwo, as self calculated
self.comm_atm90(False, CSTwo, 0xD294)
print("Checksum 2:")
# Checksum 2. Needs to be calculated based off the above values.
print(hex(self.comm_atm90(True, CSTwo, 0x0000)))
# Checks correctness of 21-2B registers and starts normal metering if ok
self.comm_atm90(False, CalStart, 0x8765)
# Checks correctness of 31-3A registers and starts normal measurement if ok
self.comm_atm90(False, AdjStart, 0x8765)
systemstatus = self.GetSysStatus()
if (systemstatus & 0xC000):
# checksum 1 error
print("Checksum 1 Error!!")
if (systemstatus & 0x3000):
# checksum 2 error
print("Checksum 2 Error!!")
def GetSysStatus(self):
return self.comm_atm90(True, SysStatus, 0x0000)
def GetLineVoltage(self):
voltage = self.comm_atm90(True, Urms, 0xFFFF)
return voltage/100.0
def GetMeterStatus(self):
return self.comm_atm90(True, EnStatus, 0xFFFF)
def GetLineCurrent(self):
current = self.comm_atm90(True, Irms, 0xFFFF)
return current/1000.0
def GetActivePower(self):
        # Pmean is in two's complement; MSB is the sign bit
        apower = self.comm_atm90(True, Pmean, 0xFFFF)
        if apower & 0x8000:  # negative
            apower -= 0x10000
        return apower / 1.0
def GetFrequency(self):
freq = self.comm_atm90(True, Freq, 0xFFFF)
return freq/100.0
def GetPowerFactor(self):
pf = self.comm_atm90(True, PowerF, 0xFFFF) # MSB is signed bit
# if negative
if(pf & 0x8000):
pf = (pf & 0x7FFF)*-1
return pf/1000.0
def GetImportEnergy(self):
# Register is cleared after reading
ienergy = self.comm_atm90(True, APenergy, 0xFFFF)
return ienergy*0.0001 # returns kWh if PL constant set to 1000imp/kWh
def GetExportEnergy(self):
# Register is cleared after reading
eenergy = self.comm_atm90(True, ANenergy, 0xFFFF)
return eenergy*0.0001 # returns kWh if PL constant set to 1000imp/kWh
def test_dual_atm90e26():
sck = machine.Pin(5, machine.Pin.OUT)
mosi = machine.Pin(18, machine.Pin.OUT)
miso = machine.Pin(19, machine.Pin.IN)
cs1 = machine.Pin(15, machine.Pin.OUT)
cs2 = machine.Pin(33, machine.Pin.OUT)
spi = machine.SPI(1, baudrate=200000, bits=8, polarity=1, phase=1,
firstbit=machine.SPI.MSB, sck=sck, mosi=mosi, miso=miso)
all_ics = [ATM90E26_SPI(spi, cs1), ATM90E26_SPI(spi, cs2)]
while True:
for energy_ic in all_ics:
sys_val = energy_ic.GetSysStatus()
print("Sys Status:", hex(sys_val))
met_val = energy_ic.GetMeterStatus()
print("Met Status:", hex(met_val))
voltage = energy_ic.GetLineVoltage()
print("Voltage:", voltage)
current = energy_ic.GetLineCurrent()
print("Current:", current)
time.sleep_ms(1000)
| StarcoderdataPython |
293545 | <gh_stars>0
from collections import defaultdict
import math
import time
import random
import torch
class WordEmbSkip(torch.nn.Module):
def __init__(self, nwords, emb_size):
super(WordEmbSkip, self).__init__()
""" word embeddings """
self.word_embedding = torch.nn.Embedding(nwords, emb_size)
# uniform initialization
torch.nn.init.uniform_(self.word_embedding.weight, -0.25, 0.25)
""" context embeddings"""
self.context_embedding = torch.nn.Parameter(torch.randn(emb_size, nwords))
def forward(self, word):
embed_word = self.word_embedding(word) # 1 * emb_size
out = torch.mm(embed_word, self.context_embedding) # 1 * nwords
return out
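
# Shape sketch (illustrative): for a center-word id tensor of shape [1],
# embed_word is [1, EMB_SIZE] and the returned logits are [1, nwords],
# one score per candidate context word.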
N = 2 # length of window on each side (so N=2 gives a total window size of 5, as in t-2 t-1 t t+1 t+2)
EMB_SIZE = 128 # The size of the embedding
embeddings_location = "embeddings.txt" # the file to write the word embeddings to
labels_location = "labels.txt" # the file to write the labels to
# We reuse the data reading from the language modeling class
w2i = defaultdict(lambda: len(w2i))
S = w2i["<s>"]
UNK = w2i["<unk>"]
def read_dataset(filename):
with open(filename, "r") as f:
for line in f:
yield [w2i[x] for x in line.strip().split(" ")]
# Read in the data
train = list(read_dataset("../data/ptb/train.txt"))
w2i = defaultdict(lambda: UNK, w2i)
dev = list(read_dataset("../data/ptb/valid.txt"))
i2w = {v: k for k, v in w2i.items()}
nwords = len(w2i)
with open(labels_location, 'w') as labels_file:
for i in range(nwords):
labels_file.write(i2w[i] + '\n')
# initialize the model
model = WordEmbSkip(nwords, EMB_SIZE)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
tensor_type = torch.LongTensor  # avoid shadowing the built-in `type`
use_cuda = torch.cuda.is_available()
if use_cuda:
    tensor_type = torch.cuda.LongTensor
    model.cuda()
# Calculate the loss value for the entire sentence
def calc_sent_loss(sent):
    # No explicit padding is added: context positions that fall outside the
    # sentence boundaries are mapped to the sentence marker S below.
# Step through the sentence
losses = []
for i, word in enumerate(sent):
for j in range(1, N + 1):
for direction in [-1, 1]:
                c = torch.tensor([word]).type(tensor_type)  # tensor for the center word
                context_id = sent[i + direction * j] if 0 <= i + direction * j < len(sent) else S
                context = torch.tensor([context_id]).type(tensor_type)  # tensor for the context word
logits = model(c)
loss = criterion(logits, context)
losses.append(loss)
return torch.stack(losses).sum()
MAX_LEN = 100
for ITER in range(100):
print("started iter %r" % ITER)
# Perform training
random.shuffle(train)
train_words, train_loss = 0, 0.0
start = time.time()
model.train()
for sent_id, sent in enumerate(train):
my_loss = calc_sent_loss(sent)
train_loss += my_loss.item()
train_words += len(sent)
# Back prop while training
optimizer.zero_grad()
my_loss.backward()
optimizer.step()
if (sent_id + 1) % 5000 == 0:
print("--finished %r sentences" % (sent_id + 1))
train_ppl = float('inf') if train_loss / train_words > 709 else math.exp(train_loss / train_words)
print("iter %r: train loss/word=%.4f, ppl=%.4f, time=%.2fs" % (
ITER, train_loss / train_words, train_ppl, time.time() - start))
# Evaluate on dev set
dev_words, dev_loss = 0, 0.0
start = time.time()
model.eval()
for sent_id, sent in enumerate(dev):
my_loss = calc_sent_loss(sent)
dev_loss += my_loss.item()
dev_words += len(sent)
dev_ppl = float('inf') if dev_loss / dev_words > 709 else math.exp(dev_loss / dev_words)
print("iter %r: dev loss/word=%.4f, ppl=%.4f, time=%.2fs" % (
ITER, dev_loss / dev_words, dev_ppl, time.time() - start))
print("saving embedding files")
with open(embeddings_location, 'w') as embeddings_file:
W_w_np = model.word_embedding.weight.data.cpu().numpy()
for i in range(nwords):
ith_embedding = '\t'.join(map(str, W_w_np[i]))
embeddings_file.write(ith_embedding + '\n')
| StarcoderdataPython |
3261045 | from datetime import date, datetime
from flask import Blueprint, render_template, abort
from flask_babel import _
from flask_login import current_user
from sqlalchemy import desc
from app import db, cache
from app.models.activity import Activity
from app.models.news import News
from app.models.page import Page, PageRevision
from app.service import page_service
blueprint = Blueprint('home', __name__)
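# get_revisions below is memoized for 60 seconds, keyed on its `data`
# argument, so the homepage does not rebuild the activity panel and page
# revisions on every request.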
@cache.memoize(timeout=60)
def get_revisions(data):
pages = []
revisions = []
for path in data:
if path == 'activities':
revision = PageRevision(None, None, None, None, None, None, None)
activities = Activity.query \
.filter(Activity.end_time > datetime.now()) \
.order_by(Activity.start_time.asc())
revision.activity = \
render_template('activity/view_simple.htm',
activities=activities.paginate(1, 4, False))
revisions.append(revision)
continue
page = page_service.get_page_by_path(Page.strip_path(path))
pages.append(page)
if not page:
revision = PageRevision(None, None, None, None, None, None, None)
revision.title = _('Not found!')
revision.content = _('Page not found')
revisions.append(revision)
continue
        revision = page.get_latest_revision()
        if not revision:
            return abort(500)
        revision.test = path
        revisions.append(revision)
return revisions
@blueprint.route('/', methods=['GET'])
def home():
data = ['activities',
'contact']
revisions = get_revisions(data)
news = News.query.filter(News.publish_date <= date.today(),
db.or_(News.archive_date >= date.today(),
News.archive_date == None), # noqa
db.or_(current_user.has_paid,
db.not_(News.needs_paid)))\
.order_by(desc(News.publish_date)).limit(8).all()
return render_template('home/home.htm', revisions=revisions,
title='Homepage', news=news)
| StarcoderdataPython |
11286108 | <reponame>nasqueron/merge-dictionaries<filename>src/mergedictionaries/app/__init__.py
from .app import run
| StarcoderdataPython |
11333756 | import unittest
import rasterio
import numpy as np
from treeseg import *
from pyproj import Proj
import pyfor
from affine import Affine
rasterio_object = rasterio.open('data/test.tif')
array = rasterio_object.read(1)
class HeightModelTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.test_hm_array = base.HeightModel(array)
cls.crs = Proj({'init': 'epsg:26910'}).srs
cls.pyfor_chm = pyfor.cloud.Cloud('data/test.las').chm(1, interp_method="nearest")
@classmethod
def tearDownClass(cls):
pass
def test_array_load_no_crs_no_affine(self):
# Test default behavior
self.assertEqual(type(self.test_hm_array.array), np.ndarray)
self.assertIsNone(self.test_hm_array.crs)
self.assertIsNone(self.test_hm_array.affine)
    def test_array_load_crs_no_affine(self):
        hm = base.HeightModel(array, crs={'init': 'epsg:3007'})
        self.assertEqual(type(hm.array), np.ndarray)
        self.assertIsNotNone(hm.crs)
        self.assertIsNone(hm.affine)
def test_pyfor_load_no_crs_no_affine(self):
hm = base.HeightModel.from_pyfor(self.pyfor_chm)
self.assertIsNone(hm.crs)
self.assertIsNone(hm.affine)
def test_pyfor_load_crs_affine(self):
self.pyfor_chm.crs = self.crs
hm = base.HeightModel.from_pyfor(self.pyfor_chm)
self.assertEqual(type(hm.crs), str)
self.assertEqual(type(hm.affine), Affine)
| StarcoderdataPython |
1745626 | from __future__ import annotations
from dataclasses import make_dataclass
from warnings import warn
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.regression.rolling import RollingOLS
from pqr.utils import align
def extract_annualizer(df_or_series: pd.DataFrame | pd.Series) -> float:
freq_alias = {
"A": 1, "AS": 1, "BYS": 1, "BA": 1, "BAS": 1, "RE": 1, # yearly
"Q": 4, "QS": 4, "BQ": 4, "BQS": 4, # quarterly
"M": 12, "MS": 12, "BM": 12, "BMS": 12, "CBM": 12, "CBMS": 12, # monthly
"W": 52, # weekly
"B": 252, "C": 252, "D": 252, # daily
}
if not isinstance(df_or_series.index, pd.DatetimeIndex):
raise TypeError("df or series must have pd.DateTimeIndex to infer periodicity")
idx = df_or_series.index
inferred_freq = getattr(idx, "inferred_freq", None)
annualizer = freq_alias.get(inferred_freq)
if annualizer is None:
warn("periodicity of df or series cannot be determined correctly, estimation is used")
years_approx = (idx[-1] - idx[0]).days / 365.25
annualizer = len(idx) / years_approx
return annualizer
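# Illustrative example (values assumed): for a series with a business-day
# index, inferred_freq is "B", so extract_annualizer returns 252 and an
# annualized mean return would be returns.mean() * 252.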
def adjust(
returns: pd.Series,
rf: float | pd.Series
) -> pd.Series:
if isinstance(rf, pd.Series):
returns, rf = align(returns, rf)
return returns - rf
def stats_container_factory(metric_name: str) -> type:
return make_dataclass(
metric_name,
[
("value", float),
("t_stat", float),
("p_value", float),
],
namespace={
"template": property(lambda self: "{value:.2f}{stars} ({t_stat:.2f})"),
"count_stars": lambda self: 3 if self.p_value < 0.01 else (
2 if self.p_value < 0.05 else (
1 if self.p_value < 0.1 else 0
)
)
}
)
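# For example, stats_container_factory("Alpha") builds a dataclass holding
# (value, t_stat, p_value); with p_value=0.03, count_stars() returns 2,
# matching significance at the 5% level.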
def estimate_ols(
returns: pd.Series,
benchmark: pd.Series,
rf: float = 0.0
):
adjusted_returns = adjust(returns, rf)
adjusted_benchmark = adjust(benchmark, rf)
y, x = align(adjusted_returns, adjusted_benchmark)
x = sm.add_constant(x.to_numpy())
ols = sm.OLS(y.to_numpy(), x)
return ols.fit()
def estimate_rolling_ols(
returns: pd.Series,
benchmark: pd.Series,
window: int,
rf: float = 0.0,
):
adjusted_returns = adjust(returns, rf)
adjusted_benchmark = adjust(benchmark, rf)
y, x = align(adjusted_returns, adjusted_benchmark)
x = sm.add_constant(x.to_numpy())
ols = RollingOLS(y.to_numpy(), x, window=window)
return ols.fit()
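# estimate_holding: a +1 change in the picks matrix marks a long rebalancing
# and a -1 change a short one; the average holding period is the number of
# periods divided by the mean of the two rebalancing counts.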
def estimate_holding(picks: pd.DataFrame) -> int:
diff = np.diff(picks.to_numpy(), axis=0)
rebalancings_long = (diff == 1).any(axis=1).sum()
rebalancings_short = (diff == -1).any(axis=1).sum()
    avg_rebalancings = (rebalancings_long + rebalancings_short) / 2
    return round(len(diff) / avg_rebalancings)
| StarcoderdataPython |
6678572 | <filename>src/lhtmlLib/insert_in_text.py
import re
def insert_element_from_index(text, regex_to_insert, index_store):
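    """Replace every placeholder matched by regex_to_insert with its stored
    content; group(1) of each match must capture the integer index into
    index_store."""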
new_text = ''
index_previous = 0
match = re.finditer(regex_to_insert, text)
for it in match:
index = int(it.group(1))
content = index_store[index]
new_text += text[index_previous:it.span()[0]]
new_text += content
index_previous = it.span()[1]
new_text += text[index_previous:]
return new_text
def remove_element_to_index(text, regex_to_remove, name_to_store, index_store):
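    """Cut every span matched by regex_to_remove out of text, append it to
    index_store, and leave a 'name_to_store::[i]' placeholder that
    insert_element_from_index can expand again later."""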
new_text = ''
index_previous = 0
regex = re.compile(regex_to_remove, re.DOTALL | re.MULTILINE)
match = re.finditer(regex, text)
for it in match:
content_to_remove = text[it.span()[0]:it.span()[1]]
index = len(index_store)
index_store.append(content_to_remove)
        # replace the removed span with an indexed placeholder
new_text += text[index_previous:it.span()[0]]
new_text += f'{name_to_store}::[{index}]'
index_previous = it.span()[1]
new_text += text[index_previous:]
return new_text | StarcoderdataPython |
3453727 | import os, re, sys, shutil
import subprocess as sp
import random, string
import numpy as np
from .utils import *
#DEBUG=False
# load package locations from the yaml file; note this is a global dict
package_locs = load_package_locations()
def pfunc(seq, package='vienna_2', T=37,
constraint=None, motif=None, linear=False,
dangles=True, noncanonical=False, pseudo=False, dna=False, DIRLOC=None,
bpps=False, param_file=None, coaxial=True, reweight=None,
return_free_energy = False, beam_size=100, DEBUG=False):
''' Compute partition function for RNA sequence.
Args:
seq (str): nucleic acid sequence
T (float): temperature (Celsius)
constraint (str): structure constraints
motif (str): argument to vienna motif
linear (bool): call LinearPartition to estimate Z in Vienna or Contrafold
pseudo (bool): nupack only, make prediction with pseudoknots
dna (bool): nupack only, make prediction for DNA
dangles (bool): dangles or not, specifiable for vienna, nupack
coaxial (bool): coaxial stacking or not, specifiable for rnastructure, vfold
noncanonical(bool): include noncanonical pairs or not (for contrafold, RNAstructure (Cyclefold))
beam_size (int): beam size option for LinearPartition.
Possible packages:
'vienna_2', 'vienna_1','contrafold_1','contrafold_2','nupack_95','nupack_99','rnasoft_2007','rnasoft_1999','rnastructure','vfold_0','vfold_1'
    Returns
        float: partition function Z (or the ensemble free energy if return_free_energy=True)
'''
try:
pkg, version = package.lower().split('_')
except:
pkg, version = package.lower(), None
if not bpps: # if bpps, already printed these warnings
if not dangles and pkg not in ['vienna', 'nupack']:
print('Warning: %s does not support dangles options' % pkg)
if not coaxial and pkg not in ['rnastructure', 'vfold']:
print('Warning: %s does not support coaxial options' % pkg)
if linear and pkg not in ['vienna','contrafold','eternafold']:
print('Warning: LinearPartition only implemented for vienna, contrafold, eternafold.')
if pkg=='eternafold' and package_locs['eternafoldparams'] is None:
raise RuntimeError('Error: need to set path to EternaFold params to use eternafold hotkey.')
if pseudo and pkg !='nupack':
raise ValueError('pseudo only for use with nupack')
if pkg=='vienna':
if linear:
Z, tmp_file = pfunc_linearpartition_(seq, package='vienna',bpps=bpps, beam_size=beam_size,
return_free_energy=return_free_energy, DEBUG=DEBUG)
else:
Z, tmp_file = pfunc_vienna_(seq, version=version, T=T, dangles=dangles,
constraint=constraint, motif=motif, bpps=bpps, param_file=param_file,
reweight=reweight, return_free_energy=return_free_energy, DEBUG=DEBUG)
elif pkg=='contrafold':
if linear:
Z, tmp_file = pfunc_linearpartition_(seq, package='contrafold', bpps=bpps, beam_size=beam_size,
return_free_energy=return_free_energy, DEBUG=DEBUG)
else:
Z, tmp_file = pfunc_contrafold_(seq, version=version, T=T,
constraint=constraint, bpps=bpps, param_file=param_file, DIRLOC=DIRLOC,
return_free_energy=return_free_energy)
elif pkg=='rnastructure':
Z, tmp_file = pfunc_rnastructure_(seq, version=version, T=T, coaxial=coaxial,
constraint=constraint, bpps=bpps, return_free_energy=return_free_energy, DEBUG=DEBUG)
elif pkg=='rnasoft':
if constraint is not None:
print("ERROR: RNAsoft is unable to handle constraints for calculating \
partition functions, returning unconstrained Z.")
Z, tmp_file = pfunc_rnasoft_(seq, version=version, T=T, constraint=constraint,
bpps=bpps,return_free_energy=return_free_energy, DEBUG=DEBUG)
elif pkg=='nupack':
Z, tmp_file = pfunc_nupack_(seq, version=version, dangles=dangles, T=T, pseudo=pseudo, dna=dna, constraint=constraint,
return_free_energy=return_free_energy, DEBUG=DEBUG)
elif pkg=='vfold':
Z, tmp_file = pfunc_vfold_(seq, version=version, T=T, coaxial=coaxial, DEBUG=DEBUG)
elif pkg=='eternafold':
if linear:
Z, tmp_file = pfunc_linearpartition_(seq, package='eternafold', bpps=bpps, beam_size=beam_size,
return_free_energy=return_free_energy, DEBUG=DEBUG)
else:
Z, tmp_file = pfunc_contrafold_(seq, version=version, T=T, constraint=constraint,
bpps=bpps, param_file=package_locs['eternafoldparams'], DIRLOC=DIRLOC, return_free_energy=return_free_energy, DEBUG=DEBUG)
else:
raise ValueError('package %s not understood.' % package)
if bpps:
return Z, tmp_file
else:
if tmp_file:
if os.path.exists(tmp_file):
os.remove(tmp_file)
return Z
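# Minimal usage sketch (assuming a vienna_2 install is configured in the
# package locations file): pfunc('GGGGAAAACCCC') returns the partition
# function Z, while pfunc('GGGGAAAACCCC', return_free_energy=True) returns
# the ensemble free energy in kcal/mol instead.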
def pfunc_vienna_(seq, T=37, version='2', constraint=None, motif=None, param_file=None,
dangles=True, bpps=False, reweight=None, return_free_energy=False, DEBUG=False):
"""get partition function structure representation and Z
Args:
seq (str): nucleic acid sequence
T (float): temperature
constraint (str): structure constraints
motif (str): argument to vienna motif
Returns
str, float: secondary structure representation and Z
"""
if not version:
version='2'
if version.startswith('2'):
LOC=package_locs['vienna_2']
elif version.startswith('1'):
LOC=package_locs['vienna_1']
else:
raise RuntimeError('Error, vienna version %s not present' % version)
command = ['%s/RNAfold' % LOC, '-p', '-T', str(T)]
if version.startswith('2'):
command.append('--bppmThreshold=0.0000000001')
output_id = local_rand_filename()
output_dot_ps_file = "%s_0001_dp.ps" % output_id
command.append('--id-prefix=%s' % output_id)
else:
output_dot_ps_file = 'dot.ps'
if motif is not None:
command.append("--motif=%s" % motif)
if constraint is not None:
fname = write([seq, constraint])
command.append('-C')
if version=='2':
command.append('--enforceConstraint')
else:
fname = write([seq])
if not dangles:
command.append('--dangles=0')
if reweight is not None:
command.append('--commands=%s' % reweight)
if param_file:
command.append('--paramFile=%s' % param_file)
with open(fname) as f:
if DEBUG: print(fname)
if DEBUG: print(' '.join(command))
p = sp.Popen(command, stdin=f, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
if DEBUG:
print('stdout')
print(stdout)
print('stderr')
print(stderr)
if stderr.decode('utf-8').startswith('WARNING: '):
print(stderr)
if p.returncode:
raise Exception('RNAfold failed: on %s\n%s' % (seq, stderr))
os.remove(fname)
if version.startswith('2'):
if os.path.exists('{}_0001_ss.ps'.format(output_id)):
os.remove("%s_0001_ss.ps" % output_id)
else:
print('Warning! {} does not exists!'.format(output_id))
if 'omitting constraint' in stderr.decode('utf-8'):
free_energy = np.inf # Impossible structure
else:
m = re.search('([,|\(\.\)\]\[\{\}]+)\s+\[\s*(-*[0-9]+\.[0-9]+)', stdout.decode('utf-8'))
free_energy = float(m.group(2))
if DEBUG: print('free_energy: ', free_energy)
if return_free_energy:
return free_energy, output_dot_ps_file
else: # return Z
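        # Z = exp(-dG / (R*T)) with R in kcal/(mol*K) and T converted to Kelvin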
return np.exp(-1*free_energy/(.0019899*(273+T))), output_dot_ps_file
def pfunc_contrafold_(seq, T=37, version='2', constraint=None, bpps=False,
param_file=None, return_free_energy=False, DIRLOC=None, DEBUG=False):
"""get partition function structure representation and free energy
Args:
seq (str): nucleic acid sequence
T (float): temperature
constraint (str): structure constraints
motif (str): argument to vienna motif
        DIRLOC: overrides the contrafold location (useful if there are several EternaFold builds to compare)
Returns
float: partition function
    Note: If the constraint is impossible then Z will be equal to the unconstrained Z
"""
if not version: version='2'
fname = '%s.in' % filename()
    if DIRLOC is not None:
        LOC = DIRLOC
    elif version.startswith('2'):
        LOC = package_locs['contrafold_2']
    elif version.startswith('1'):
        LOC = package_locs['contrafold_1']
else:
raise RuntimeError('Error, Contrafold version %s not present' % version)
command = ['%s/contrafold' % LOC, 'predict', fname]
if bpps:
posterior_fname = '%s.posteriors' % filename()
command = command + ['--posteriors', '0.0000000001', posterior_fname]
else:
command.append('--partition')
if param_file is not None:
command = command + ['--params', param_file]
if constraint is not None:
convert_dbn_to_contrafold_input(seq, constraint, fname)
command.append('--constraints')
else:
convert_dbn_to_contrafold_input(seq, ''.join(['.' for x in range(len(seq))]), fname)
if DEBUG: print(' '.join(command))
p = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
if DEBUG:
print('stdout')
print(stdout)
print('stderr')
print(stderr)
if p.returncode:
raise Exception('Contrafold failed: on %s\n%s' % (seq, stderr))
#os.remove(fname)
if not bpps:
logZ = float(stdout.decode('utf-8').rstrip().split()[-1])
if return_free_energy:
return -1*logZ, None
else:
return np.exp(logZ), None
else:
return 0, posterior_fname
def pfunc_rnasoft_(seq, version='99', T=37, constraint=None, bpps=False, return_free_energy=False, DEBUG=False):
DIR = package_locs['rnasoft']
if not version: version='blstar'
#note for mfe will use simfold instead of simfold pf
# supported versions: 07, 99, 99-no-dangles, BL-no-dangles, BLstar, LAM-CG, NOM-CG
param_locs = {'07': '%s/params/CG_best_parameters_ISMB2007.txt' % DIR,
'99': '%s/params/turner_parameters_fm363_constrdangles.txt' % DIR,
'99-no-dangles': '%s/params/turner_parameters_fm363_dangles0.txt' % DIR,
'bl-no-dangles': '%s/params/BL-no-dangles.txt' % DIR,
'blstar': '%s/params/BLstar.txt' % DIR,
'lam-cg': '%s/params/LAM-CG.txt' % DIR,
'nom-cg': '%s/params/NOM-CG.txt' % DIR}
command = ['%s/simfold_pf' % DIR, '-s', seq, '-p', param_locs[version]]
if DEBUG: print(' '.join(command))
p = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
if DEBUG:
print('stdout')
print(stdout)
print('stderr')
print(stderr)
bpps_fname = '%s.bpps' % filename()
if bpps:
with open(bpps_fname,'w') as f:
for line in stdout.decode('utf-8').split('\n')[5:]:
if not 'Glog' in line and len(line) > 1:
f.write(line+'\n')
if p.returncode:
raise Exception('RNAsoft partition failed: on %s\n%s' % (seq, stderr))
Z = float(stdout.decode('utf-8').split('\n')[1].split()[-1])
if return_free_energy:
return -1*np.log(Z), bpps_fname
else:
return Z, bpps_fname
def pfunc_nupack_(seq, version='95', T=37, dangles=True, constraint=None, return_free_energy=False, pseudo=False, dna=False, DEBUG=False):
if not version: version='95'
nupack_materials={'95': 'rna1995', '99': 'rna1999', 'dna':'dna1998'}
if dna: version='dna'
DIR = package_locs['nupack']
if dangles:
dangle_option='some'
else:
dangle_option='none'
if constraint is not None:
if '.' in constraint:
            print("Warning: NUPACK does not handle '.' and 'x' folding constraint characters")
seqfile = write([seq, constraint])
command=['%s/energy' % DIR, '%s' % seqfile.replace('.in',''),'-T', str(T),
'-material', nupack_materials[version], '-dangles', dangle_option]
else:
seqfile = write([seq])
command=['%s/pfunc' % DIR, '%s' % seqfile.replace('.in',''),'-T', str(T),
'-material', nupack_materials[version], '-dangles', dangle_option]
if pseudo:
command.append('--pseudo')
if DEBUG: print(' '.join(command))
p = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
if DEBUG:
print('stdout')
print(stdout)
print('stderr')
print(stderr)
if p.returncode:
raise Exception('Nupack pfunc failed: on %s\n%s' % (seq, stderr))
if constraint is not None:
free_energy = float(stdout.decode('utf-8').split('\n')[-2])
Z=np.exp(-1*free_energy/(.0019899*(273+T)))
else:
free_energy = float(stdout.decode('utf-8').split('\n')[-3])
Z=float(stdout.decode('utf-8').split('\n')[-2])
os.remove(seqfile)
if return_free_energy:
return free_energy, None
else:
return Z, None
def pfunc_rnastructure_(seq, version=None, T=37, constraint=None, coaxial=True,
bpps=False, return_free_energy=False, DEBUG=False):
"""get partition function structure representation and free energy
Args:
seq (str): nucleic acid sequence
T (float): temperature
constraint (str): structure constraints
motif (str): argument to vienna motif
coaxial (bool): Coaxial stacking or not (default True)
Returns
float: partition function
"""
seqfile = write([seq])
pfsfile = '%s.pfs' % filename()
DIR = package_locs['rnastructure']
command = ['%s/partition' % DIR, seqfile, pfsfile, '-T', str(T+273)]
if not coaxial:
command.extend(['--disablecoax'])
if constraint is not None:
fname = '%s.CON' % filename()
#print(fname)
convert_dbn_to_RNAstructure_input(seq, constraint, fname)
command.extend(['--constraint', fname])
if DEBUG: print(' '.join(command))
p = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
if DEBUG:
print('stdout')
print(stdout)
print('stderr')
print(stderr)
if p.returncode:
raise Exception('RNAstructure partition failed: on %s\n%s' % (seq, stderr))
os.remove(seqfile)
if not bpps:
command = ['%s/EnsembleEnergy' % DIR, pfsfile]
if DEBUG: print(' '.join(command))
p = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
if DEBUG:
print('stdout')
print(stdout)
print('stderr')
print(stderr)
if p.returncode:
raise Exception('RNAstructure EnsembleEnergy failed: on %s\n%s' % (seq, stderr))
if DEBUG: print(stdout.decode('utf-8').split('\n')[3])
free_energy = float(stdout.decode('utf-8').split('\n')[3].split(' ')[-2])
if return_free_energy:
return free_energy, pfsfile
else:
            # use the same gas constant as the other packages
            return np.exp(-1*free_energy/(.0019899*(273+T))), pfsfile
else:
return 0, pfsfile
def pfunc_vfold_(seq, version='0', T=37, coaxial=True, bpps=False, DEBUG=False):
#available versions: 0 for Turner 04 params, 1 for Mfold 2.3 params
#for bpps
# command = ['%s/Vfold2d_npk_mac.o %d %d %s %s %d' % (DIR, int(coaxial),\
# T, infile, outfile, int(version))]
DIR = package_locs["vfold"]
cwd = os.getcwd()
os.chdir(DIR) #vfold precompiled binaries don't work being called from elsewhere
if DEBUG: print(os.getcwd())
seqfile = write([seq])
if sys.platform=="linux":
platform='linux'
elif sys.platform=="darwin":
platform='mac'
elif sys.platform=="win32":
platform='win'
else:
raise RuntimeError('Vfold has binaries for linux, macOS, and win')
command = ['./VfoldThermal_npk_%s.o %d %d %d %s tmp %d; cat tmp; rm tmp' % (platform, int(coaxial), T, T, seqfile, int(version))]
if DEBUG: print(' '.join(command))
p = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
stdout, stderr = p.communicate()
os.chdir(cwd)
if DEBUG:
print('stdout')
print(stdout)
print('stderr')
print(stderr)
if p.returncode:
raise Exception('VfoldThermal_npk failed: on %s\n%s' % (seq, stderr))
Z=float(stdout.decode('utf-8').split('\n')[-2].split()[1])
os.remove(seqfile)
return Z, None
#output: take second field of last line for Z
def pfunc_linearpartition_(seq, bpps=False, package='contrafold', beam_size=100, return_free_energy=False, DEBUG=False):
LOC = package_locs['linearpartition']
tmp_file = filename()
tmp_command = filename()
if bpps:
pf_only = 0
else:
pf_only = 1
# args: beamsize, is_sharpturn, is_verbose, bpp_file, bpp_prefix, pf_only, bpp_cutoff
command=['echo %s | %s/linearpartition_%s' % (seq, LOC, package[0].lower()), str(beam_size),
'0', '0', tmp_file, '_', str(pf_only), '0.000001']
with open('%s.sh' % tmp_command,'w') as f:
f.write(' '.join(command))
if DEBUG: print(' '.join(command))
meta_command = ['chmod +x %s.sh; %s.sh' % (tmp_command, tmp_command)]
p = sp.Popen(meta_command, stdout=sp.PIPE, stderr=sp.PIPE,shell=True)
stdout, stderr = p.communicate(input=str.encode(seq))
if DEBUG:
print('stdout')
print(stdout)
print('stderr')
print(stderr)
if p.returncode:
raise Exception('LinearPartition failed: on %s\n%s' % (seq, stderr))
os.remove("%s.sh" % tmp_command)
    # Note: the LinearPartition executable labels this output as free energy in
    # kcal/mol, but for contrafold/eternafold it is still just the CONTRAfold
    # log Z; the output format also differs between the underlying packages.
if bpps:
return 0, tmp_file
else:
if package in ['contrafold','eternafold']:
logZ=float(stdout.decode('utf-8').split(' ')[-1])
if return_free_energy:
return -1*logZ, None
else:
return np.exp(logZ), None
elif package=='vienna':
free_energy = float(stdout.decode('utf-8').split(' ')[-2])
T=37
if return_free_energy:
return free_energy, None
else:
return np.exp(-1*free_energy/(.0019899*(273+T))), None
| StarcoderdataPython |
3413745 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
import os
import sys
from contextlib import suppress
from typing import Any, List, Type, cast
import numpy as np
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.distributed.algorithms.join import _Join, _JoinHook
from torch.distributed.optim import ZeroRedundancyOptimizer
from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD
from torch.testing._internal import common_distributed, common_utils
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def check_same_model_params(model_a: torch.nn.Module, model_b: torch.nn.Module, message: str = "") -> None:
for p_a, p_b in zip(model_a.parameters(), model_b.parameters()):
assert torch.allclose(p_a, p_b, atol=1e-3), f"Model parameters differ\n{p_a} {p_b}\n" + message
for b_a, b_b in zip(model_a.buffers(), model_b.buffers()):
assert torch.allclose(b_a, b_b), f"Model buffers differ {b_a} - {b_b}\n" + message
class TestZeroRedundancyOptimizer(common_distributed.MultiProcessTestCase):
def setUp(self):
super(TestZeroRedundancyOptimizer, self).setUp()
os.environ["WORLD_SIZE"] = str(self.world_size)
self._spawn_processes()
@property
def device(self):
return torch.device(self.rank) if BACKEND == dist.Backend.NCCL else torch.device("cpu")
@property
def world_size(self):
return 1
def tearDown(self):
try:
torch.distributed.destroy_process_group()
except AssertionError:
pass
try:
os.remove(self.file_name)
except OSError:
pass
def dist_init(self, rank, world_size=-1, backend=BACKEND):
if (world_size < 1):
world_size = self.world_size
store = dist.FileStore(self.file_name, world_size)
return dist.init_process_group(backend=backend, store=store, rank=rank, world_size=world_size)
class TestZeroRedundancyOptimizerSingleRank(TestZeroRedundancyOptimizer):
def test_state_dict(self):
"""Check that the ZeroRedundancyOptimizer exposes the expected state dict interface,
irrespective of the sharding.
"""
self.dist_init(self.rank)
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGD, lr=0.1, momentum=0.9)
x.backward()
o.step()
self.assertEqual(x, torch.tensor([0.9], device=DEVICE))
self.assertEqual(o.optim.state[x]["momentum_buffer"], torch.tensor([1.0], device=DEVICE))
o.zero_grad()
o.consolidate_state_dict() # Sync state dict in between replicas - even if there are none
state_dict = o.state_dict()
# Check that the state dict is pytorch-compliant key wise
self.assertIn("param_groups", state_dict.keys())
self.assertIn("state", state_dict.keys())
# Check that the pulled state is what we expect, and that we have all the expected keys
self.assertEqual(state_dict["param_groups"][0]["lr"], 0.1)
self.assertEqual(state_dict["param_groups"][0]["momentum"], 0.9)
self.assertFalse(state_dict["param_groups"][0]["nesterov"])
self.assertEqual(state_dict["param_groups"][0]["weight_decay"], 0.0)
self.assertEqual(state_dict["param_groups"][0]["dampening"], 0.0)
# Check that the pulled state and the .param_groups attribute are in sync
for k in state_dict["param_groups"][0].keys():
if k != "params":
self.assertEqual(state_dict["param_groups"][0][k], o.param_groups[0][k])
# Check that it's correctly loaded
o = ZeroRedundancyOptimizer([x], optimizer_class=SGD, lr=0.01)
o.load_state_dict(state_dict)
# Check that state is correct and on proper device
self.assertEqual(o.optim.state[x]["momentum_buffer"], torch.tensor([1.0], device=DEVICE))
# We should now be using a lr of 0.1, both within the optimizer
# and as exposed by the .param_groups attribute
assert o.param_groups[0]["lr"] == 0.1
x.backward()
o.step()
self.assertEqual(x, torch.tensor([0.71], device=DEVICE))
self.assertEqual(o.optim.state[x]["momentum_buffer"], torch.tensor([1.9], device=DEVICE))
# Check that the exposed param_groups are on the proper device
self.assertEqual(o.param_groups[0]["params"][0].device, x.device)
def test_lr_scheduler(self):
""" Check that a normal torch lr_scheduler is usable with ZeroRedundancyOptimizer"""
self.dist_init(self.rank)
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
x2 = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGD, lr=0.01)
o2 = torch.optim.SGD([x2], lr=0.01)
s = torch.optim.lr_scheduler.StepLR(o, 1)
s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
for _ in range(5):
x.backward()
o.zero_grad()
o.step()
s.step()
x2.backward()
o2.zero_grad()
o2.step()
s2.step()
self.assertEqual(x, x2)
def test_step_with_kwargs(self):
""" Check that the `step(**kwargs)` interface is properly exposed"""
self.dist_init(self.rank)
class SGDWithStepKWArg(torch.optim.SGD):
def step(self, closure=None, kwarg=None):
super().step()
kwarg.append(5)
kwarg: List[Any] = []
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGDWithStepKWArg, lr=0.1)
x.backward()
o.step(0, kwarg=kwarg)
self.assertEqual(kwarg, [5])
self.assertEqual(x, torch.tensor([0.9], device=DEVICE))
def test_step_with_extra_inner_key(self):
"""Check that an optimizer adding extra keys to the param_groups
is properly handled, in that the new key is exposed to the user
"""
self.dist_init(self.rank)
class SGDWithNewKey(torch.optim.SGD):
# Dummy optimizer which adds a new key to the param groups
def step(self, closure=None):
super().step()
self.param_groups[0]["new_key"] = 0.1
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGDWithNewKey, lr=0.1)
x.backward()
o.step()
self.assertEqual(o.param_groups[0]["new_key"], 0.1)
self.assertEqual(x, torch.tensor([0.9], device=DEVICE))
def test_step_without_closure(self):
"""Check that the step() method (without closure) is handlded as expected"""
self.dist_init(self.rank)
class SGDWithoutClosure(torch.optim.SGD):
def step(self):
return super().step()
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGDWithoutClosure, lr=0.1)
x.backward()
o.step()
self.assertEqual(x, torch.tensor([0.9], device=DEVICE))
def test_zero_grad(self):
"""Check that the zero_grad attribute is properly handled"""
self.dist_init(self.rank)
x = torch.rand(1)
m = torch.nn.Linear(1, 1)
o = ZeroRedundancyOptimizer(m.parameters(), optimizer_class=SGD, lr=0.1)
y = m(x)
y.backward(x)
        self.assertNotEqual(m.weight.grad, torch.zeros_like(m.weight))
        self.assertNotEqual(m.bias.grad, torch.zeros_like(m.bias))
o.zero_grad()
self.assertFalse(m.weight.grad)
self.assertFalse(m.bias.grad)
def test_constructor(self):
"""Check the robustness of the ZeroRedundancyOptimizer constructor by
passing different values for `params`"""
self.dist_init(self.rank)
m = torch.nn.Linear(1, 1)
# (input, expected error)
inputs = [
([], ValueError), # empty parameter list
(torch.randn(1), TypeError), # non-iterable: `torch.Tensor`
(1.2, TypeError), # non-iterable: `float`
([{"params": m.parameters()}], TypeError), # iterable of dict
(list(m.parameters()) + [42], TypeError), # iterable containing non-`torch.Tensor`
(m.parameters(), None), # `params` as a generator
(list(m.parameters()), None) # `params` as a list
]
for input, error in inputs:
if (error):
with self.assertRaises(error):
ZeroRedundancyOptimizer(input, optimizer_class=SGD, lr=0.1)
else:
ZeroRedundancyOptimizer(input, optimizer_class=SGD, lr=0.1)
def test_same_dense_param_type(self):
"""Check that ZeroRedundancyOptimizer raises an exception if the input
parameters include sparse tensors or different dense types.
NOTE: This test should be removed once support for sparse parameters
and varying parameter types is added.
"""
self.dist_init(self.rank)
inputs = [
[torch.sparse_coo_tensor(size=(2, 3))],
[torch.FloatTensor(1), torch.DoubleTensor(1)],
[torch.FloatTensor(1), torch.FloatTensor(1),
torch.sparse_coo_tensor(size=(2, 3))]
]
for input in inputs:
with self.assertRaises(ValueError):
ZeroRedundancyOptimizer(input, optimizer_class=SGD, lr=0.1)
class TestZeroRedundancyOptimizerDistributed(TestZeroRedundancyOptimizer):
@property
def world_size(self):
return min(4, max(2, torch.cuda.device_count()))
@common_distributed.skip_if_rocm
def test_step(self):
""" Check that the ZeroRedundancyOptimizer wrapper properly exposes the `.step()` interface"""
if self.rank >= self.world_size or (BACKEND == dist.Backend.NCCL and torch.cuda.device_count() < 2):
return
self.dist_init(self.rank, world_size=self.world_size)
context = suppress() if not torch.cuda.is_available() else torch.cuda.device(self.rank)
with context:
x = torch.tensor([float(self.rank + 1)], device=self.device)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[1.0]])
m.bias.data = torch.tensor([2.0])
m_zero = copy.deepcopy(m)
m.to(self.device)
m_zero.to(self.device)
lr = 0.1
o = SGD(m.parameters(), lr=lr)
o_zero = ZeroRedundancyOptimizer(m_zero.parameters(), optimizer_class=SGD, lr=lr)
y = m(x)
y.backward(x)
y_zero = m_zero(x)
y_zero.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= self.world_size
o.step()
for p in m_zero.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= self.world_size
o_zero.step()
self.assertEqual(m.weight, m_zero.weight)
self.assertEqual(m.bias, m_zero.bias)
@common_distributed.skip_if_rocm
def test_step_with_closure(self):
""" Check that the ZeroRedundancyOptimizer wrapper properly exposes the `.step(closure)` interface"""
if self.rank >= self.world_size or (BACKEND == dist.Backend.NCCL and torch.cuda.device_count() < 2):
return
self.dist_init(self.rank, world_size=self.world_size)
context = suppress() if not torch.cuda.is_available() else torch.cuda.device(self.rank)
with context:
for bucket_view in [False, True]:
x_val = self.rank + 1
weight = 1.0
bias = 2.0
error = 1.0
target = torch.tensor([x_val * weight + bias + error], device=self.device)
loss_fn = torch.nn.L1Loss()
x = torch.tensor([float(x_val)], device=self.device)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[weight]])
m.bias.data = torch.tensor([bias])
m.to(self.device)
o = ZeroRedundancyOptimizer(
m.parameters(),
optimizer_class=SGD,
parameters_as_bucket_view=bucket_view,
lr=0.1,
)
y = m(x)
y.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= self.world_size
def closure():
o.zero_grad()
output = m(x)
loss = loss_fn(output, target)
loss.backward()
return loss
loss = o.step(closure=closure)
self.assertEqual(loss, torch.tensor(error))
self.assertEqual(m.weight, torch.tensor([[1.1]]))
self.assertEqual(m.bias, torch.tensor([2.1]))
def test_sharding(self):
""" Check the sharding at construction time
NOTE: The correctness of this test depends on the ZeRO implementation
using the sorted-greedy partitioning algorithm. For details, see
`ZeroRedundancyOptimizer._partition_parameters()` in
`zero_redundancy_optimizer.py`.
"""
self.dist_init(self.rank)
sizes = [9, 7, 5, 3]
params = []
for size in sizes * self.world_size:
params.append(torch.rand(size, 1))
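        # With sorted-greedy partitioning each rank receives one parameter of
        # each size, so every shard sums to 9 + 7 + 5 + 3 = 24 elements.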
o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=0.1)
self.assertEqual(sum([x.numel() for x in o.optim.param_groups[0]["params"]]), sum(sizes))
def test_add_param_group(self):
"""Check that ZeroRedundancyOptimizer properly handles adding a new param_group a posteriori,
and that all ranks get a shard
NOTE: The correctness of this test depends on the ZeRO implementation
using the sorted-greedy partitioning algorithm. For details, see
`ZeroRedundancyOptimizer._partition_parameters()` in
`zero_redundancy_optimizer.py`.
"""
self.dist_init(self.rank)
# Test with all parameters trainable to begin with
def all_trainable():
params = []
sizes = [9, 7, 5, 3]
sizes_world = sizes * self.world_size
for size in sizes_world[:-1]:
params.append(torch.rand(size, 1))
# Make sure that the params are trainable, enforces size-based partitioning
for p in params:
p.requires_grad = True
o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=0.1)
assert len(o.param_groups) == 1
o.add_param_group({"params": [torch.rand(3, 1)]})
assert len(o.param_groups) == 2
        # Verify that the added group lands in the correct partition, so that all partitions hold the same number of elements.
assert sum([x.numel() for g in o.optim.param_groups for x in g["params"]]) == sum(sizes)
assert len(o.optim.param_groups) == 2
# Test a pathological config with a first big non-trainable param
def some_trainable():
params = []
for size in [100, 3, 5, 2, 6, 4]:
params.append(torch.rand(size, 1))
# Make sure that the params are trainable, enforces size-based partitioning
for p in params[1:]:
p.requires_grad = True
o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=0.1)
assert len(o.param_groups) == 1
o.add_param_group({"params": [torch.rand(3, 1)]})
assert len(o.param_groups) == 2
assert len(o.optim.param_groups) == 2
all_trainable()
some_trainable()
@common_distributed.skip_if_lt_x_gpu(2)
def test_collect_shards(self):
""" Check the state consolidation mechanism, and the state dict exposed by ZeroRedundancyOptimizer"""
self.dist_init(self.rank)
RECIPIENT_RANK = 0
# Run a dummy step so that the optimizer state dict exists
batch, input_width, hidden, target_width = 3, 20, 10, 5
target = torch.rand((batch, target_width), device=self.device)
inputs = torch.rand((batch, input_width), device=self.device)
model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))
model.to(self.device)
loss_fn = torch.nn.L1Loss()
loss_fn.to(self.device)
# With SGD, Momentum is required to get a state to shard
optimizer = ZeroRedundancyOptimizer(model.parameters(), optimizer_class=SGD, lr=0.1, momentum=0.99)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss.backward()
return loss
_ = optimizer.step(closure=closure)
# Update the optimizer state on the reference rank
optimizer.consolidate_state_dict(to=RECIPIENT_RANK)
# Fetch the state on the reference rank
# - check that it has the correct size
# - load it again
if self.rank == RECIPIENT_RANK:
optimizer_state_dict = optimizer.state_dict()
self.assertEqual(len(optimizer_state_dict["state"]), len(list(model.parameters())))
else:
optimizer_state_dict = {}
optimizer_state_dict = _broadcast_object(
optimizer_state_dict,
src_rank=RECIPIENT_RANK,
group=dist.group.WORLD,
device=self.device,
)
# Load the optimizer state dict, check that no exception is raised
optimizer.load_state_dict(optimizer_state_dict)
def test_multiple_groups(self):
""" Check that the ZeroRedundancyOptimizer handles working with multiple process groups"""
self.dist_init(self.rank, self.world_size, dist.Backend.GLOO)
# Only work with the even ranks, to check that the global_rank indexing is properly used
sub_group_ranks = list(filter(lambda x: x % 2 == 0, range(self.world_size)))
process_group = torch.distributed.new_group(ranks=sub_group_ranks, backend="gloo")
# Make sure that all the ranks get different training data
# So that the sync check in between their models is meaningful
torch.manual_seed(self.rank)
np.random.seed(self.rank)
# Standard deep learning setup
epochs, batch, input_width, hidden, target_width = 5, 3, 20, 10, 5
loss_fn = torch.nn.L1Loss().to(self.device)
def check(optimizer):
# Just run a couple of epochs, check that the model is properly updated
for _ in range(epochs):
target = torch.rand((batch, target_width), device=self.device)
inputs = torch.rand((batch, input_width), device=self.device)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss /= self.world_size
loss.backward()
dist.all_reduce(loss, group=process_group) # Not strictly needed for the test below
return loss
_ = optimizer.step(closure=closure)
# Check that all the params are the same on all ranks
for pg in optimizer.param_groups:
for p in pg["params"]:
receptacle = [p.clone() for _ in sub_group_ranks] if self.rank == 0 else []
dist.gather(p, receptacle, dst=0, group=process_group)
if self.rank == 0:
for sync_p in receptacle[1:]:
assert torch.all(torch.eq(receptacle[0], sync_p)), "Models differ in between ranks"
if self.rank in sub_group_ranks:
# Model fitting in the broadcast bucket
model = torch.nn.Sequential(
torch.nn.Linear(input_width, hidden),
torch.nn.Linear(hidden, target_width),
).to(self.device)
# With SGD, Momentum is required to get a state to shard
optimizer = ZeroRedundancyOptimizer(
model.parameters(), optimizer_class=SGD, lr=0.1, momentum=0.99, process_group=process_group
)
check(optimizer)
# Model not-fitting in the broadcast bucket
model = torch.nn.Sequential(
torch.nn.Linear(input_width, hidden),
torch.nn.Linear(hidden, target_width),
).to(self.device)
# With SGD, Momentum is required to get a state to shard
optimizer = ZeroRedundancyOptimizer(
model.parameters(),
optimizer_class=SGD,
lr=0.1,
momentum=0.99,
process_group=process_group,
)
check(optimizer)
@common_distributed.skip_if_no_gpu
def test_local_optimizer_parity(self):
"""When combined with DDP, check that ZeroRedundancyOptimizer(optimizer) and the same monolithic optimizer
give the exact same results
"""
self.dist_init(self.rank)
BATCHS = 20
with torch.cuda.device(self.rank):
torch.manual_seed(self.rank)
np.random.seed(self.rank)
def check_optimizer_equivalence(optimizer: Type[torch.optim.Optimizer]):
# Any model works. Add one different buffer per rank
model = torch.nn.Sequential(
torch.nn.Linear(2, 3),
torch.nn.Linear(3, 3),
torch.nn.Linear(3, 3),
)
model.register_buffer("test_buffer", torch.ones((1)) * self.rank)
model.to(self.device)
sharded_optimizer = ZeroRedundancyOptimizer(
params=model.parameters(), optimizer_class=optimizer, lr=1e-3
)
sharded_ddp_model = DDP(
module=model, device_ids=[self.rank], broadcast_buffers=True, find_unused_parameters=True
)
ddp_model_single = copy.deepcopy(model)
ddp_model_single.to(self.device)
ddp_optimizer = optimizer(ddp_model_single.parameters(), lr=1e-3)
ddp_model = DDP(
ddp_model_single, device_ids=[self.rank], broadcast_buffers=True, find_unused_parameters=True
)
# The model should be synchronized in between the ranks at construction time, check that
check_same_model_params(sharded_ddp_model, ddp_model, "Models differ from the start")
def check_step():
input_tensor = torch.rand((64, 2))
def closure_ddp(input_tensor=input_tensor):
ddp_optimizer.zero_grad()
ddp_loss = ddp_model(input_tensor).abs().sum()
ddp_loss.backward()
return ddp_loss
def closure_sharded(input_tensor=input_tensor):
sharded_optimizer.zero_grad()
sharded_loss = sharded_ddp_model(input_tensor).abs().sum()
sharded_loss.backward()
return sharded_loss
loss_ddp = cast(torch.Tensor, ddp_optimizer.step(closure=closure_ddp))
loss_sharded_optim = cast(torch.Tensor, sharded_optimizer.step(closure=closure_sharded))
assert torch.allclose(
loss_ddp, loss_sharded_optim
), "Losses differ in between Pytorch optim and ZeroRedundancyOptimizer"
check_same_model_params(sharded_ddp_model, ddp_model, "Models differ after a step")
# The models should stay the same in between the ranks
for i in range(BATCHS):
check_step()
# Change the models trainability, check that parity is maintained
                # only check after a couple of constant batches to go through both regimes
if i > BATCHS // 2:
next(ddp_model.parameters()).requires_grad = bool(i % 2)
next(sharded_ddp_model.parameters()).requires_grad = bool(i % 2)
# Check that the checkpoints are compatible
reference_rank = 0
# - get states
ddp_state_dict = ddp_optimizer.state_dict()
sharded_optimizer.consolidate_state_dict(to=reference_rank)
sharded_optim_state_dict = [sharded_optimizer.state_dict() if self.rank == reference_rank else {}]
dist.broadcast_object_list(sharded_optim_state_dict, src=reference_rank, group=dist.group.WORLD)
sharded_optim_state_dict = sharded_optim_state_dict[0]
# - cross load the states
# run one step and check that the models are still the same
ddp_state_dict_ref = copy.deepcopy(ddp_state_dict) # OSS will remove some states
ddp_optimizer.load_state_dict(sharded_optim_state_dict) # mixup on purpose !
sharded_optimizer.load_state_dict(ddp_state_dict)
check_step()
# - self load, rewind, check no problem
# run one step and check that the models are still the same
ddp_optimizer.load_state_dict(ddp_state_dict_ref)
sharded_optimizer.load_state_dict(sharded_optim_state_dict)
check_step()
for opt in [torch.optim.SGD, torch.optim.Adam]:
check_optimizer_equivalence(opt)
def _test_zero_join(self, device):
r"""
Check that the ZeRO join hook allows training with uneven inputs when using the given device.
Arguments:
device (torch.device): device used to store parameters and perform
collective communications.
"""
NUM_INPUTS = 3
NUM_EPOCHS = 2
torch.manual_seed(0)
torch.cuda.manual_seed(0)
rank = self.rank
world_size = self.world_size
is_gpu = device.type == "cuda"
backend = dist.Backend.NCCL if is_gpu else dist.Backend.GLOO
self.dist_init(rank, world_size, backend)
if BACKEND == dist.Backend.NCCL and is_gpu:
torch.cuda.set_device(self.device)
model = torch.nn.Sequential(
torch.nn.Linear(2, 3),
torch.nn.Linear(3, 3),
torch.nn.Linear(3, 3),
)
model.to(device)
# DDP ensures correct gradients in data parallel training, so DDP with
# local optimizers on uneven inputs should be equivalent to ZeRO on
# uneven inputs with gradients being manually set
ddp_model = DDP(model, device_ids=[rank]) if is_gpu else DDP(model)
local_optim = torch.optim.Adam(ddp_model.parameters(), lr=0.01)
zero_model = copy.deepcopy(model)
zero_model.to(device)
zero_optim = ZeroRedundancyOptimizer(zero_model.parameters(), torch.optim.Adam, lr=0.01)
loss_fn = torch.nn.MSELoss()
# Use uneven inputs: rank i has i extra inputs
inputs = [torch.randn(20, 2).to(device) for _ in range(NUM_INPUTS + rank)]
labels = torch.randn(20, 3).to(device)
# Save the gradients and parameters from DDP as the ground truth; do
# so on the last-joining rank (in this case, the largest rank)
grads_at_each_iter = []
params_at_each_iter = []
with ddp_model.join():
for _ in range(NUM_EPOCHS):
for input in inputs:
output = ddp_model(input)
loss_fn(output, labels).backward()
if rank == world_size - 1:
grads = []
for p in ddp_model.parameters():
grads.append(p.grad.detach().clone().to(device))
local_optim.step()
if rank == world_size - 1:
params = []
for p in ddp_model.parameters():
params.append(p.detach().clone().to(device))
grads_at_each_iter.append(grads)
params_at_each_iter.append(params)
# Broadcast the saved gradients and parameters to all of the other
# ranks (which joined early)
grads_and_params = [grads_at_each_iter, params_at_each_iter]
grads_and_params = _broadcast_object(grads_and_params, src_rank=world_size - 1, group=dist.group.WORLD, device=device)
grads_at_each_iter = grads_and_params[0]
params_at_each_iter = grads_and_params[1]
# TODO: Replace this `_broadcast_object` with `broadcast_object_list`
# once the latter supports loading to the destination device instead
# of the source device
# A process must still set the remaining gradients after joining, so we
# define a join hook to do this before the ZeRO join hook
class _JoinGradInfo():
def __init__(self, grads, device):
self.grads = grads # remaining gradients to set (in order)
self.index = 0
self.device = device
class _SetGradsJoinHook(_JoinHook):
def __init__(self, zero_optim, grads, device):
zero_optim._join_grad_info = _JoinGradInfo(grads, device)
self.zero = zero_optim
super().__init__()
def main_hook(self):
grads = self.zero._join_grad_info.grads[self.zero._join_grad_info.index]
self.zero._join_grad_info.index += 1
for p, grad in zip(self.zero._all_params, grads):
p.grad = grad.detach().clone().to(self.zero._join_grad_info.device)
@property
def device(self):
return self.zero._join_grad_info.device
@property
def process_group(self):
return dist.group.WORLD
num_grads_after_joining = NUM_EPOCHS * (world_size - rank - 1)
grads = grads_at_each_iter[-num_grads_after_joining:]
set_grads_jh = _SetGradsJoinHook(zero_optim, grads, device)
zero_jh = zero_optim._join_hook()
iter = 0
with _Join([set_grads_jh, zero_jh]):
for _ in range(NUM_EPOCHS):
for input in inputs:
# Schedule an all-reduce to indicate not joined
dist.all_reduce(torch.ones(1, device=device), group=dist.group.WORLD)
# Set gradients manually
for p, grad in zip(zero_model.parameters(), grads_at_each_iter[iter]):
p.grad = grad.detach().clone().to(device)
# Perform optimizer step and check parity
zero_optim.step()
for p, ddp_p in zip(zero_model.parameters(), params_at_each_iter[iter]):
assert torch.allclose(p, ddp_p), \
"Parameters differ between using ZeRO and local optimizer"
iter += 1
@common_distributed.requires_nccl()
@common_distributed.skip_if_lt_x_gpu(2)
def test_zero_join_gpu(self):
"""Check that the ZeRO join hook allows training with uneven inputs on GPU."""
self._test_zero_join(self.device)
@common_distributed.requires_gloo()
def test_zero_join_cpu(self):
"""Check that the ZeRO join hook allows training with uneven inputs on CPU."""
self._test_zero_join(torch.device("cpu"))
def _test_zero_model_parallel(self, parameters_as_bucket_view: bool):
# Use two processes each with two GPUs
assert self.rank < 2
NUM_EPOCHS = 3
NUM_INPUTS = 5
LR = 0.01
torch.manual_seed(0)
torch.cuda.manual_seed(0)
class ModelParallelModel(torch.nn.Module):
def __init__(self, dev0, dev1):
super().__init__()
self.dev0 = dev0
self.dev1 = dev1
self.net0 = torch.nn.Linear(10, 10).to(dev0)
self.relu = torch.nn.ReLU()
self.net1 = torch.nn.Linear(10, 5).to(dev1)
def forward(self, x):
x = x.to(self.dev0)
x = self.relu(self.net0(x))
x = x.to(self.dev1)
return self.net1(x)
class LocalModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.net0 = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
self.net1 = torch.nn.Linear(10, 5)
def forward(self, x):
return self.net1(self.relu(self.net0(x)))
dev0 = 2 * self.rank
dev1 = 2 * self.rank + 1
mp_model = ModelParallelModel(dev0, dev1)
ddp_model = DDP(mp_model)
local_model = LocalModel()
cpu_device = torch.device("cpu")
# Ensure the parameters are the same across the two models
local_model.net0.weight = torch.nn.Parameter(mp_model.net0.weight.detach().clone().to(cpu_device))
local_model.net0.bias = torch.nn.Parameter(mp_model.net0.bias.detach().clone().to(cpu_device))
local_model.net1.weight = torch.nn.Parameter(mp_model.net1.weight.detach().clone().to(cpu_device))
local_model.net1.bias = torch.nn.Parameter(mp_model.net1.bias.detach().clone().to(cpu_device))
# Compare parity between DDP with model parallelism using ZeRO and
# a local model using a local optimizer
zero_optim = ZeroRedundancyOptimizer(
ddp_model.parameters(),
optimizer_class=torch.optim.Adam,
parameters_as_bucket_view=parameters_as_bucket_view,
lr=LR
)
local_optim = torch.optim.Adam(local_model.parameters(), lr=LR)
inputs = [torch.randn(20, 10) for _ in range(NUM_INPUTS)]
for _ in range(NUM_EPOCHS):
for input in inputs:
def closure_local():
local_optim.zero_grad()
local_loss = local_model(input).abs().sum()
local_loss.backward()
return local_loss
def closure_ddp():
zero_optim.zero_grad()
ddp_loss = ddp_model(input).abs().sum()
ddp_loss.backward()
return ddp_loss
local_loss = cast(torch.Tensor, local_optim.step(closure=closure_local))
ddp_loss = cast(torch.Tensor, zero_optim.step(closure=closure_ddp)).to(cpu_device)
assert torch.allclose(
local_loss, ddp_loss
), "Losses differ between local optim and ZeroRedundancyOptimizer"
for local_p, ddp_p in zip(local_model.parameters(), ddp_model.parameters()):
ddp_p = ddp_p.to(cpu_device)
assert torch.allclose(local_p, ddp_p), "Models differ after a step"
@common_distributed.skip_if_lt_x_gpu(4)
def test_zero_model_parallel_with_bucket_view(self):
"""
Check that ZeRO works with model parallelism where layers are sharded
across devices when ``parameters_as_bucket_view=True``.
"""
if self.rank >= 2:
return
self.dist_init(self.rank, world_size=2)
self._test_zero_model_parallel(parameters_as_bucket_view=True)
@common_distributed.skip_if_lt_x_gpu(4)
def test_zero_model_parallel_without_bucket_view(self):
"""
Check that ZeRO works with model parallelism where layers are sharded
across devices when ``parameters_as_bucket_view=False``.
"""
if self.rank >= 2:
return
self.dist_init(self.rank, world_size=2)
self._test_zero_model_parallel(parameters_as_bucket_view=False)
if __name__ == "__main__":
# ! unittest should not be used here, else the tests are not properly registered
common_utils.run_tests()
| StarcoderdataPython |
11375146 | <reponame>LDiek-/ai
#! /usr/bin/python
# -*- coding: utf-8 -*-
import json
# Neural network for learning noun rules
__author__ = "<NAME>"
__version__ = 1.0
class Connection():
def __init__(self, name, connector, connected, weight):
self.name = name
self.mode = "off"
self.weight = float(weight)
self.signalStrength = 0
self.way = [connector, connected, self.weight]
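        # way: [connecting input neuron, connected output neuron, weight]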
connected.addList(self)
def checkWeight(self, summand):
        # Adjusts this connection's weight when the Hebb rule is satisfied
if self.mode == "on" and self.way[1].isActive():
self.weight += summand
def update(self, signal):
"""
Erneuert das Signal, indem es diese mit der eigenen Gewichtung multipliziert
Nach jeder Eingabe wird überprüft, ob die Gewichtungen über 0.1 liegen.
:type signal: int
"""
self.mode = "on"
self.signalStrength = signal * self.weight
self.kill()
def get_off(self):
self.mode = "off"
self.signalStrength = 0
def getSignal(self):
if self.mode == "on":
return self.signalStrength
else:
return None
def kill(self):
"""
Diese Funktion soll, wenn die Gewichtung dieser Funktion nahe eines geringen Wertes geht,
sich selbst löschen
1: Löschen aus der Liste von dem Output-Neuron
2: Löschen aus verb_infos und verbindungen von dem angschlossenen Input-Neuron
"""
if self.weight < 0.1:
#1
connectedInfo = self.way[1]
connectedInfo.connections = filter(lambda x: x is not self, connectedInfo.connections)
#2
connector = self.way[0]
connector.verbindungen = filter(lambda x: x is not self, connector.verbindungen)
for key in connector.verb_infos:
if key == self.name:
del connector.verb_infos[key]
print "[-] killed Connection %s" % self.name
break
class Input_Neuron():
def __init__(self, reiz, neuron1):
        # verb_infos: {'connection name': connection weight, ...}
self.count_con = 0
self.verbindungen = []
self.verb_infos = {}
self.reiz = reiz
self.name = reiz
self.neuron1 = neuron1
self.neuronNames = [neuron1.name]
self.verbinden(self.neuron1)
self.eingabe = ""
def inputValue(self, eingabe):
"""
:type eingabe: basestring
Erneuert lokale Variable 'eingabe' und öffnet sendSignal()
Hier wird die Funktion auch aufegrufen, welche das Neuron aus allen Listen löscht und
somit 'tötet'
"""
self.eingabe = eingabe
self.sendSignal()
def verbinden(self, neuron):
        # Appends a Connection object to the array. The weight is recorded in
        # verb_infos under the same name before the counter is incremented, so
        # the key actually matches the connection's name.
        conn_name = self.name + str(self.count_con)
        self.verbindungen.append(Connection(conn_name, self, neuron, 1))
        self.verb_infos[conn_name] = 1
        self.count_con += 1
def _bewerteEingabe(self):
        # Computes the similarity of the final syllable
erg = 0
self.eingabe = self.eingabe[len(self.eingabe)-len(self.reiz):]
for count in range(len(self.eingabe)):
if self.eingabe[count] == self.reiz[count]:
erg += 1.0/float(len(self.reiz))
return erg
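    # Example: with reiz='ung' and eingabe='Zeitung', the trailing 'ung' is
    # compared character by character and the similarity comes out as 1.0.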
def changeWeight(self, val):
"""
Erneurt Gewichtungen der anliegenden Verbindungen per Addition
:type val: float
"""
for conn in self.verbindungen:
conn.weight += val
def sendSignal(self, val=None):
"""
Bewertet die in der Klasse gespeicherte Eingabe und erneuert das Signal aller
an diesem Neuron angeschlossenen Verbindungen.
Wirde passiv durch inputValue() geöffnet.
Darf positive Signale nur an IsNomen senden
val ist Optional, falls die Gewichtung von außen her verändert werden soll
:type val: int
"""
if val:
eingabeWert = val
else:
eingabeWert = self._bewerteEingabe()
for verbindung in self.verbindungen:
verbindung.update(eingabeWert)
def getDict(self):
"""
Benutzt __dict__ funktion um alle Variablen als Dictionary auszugeben,
jedoch werden self.verbindungen nicht ausgegeben, da sie zum wiederherstellen
aufgrund self.verb_infos nicht nötig sind und trotzdem in umgewandelt werden müssten
:rtype: dict
"""
        myDict = dict(self.__dict__)  # copy so deleting keys does not mutate the instance
if "verbindungen" in myDict: del myDict['verbindungen']
if "neuron1" in myDict: del myDict['neuron1']
if "neuron2" in myDict: del myDict['neuron2']
        # Convert myDict values to "ISO-8859-1" unicode
for key in myDict:
if isinstance(myDict[key], list):
myDict[key] = map(lambda x: self.make_unicode(x), myDict[key])
elif isinstance(myDict[key], dict):
for newKey in myDict[key]:
                    # Values of type int do not need to be converted
                    if not isinstance(myDict[key][newKey], int):
                        myDict[key][newKey] = self.make_unicode(myDict[key][newKey])
elif not isinstance(myDict[key], int):
myDict[key] = self.make_unicode(myDict[key])
return myDict
def getNewConnection(self, conns, outputNeurons):
"""
:type conns: list
:type outputNeurons: list
Hier werden nach laden der alten Daten von einer Datei die neuen
initialisiert
Dies funktioniert, indem er zuerst die noch nicht verbundenden verbindet
und dann alle mit einer neuen Gewichtung aktualisiert
conns: [{Daten von einer Connection},{Daten zweiter Connection},...]
Da die Daten einer Connection nur aus dem Name und der Gewichtung entstehen,
muss als connector das eigene Objekt und als connected muss von außen iteriert werden.
Das funktioniert aber nur, wenn bei dem abspeichern dieser Daten genauso viele Output-Neuronen
vorhanden waren, wie jetzt auch!
"""
for connName in conns:
if connName not in map(lambda x: x.name, self.verbindungen):
for outputNeuron in outputNeurons:
obj = Connection(connName, self, outputNeuron, conns[connName])
self.verbindungen.append(obj)
def make_unicode(self, string):
"""
Erzeugt ISO-8859-1 codierte codes
:rtype: basestring
:type string: str
"""
try:
if isinstance(string, unicode):
return string
elif isinstance(string, str):
return unicode(string, "ISO-8859-1")
except UnicodeDecodeError:
return ""
class Output_Neuron():
def __init__(self, name):
self.name = name
self.netinput = 0
self.connections = []
def addList(self, connection):
self.connections.append(connection)
def isActive(self):
return self.netinput
def getDict(self):
"""
:rtype: dict
"""
        myDict = dict(self.__dict__)  # copy so deleting keys does not mutate the instance
        del myDict['connections']
        # Convert myDict values to "ISO-8859-1" unicode
for key in myDict:
if isinstance(myDict[key], list):
myDict[key] = map(lambda x: unicode(x, "ISO-8859-1"), myDict[key])
elif not isinstance(myDict[key], int):
if isinstance(myDict[key], str): myDict[key] = unicode(myDict[key], "ISO-8859-1")
return myDict
def makeOutput(self):
for connect in self.connections:
self.netinput += connect.getSignal()
return self.isActive()
def train():
"""
Trainingsphase
Lernen ist supervised durch User
Schritte:
1) Laden alter Daten
2) Lesen der zu bearbeitetenden Wörter (Nomen)
3) Überprüfen, ob es ein Nomen sein soll oder nicht
4) Erzeugen aller !möglichen! letzten Silben
5) Erzeugen neuer Input-Neuronen-Nachbarschaften, oder aber alte aktualisieren, indem man die Gewichtungen
der Verbindungen um 0.5 erhöht
6) Nun wird das Nomen bei jedem Input-Neuronen 'eingegeben' und somit die Signale und Verbindungen aktiviert
7) Nun werden die Gewichtungen von hinten nach vorne verändert (sie werden 'belehrt', supervised)
8) Beim Erreichen des Endes der Datei werden die Neuronen gespeichert
" n" zeigt an, dass das vorherige Wort kein Nomen ist
"""
def _save():
"""Diese Funktion soll InputNeuronen speichern, sodass man sie
später wieder laden kann und keine Daten verloren gehen
Mit JSONEncoder und o.__dict__ dafür muss jedoch noch eine Funktion
in den Klassen definiert werden"""
f_input = open("saved_input.json", "w")
f_output = open("saved_output.json", "w")
changedInput = _changeClass(inputNeuronen)
changedOutput = _changeClass(outputNeuronen)
open("write.txt", "w").write(str(changedInput))
f_input.write(json.dumps(changedInput))
f_output.write(json.dumps(changedOutput))
def _changeClass(data):
"""
Verändert Objekte der Art [[Class, Class], [Class, Class, Class], ...] in
von JSON verarbeitbare 2D-Listen mit Dictionarys anstatt von Klassen
Diese Dictionarys müssen von loadData() und save() verarbeitbar sein.
neighborhood := die '2. Dimension' der Liste (Neuronen-Nachbarschaft)
Diese Funktion überträgt alle Daten aus data -> output, wobei sie jedoch die
Klassen per __dict__-Aufruf in ein Dictionary umwandeln.
:rtype: list
:type data: list
"""
output = []
for neighborhoodCount in range(len(data)):
if isinstance(data[neighborhoodCount], Output_Neuron):
output.append(data[neighborhoodCount].getDict())
else:
output.append([])
for objCount in range(len(data[neighborhoodCount])):
if isinstance(data[neighborhoodCount][objCount], list):
                        # Catch lists and process them via map()
                        # (index into the inner list; previously getDict() was
                        # called on the list itself, which would fail)
                        output[neighborhoodCount].append(map(
                            lambda x: data[neighborhoodCount][objCount][x].getDict(),
                            xrange(len(data[neighborhoodCount][objCount])))
                        )
else:
output[neighborhoodCount].append(data[neighborhoodCount][objCount].getDict())
return output
def _learn(aktiveNeuron):
"""
Guckt alle Neuronen-Nachbarschaften von hinten nach vorne nach
Erstellt 2 Listen: Sendende / Nicht-Sendende
verringert gewichtung der Sendenden und steigert die der sendenden um denselben Wert
benötigt aktives Output-Neuron
:type aktiveNeuron: Output_Neuron
"""
conns = aktiveNeuron.connections
sending = []
notsending = []
for conn in conns:
if conn.mode == "on":
sending.append(conn)
conn.weight -= 0.1
else:
notsending.append(conn)
conn.weight += 0.1
def _getobj(objName):
"""
Versucht zu gegebenen Namen das richtige Neuron zu finden
Dabei nutzt der Algorithmus aus, dass alle Neuronen in einer Nachbarschaft Ähnlichkeiten haben Bsp.:
[['aus', 'us'], ['ab', 'b']]
Also muss, wenn der Name des gesuchten Neurons in der Nachbarschaft drin ist, der Name
ähnlichkeit mit dem ersten Wert dieser Nachbarschaft haben
objName -> Letzte Silbe des eigentlichen Wortes!
:type objName: str
:rtype: Input_Neuron
"""
try:
for neuron1, neuron2 in inputNeuronen:
if neuron1.name in objName or objName in neuron1.name:
if neuron1.name == objName:
return neuron1
elif neuron2.name == objName:
return neuron2
except ValueError:
pass
#1
outputNeuronen, inputNeuronen = loadData()
typ = 'nom'
f = open("nomen.txt", "r")
neuronNeighbors = []
inputNames = []
for neighbor in inputNeuronen:
if neighbor:
inputNames += map(lambda x: x.name, neighbor)
outputIsNomen = Output_Neuron("IsNomen")
outputIsAdjektive = Output_Neuron("IsAdjektive")
outputIsVerb = Output_Neuron("IsVerb")
outputNeuronen = [outputIsNomen, outputIsAdjektive, outputIsVerb]
#2
for nomen in f.readlines():
nomen = nomen.strip("\n")
nomen = nomen.split(" ")
#3
if len(nomen) > 1:
typ = {"v": "ver", "a": "adj"}[nomen[1]]
print "[+] Found %s" % typ
nomen = nomen[0].strip(" " + typ)
else:
typ = "nom"
print "[+] Found Nomen"
#4
lenInput = len(nomen)
neuronNeighborsNames = [nomen[lenInput-x:] for x in range(2, 4)]
outputTypes = {"nom": outputIsNomen, "ver": outputIsVerb, "adj": outputIsAdjektive}
#5
for name in neuronNeighborsNames:
for typstr in ['nom', 'ver', "adj"]:
if typ == typstr:
if name in inputNames:
                    # _getobj returns the input neuron with the given name
if _getobj(name): _getobj(name).changeWeight(0.5)
else:
neuronNeighbors.append(Input_Neuron(name, outputTypes[typstr]))
inputNeuronen.append(neuronNeighbors)
neuronNeighbors = []
        # Feed 'nomen' as input to all input neurons
#6
inputNeuronen = filter(lambda x: x != [], inputNeuronen)
for neighbor in inputNeuronen:
for neuron in neighbor:
neuron.inputValue(nomen)
        # Find the active output neuron and "teach" it
#7
if typ == "nom":
_learn(outputIsNomen)
elif typ == "ver":
_learn(outputIsVerb)
else:
_learn(outputIsAdjektive)
#8
if inputNeuronen:
_save()
def test():
outputNeuronen, inputNeuronen = loadData()
print "Got %d Output-Neuronen and %d Input-Neuronen loaded" % (len(outputNeuronen), len(inputNeuronen) * 2)
outputs = {}
while True:
        eingabe = raw_input("Enter a noun: ")
if eingabe == "end":
break
for neighborhood in inputNeuronen:
for neuron in neighborhood:
neuron.inputValue(eingabe)
for neuron in outputNeuronen:
outputs[neuron.name] = neuron.makeOutput()
biggestValue = max(outputs.values())
for name in outputs:
if outputs[name] == biggestValue:
print name
break
def loadData():
"""
Diese Funktion soll alle gespeicherten Daten wieder starten
Dazu muss diese Funktion auch Dictionarys in die dazugehörigen Klassen
zurückverwandeln
:rtype: list
Ausgabe sind 2 Listen (outputNeuronen, inputNeuronen), welche bei nicht vorhandendem bzw. leeren
Dateien ebenfalls leer sind.
Output-Neuronen werden verbunden und sind am Anfang ohne Verbindung, weshalb sie einfach nur mit
einem Namen initialiesiert werden müssen.
Connections werden in den Klassen (Input-Neuronen, Output-Neuronen) gespeichert.
Input-Neuronen müssen jedoch mit den richtigen Verbindungen verbunden werden, welche auch die richtige
Gewichtung haben müssen. Das Gewichten übernimmt eine Funktion der Klasse Input-Neuronen(getNewConnection)
Da JSON keine Klassen abspeichern kann, werden die wichtigsten Infos über eine Funktion in einem Dictionary
gespeichert(Name, Gewichtung).
Damit noch weniger Klassen bei dem Starten genutzt werden, müssen auch die in der Input-Neuronen gespeicherten
Klasse Output_Neuronen nocheinmal nur als Name gespeichert werden. Dann werden diese in der wiederhergestellten
Liste von Output-Neuronen gesucht und als Parameter für die IN genutzt
"""
    try:
        f_input = open("saved_input.json", "r")
        f_output = open("saved_output.json", "r")
    except IOError:
        # Missing files count as empty data, as documented above
        return [], []
inputNeuronen = []
nachbarschaft = []
outputNeuronen = []
try:
oldInputNeuronen = json.load(f_input, encoding='ISO-8859-1')
oldOutputNeuronen = json.load(f_output, encoding='ISO-8859-1')
except ValueError:
return [], []
    # Connections attach themselves to the Output_Neurons automatically
if not oldOutputNeuronen:
oldOutputNeuronen = [{'name': "IsNomen"}, {'name': "IsNoNomen"}]
for obj in oldOutputNeuronen:
neuronobj = Output_Neuron(obj['name'])
outputNeuronen.append(neuronobj)
# Getting outputNeuronenNames
outputNeuronenNames = map(lambda neuron: neuron.name, outputNeuronen)
for nachbarschaften in oldInputNeuronen:
for obj in nachbarschaften:
            # Indexing of outputNeuronenNames and outputNeuronen is the same
            # Finds the index of the class with that name and returns the class for 1(!) neuron
            # If neuron2 has to be active after all, change it here: TODO
neuronen = [outputNeuronen[outputNeuronenNames.index(obj['neuronNames'][0])]]
neuronobj = Input_Neuron(obj['reiz'], neuronen[0])
neuronobj.count_con = obj['count_con']
neuronobj.getNewConnection(obj['verb_infos'], outputNeuronen)
nachbarschaft.append(neuronobj)
inputNeuronen.append(nachbarschaft)
nachbarschaft = []
return outputNeuronen, inputNeuronen
if __name__ == "__main__":
    mode = raw_input("Which mode should be started (test/train): ")
if mode == "test":
test()
elif mode == "train":
train()
else:
print "Wrong Input!"
| StarcoderdataPython |
1875605 | <reponame>iamnapo/tesla-web-mining<gh_stars>1-10
# <NAME>, AEM: 16
# <NAME>, AEM: 9
# Script that iterates through tweets, calculates their polarity using the textblob library, saves the 'overall_sentiment'
# based on the polarity's value, and then calculates and plots the daily count of each sentiment (positive, negative,
# neutral), using plotly
import json
import os
from datetime import datetime, timedelta
import plotly.graph_objs as go
from plotly.offline import plot
from pymongo import ASCENDING, DESCENDING, MongoClient
from textblob import TextBlob
MONGO_HOST = os.getenv('MONGO_HOST')
client = MongoClient(MONGO_HOST)
db = client.twitterdb
tweets = db.twitter_search
# Query all tweets
for tweet in tweets.find({ "overall_sentiment": None }):
# Decide on their sentiment, based on polarity value
polarity = TextBlob(tweet["clean_tweet"]).sentiment.polarity
if polarity > 0:
sentiment = "positive"
elif polarity < 0:
sentiment = "negative"
else:
sentiment = "neutral"
# Update them with the new 'overall_sentiment' property
tweets.update_one({ "_id": tweet["_id"] }, { "$set": { "overall_sentiment": sentiment } })
# Earliest & latest timestamps
start_date = datetime.fromtimestamp(
float(list(tweets.find().sort("timestamp_ms", ASCENDING).limit(1))[0]["timestamp_ms"]) / 1000).date()
end_date = datetime.fromtimestamp(
float(list(tweets.find().sort("timestamp_ms", DESCENDING).limit(1))[0]["timestamp_ms"]) / 1000).date()
# Initialize a dict of arrays in order to save sentiments daily counts
overall_sentiment_daily_counts = dict()
for sentiment in ["negative", "neutral", "positive"]:
overall_sentiment_daily_counts[sentiment] = [0 for i in range((end_date - start_date).days + 1)]
# Calculate sentiment daily count
for tweet in tweets.find({ }, { "overall_sentiment": 1, "_id": 0, "timestamp_ms": 1 }):
tweet_date = datetime.fromtimestamp(float(tweet["timestamp_ms"]) / 1000).date()
overall_sentiment_daily_counts[tweet["overall_sentiment"]][(tweet_date - start_date).days] += 1
data = []
labels = [(start_date + timedelta(days = i)).strftime("%d/%m") for i in range((end_date - start_date).days + 1)]
ticks = [x for x in range((end_date - start_date).days + 1)]
layout = go.Layout(title = 'Overall sentiment per day', xaxis = go.layout.XAxis(ticktext = labels, tickvals = ticks),
barmode = 'relative')
tmp = []
# Create an interactive HTML plot of the results
for sentiment, counts in overall_sentiment_daily_counts.items():
if sentiment == 'positive':
color = 'green'
elif sentiment == 'negative':
color = 'red'
else:
color = 'gray'
data.append({ 'x': ticks, 'y': counts, 'name': sentiment, 'type': 'bar', 'marker': { 'color': color } })
tmp.append({ 'y': counts, 'color': color, 'name': sentiment })
fig = go.Figure(data = data, layout = layout)
plot(fig, filename = 'ov_sentiment_per_day.html')
# Also save them to a json file, so we can recreate the interactive plot in the website
with open('ov_sentiment_per_day.json', 'w', encoding = 'utf-8') as outfile:
json.dump({ "data": tmp, "ticks": ticks, "labels": labels }, outfile, ensure_ascii = False, indent = 2)
| StarcoderdataPython |
1818938 | # Generated by Django 3.0.2 on 2020-02-12 15:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('LibreBadge', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='WelcomMessage',
new_name='WelcomeMessage',
),
]
| StarcoderdataPython |
4957026 | # -*- coding: utf-8 -*-
import time
class EnumBool:
UNKNOWN = 'unknown'
NO = 'no'
YES = 'yes'
def retry(max_attempts, sleep_time):
def retry_decorator(function):
def _wrapper(*args, **kwargs):
attempt = 0
while True:
attempt += 1
try:
return function(*args, **kwargs)
                except Exception:
if attempt >= max_attempts:
return
time.sleep(sleep_time)
return _wrapper
return retry_decorator
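
if __name__ == '__main__':
    # A minimal usage sketch (illustrative only; `flaky` is a hypothetical
    # function that fails twice before succeeding). Note that after
    # max_attempts consecutive failures the wrapper returns None instead of
    # re-raising the last exception.
    attempts = {'n': 0}

    @retry(max_attempts=3, sleep_time=0.1)
    def flaky():
        attempts['n'] += 1
        if attempts['n'] < 3:
            raise RuntimeError('transient failure')
        return 'ok'

    print(flaky())  # prints 'ok' after two failed attempts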
| StarcoderdataPython |
3530938 | <gh_stars>0
from utils import get_mnist, sample_reconstructions, plot_samples
from vae import SampleLayer, vae_log_likelihood
import theano
import theano.tensor as T
import lasagne
from lasagne.nonlinearities import rectify, identity
import numpy as np
from sklearn.cross_validation import train_test_split
if __name__ == '__main__':
data = get_mnist()
train, test = train_test_split(data, test_size=0.1)
_train = theano.shared(train, borrow=True)
_test = theano.shared(test, borrow=True)
batch_size = 500
latent_size = 2
target = T.matrix()
encoder = lasagne.layers.InputLayer((None, train.shape[1]), target)
encoder = lasagne.layers.DenseLayer(encoder, num_units=100, nonlinearity=rectify)
mean = lasagne.layers.DenseLayer(encoder, num_units=latent_size, nonlinearity=identity)
log_sigma = lasagne.layers.DenseLayer(encoder, num_units=latent_size, nonlinearity=identity)
z = SampleLayer(mean=mean, log_sigma=log_sigma)
decoder1 = lasagne.layers.DenseLayer(z, num_units=100, nonlinearity=rectify)
decoder2 = lasagne.layers.DenseLayer(decoder1, num_units=train.shape[1], nonlinearity=rectify)
decoder = decoder2
z_actual = lasagne.layers.get_output(z, deterministic=False)
z_mean = lasagne.layers.get_output(mean, deterministic=False)
z_log_sigma = lasagne.layers.get_output(log_sigma, deterministic=False)
recon = lasagne.layers.get_output(decoder, deterministic=False)
ll = vae_log_likelihood(z_actual, z_mean, z_log_sigma, recon, target)
ll /= batch_size
params = lasagne.layers.get_all_params(decoder, trainable=True)
updates = lasagne.updates.adam(-ll, params, learning_rate=0.001)
i = T.iscalar()
train_fn = theano.function(
[i], ll, updates=updates,
givens={
target: _train[i * batch_size: (i + 1) * batch_size]
})
test_fn = theano.function(
[i], ll,
givens={
target: _test[i * batch_size: (i + 1) * batch_size]
})
num_train_batches = train.shape[0] / batch_size
num_test_batches = test.shape[0] / batch_size
for e in xrange(30):
train_errs = []
test_errs = []
for idx in xrange(num_train_batches):
train_errs.append(train_fn(idx))
for idx in xrange(num_test_batches):
test_errs.append(test_fn(idx))
print 'epoch', e, 'train err', np.mean(train_errs), 'test err', np.mean(test_errs)
sample_reconstructions(test, recon, target)
# construct separate decoder
z_input = T.matrix()
single_decoder = lasagne.layers.InputLayer((None, latent_size), z_input)
single_decoder = lasagne.layers.DenseLayer(single_decoder, num_units=100, nonlinearity=rectify, W=decoder1.W, b=decoder1.b)
    # num_units must match decoder2's output size for the shared weights to fit
    single_decoder = lasagne.layers.DenseLayer(single_decoder, num_units=train.shape[1], nonlinearity=rectify, W=decoder2.W, b=decoder2.b)
decode = theano.function([z_input], lasagne.layers.get_output(single_decoder))
plot_samples(decode)
| StarcoderdataPython |
4854971 | <reponame>rackerlabs/bobby
"""Tests for bobby.worker."""
import json
import mock
from twisted.internet import defer
from twisted.trial import unittest
from silverberg.client import CQLClient
from bobby import worker
from bobby.ele import MaasClient
class TestBobbyWorker(unittest.TestCase):
"""Test bobby.worker.BobbyWorker."""
def setUp(self):
"""Mock CQLClient and MaasClient."""
self.client = mock.create_autospec(CQLClient)
self.maas_client = mock.create_autospec(MaasClient)
patcher = mock.patch('bobby.worker.MaasClient')
self.addCleanup(patcher.stop)
_MaasClient = patcher.start()
_MaasClient.return_value = self.maas_client
@mock.patch('bobby.worker.cass')
def test_create_group(self, cass):
"""Test BobbyWorker.create_group."""
expected = {'groupId': 'group-abc',
'tenantId': '101010',
'notification': 'notification-def',
'notificationPlan': 'notificationPlan-ghi'}
self.maas_client.add_notification_and_plan.return_value = defer.succeed(
(expected['notification'], expected['notificationPlan']))
cass.create_group.return_value = defer.succeed(expected)
w = worker.BobbyWorker(self.client)
d = w.create_group(expected['tenantId'], expected['groupId'])
result = self.successResultOf(d)
self.assertEqual(result, expected)
self.maas_client.add_notification_and_plan.assert_called_once_with()
cass.create_group.assert_called_once_with(
self.client, '101010', 'group-abc', 'notification-def', 'notificationPlan-ghi')
@mock.patch('bobby.worker.cass')
def test_delete_group(self, cass):
"""Test BobbyWorker.delete_group."""
cass.get_group_by_id.return_value = defer.succeed({
'notification': 'notification-abc',
'notificationPlan': 'notificationPlan-def'})
self.maas_client.remove_notification_and_plan.return_value = defer.succeed(None)
cass.delete_group.return_value = defer.succeed(None)
w = worker.BobbyWorker(self.client)
d = w.delete_group('tenant-abc', 'group-def')
self.successResultOf(d)
cass.get_group_by_id.assert_called_once_with(self.client, 'tenant-abc', 'group-def')
self.maas_client.remove_notification_and_plan.assert_called_once_with(
'notification-abc', 'notificationPlan-def')
cass.delete_group.assert_called_once_with(
self.client, 'tenant-abc', 'group-def')
@mock.patch('bobby.worker.cass')
def test_create_server(self, cass):
cass.get_server_by_server_id.return_value = defer.succeed({
'serverId': 'server-abc', 'entityId': 'entity-abc'})
cass.get_group_by_id.return_value = defer.succeed({
'notificationPlan': 'plan-xyz'})
cass.get_policies_by_group_id.return_value = defer.succeed([{
'policyId': 'policy-abc',
'checkTemplate': 'check-abc',
'alarmTemplate': 'alarm-def'}])
self.maas_client.add_check.return_value = defer.succeed({'id': 'check-xyz'})
self.maas_client.add_alarm.return_value = defer.succeed({'id': 'alarm-xyz'})
cass.create_server.return_value = defer.succeed(None)
self.maas_client.create_entity.return_value = defer.succeed('entity-abc')
server = {
'OS-DCF:diskConfig': 'AUTO',
'adminPass': '<PASSWORD>',
'id': 'server-abc',
'links': [
{
'href': 'https://dfw.servers.api.rackspacecloud.com/v2/010101/servers/ef08aa7a',
'rel': 'self'
},
{
'href': 'https://dfw.servers.api.rackspacecloud.com/010101/servers/ef08aa7a',
'rel': 'bookmark'
}
]
}
w = worker.BobbyWorker(self.client)
d = w.create_server('tenant-abc', 'group-def', server)
self.successResultOf(d)
self.maas_client.create_entity.assert_called_once_with(server)
cass.create_server.assert_called_once_with(
self.client, 'tenant-abc', server['id'], 'entity-abc', 'group-def')
cass.get_server_by_server_id.assert_called_once_with(server['id'])
cass.register_policy_on_server.assert_called_once_with(self.client, 'policy-abc', server['id'], 'alarm-xyz', 'check-xyz')
@mock.patch('bobby.worker.cass')
def test_delete_server(self, cass):
cass.delete_server.return_value = defer.succeed(None)
cass.get_server_by_server_id.return_value = defer.succeed({
'serverId': 'server-abc', 'entityId': 'entity-abc'})
self.maas_client.delete_entity.return_value = defer.succeed(None)
w = worker.BobbyWorker(self.client)
d = w.delete_server('tenant-abc', 'group-def', 'server-abc')
self.successResultOf(d)
cass.get_server_by_server_id.assert_called_once_with(
self.client, 'tenant-abc', 'group-def', 'server-abc')
self.maas_client.delete_entity.assert_called_once_with('entity-abc')
cass.delete_server.assert_called_once_with(
self.client, 'tenant-abc', 'group-def', 'server-abc')
@mock.patch('bobby.worker.MaasClient')
    def test_add_policy_to_server(self, FakeMaasClient):
        """Test BobbyWorker.add_policy_to_server."""
maas_client = mock.create_autospec(MaasClient)
FakeMaasClient.return_value = maas_client
new_check = {
u'created_at': 1,
u'details': {u'file': u'blah',
u'args': u'blah'},
u'disabled': False,
u'id': u'check-abc',
u'label': u'Test check 1',
u'period': 100,
u'type': u'agent.plugin'}
def add_check(*args):
return defer.succeed(new_check)
maas_client.add_check.side_effect = add_check
new_alarm = {
"id": "alAAAA",
"check_id": "chAAAA",
"criteria": "if (metric[\"duration\"] >= 2) { return new AlarmStatus(OK); } return new AlarmStatus(CRITICAL);"}
def add_alarm(*args):
return defer.succeed(new_alarm)
maas_client.add_alarm.side_effect = add_alarm
example_check_template = json.dumps({
'type': 'agent.plugin',
'details': {'file': 'blah',
'args': 'blah'}
})
self.client.execute.return_value = defer.succeed(None)
w = worker.BobbyWorker(self.client)
d = w.add_policy_to_server('t1', 'p1', 's1', 'enOne',
example_check_template, "ALARM_DSL", "npBlah")
result = self.successResultOf(d)
self.assertEqual(result, None)
maas_client.add_check.assert_called_once_with(
'p1', 'enOne',
'{"type": "agent.plugin", "details": {"args": "blah", "file": "blah"}}')
self.client.execute.assert_called_once_with(
'INSERT INTO serverpolicies ("serverId", "policyId", "alarmId", '
'"checkId", state) VALUES (:serverId, :policyId, :alarmId, '
':checkId, false);',
{'checkId': u'check-abc', 'serverId': 's1', 'policyId': 'p1',
'alarmId': 'alAAAA'},
1)
@mock.patch('bobby.worker.MaasClient')
    def test_apply_policies_to_server(self, FakeMaasClient):
        """Test BobbyWorker.apply_policies_to_server. Basic success case."""
maas_client = mock.create_autospec(MaasClient)
FakeMaasClient.return_value = maas_client
new_check = {
u'created_at': 1,
u'details': {u'file': u'blah',
u'args': u'blah'},
u'disabled': False,
u'id': u'check-abc',
u'label': u'Test check 1',
u'period': 100,
u'type': u'agent.plugin'}
def add_check(*args):
return defer.succeed(new_check)
maas_client.add_check.side_effect = add_check
new_alarm = {
"id": "alAAAA",
"check_id": "chAAAA",
"criteria": "if (metric[\"duration\"] >= 2) { return new AlarmStatus(OK); } return new AlarmStatus(CRITICAL);"}
def add_alarm(*args):
return defer.succeed(new_alarm)
maas_client.add_alarm.side_effect = add_alarm
expected = [{'policyId': 'policy-abc',
'groupId': 'group-def',
'alarmTemplate': 'alarmTemplate-ghi',
'checkTemplate': 'checkTemplate-jkl'},
{'policyId': 'policy-xyz',
'groupId': 'group-def',
'alarmTemplate': 'alarmTemplate-uvw',
'checkTemplate': 'checkTemplate-rst'}]
def execute(query, data, consistency):
if 'INSERT' in query:
return defer.succeed(None)
elif 'SELECT' in query:
if 'groups' in query:
return defer.succeed([{
'groupId': 'group-abc',
'notificationPlan': 'plan-abc'}])
elif 'policies' in query:
return defer.succeed(expected)
self.client.execute.side_effect = execute
w = worker.BobbyWorker(self.client)
d = w.apply_policies_to_server('101010', 'group-abc', 'server1', 'enOne')
result = self.successResultOf(d)
self.assertEqual(result, None)
self.assertEqual(self.client.execute.mock_calls, [
mock.call(
'SELECT * FROM groups WHERE "tenantId"=:tenantId AND "groupId"=:groupId;',
{'groupId': 'group-abc', 'tenantId': '101010'},
1),
mock.call(
'SELECT * FROM policies WHERE "groupId"=:groupId;',
{'groupId': 'group-abc'}, 1),
mock.call(
'INSERT INTO serverpolicies ("serverId", "policyId", '
'"alarmId", "checkId", state) VALUES (:serverId, :policyId, '
':alarmId, :checkId, false);',
{'checkId': u'check-abc', 'serverId': 'server1',
'policyId': 'policy-abc', 'alarmId': 'alAAAA'},
1),
mock.call(
'INSERT INTO serverpolicies ("serverId", "policyId", '
'"alarmId", "checkId", state) VALUES (:serverId, :policyId, '
':alarmId, :checkId, false);',
{'checkId': u'check-abc', 'serverId': 'server1',
'policyId': 'policy-xyz', 'alarmId': 'alAAAA'},
1)])
| StarcoderdataPython |
11246435 | <reponame>joshiaj7/CodingChallenges
"""
Space : O(n)
Time : O(n)
"""
class Solution:
def climbStairs(self, n: int) -> int:
dp = [0] * 3
dp[0] = 1
dp[1] = 1
dp[2] = 2
if n < 3:
return dp[n]
dp = dp + ([0] * (n-2))
for i in range(3, n+1):
dp[i] = dp[i-1] + dp[i-2]
return dp[n]
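
if __name__ == "__main__":
    # Hedged usage sketch: there are 8 distinct ways to climb 5 stairs taking
    # 1 or 2 steps at a time; the recurrence dp[i] = dp[i-1] + dp[i-2] is the
    # Fibonacci sequence shifted by one.
    assert Solution().climbStairs(5) == 8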
| StarcoderdataPython |
4828594 | # -*- coding: utf-8 -*-
from enum import Enum
class SquareType(Enum):
WHITE = 0
BLACK = 1
EMPTY = 2
ERROR = 3
class GameStatus(Enum):
WHITE_WINS = 0
BLACK_WINS = 1
DRAW = 2
PLAYING = 3
ERROR = 4
| StarcoderdataPython |
3552779 | <gh_stars>1-10
value = 1
if (value >= 1 and value <= 100) or (value >= 200 and value <= 300):
print("In range 1..100 or 200..300")
else:
print("Not in range 1..100 or 200..300") | StarcoderdataPython |
3573780 | import json
from rest_framework import serializers
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from .models import Author, FriendRequest, Post, Comment, Like
from .converter import *
class AuthorSerializer(serializers.ModelSerializer):
type = serializers.CharField(default="author", read_only=True)
id = serializers.URLField(source="get_id", read_only=True)
url = serializers.URLField(allow_blank=True)
displayName = serializers.CharField(source="display_name", allow_null=True)
github = serializers.URLField(source="github_url", allow_blank=True, allow_null=True)
profileImage = serializers.URLField(source="profile_image", allow_blank=True, allow_null=True)
class Meta:
model = Author
fields = ("type","id","host","displayName","url","github","profileImage")
    # Override the default update function to update only certain fields
def update(self, instance, validated_data):
instance.github_url = validated_data.get("github_url", instance.github_url)
instance.display_name = validated_data.get("display_name", instance.display_name)
instance.profile_image = validated_data.get("profile_image", instance.profile_image)
instance.save()
return instance
# Validate the github url field
    def validate_github_url(self, value):
        if value:
            # Extract the host part of the URL without mutating the value we return
            host = value[value.find("//") + 2:]
            host = host[:host.find("/")]
            if "github.com" not in host:
                raise ValidationError(_("Author's github url must be a github url"))
        return value
class CommentSerializer(serializers.ModelSerializer):
type = serializers.CharField(default="comment", read_only=True)
id = serializers.URLField(source="get_id", read_only=True)
contentType = serializers.CharField(source="content_type")
author = AuthorSerializer(read_only=False)
numLikes = serializers.IntegerField(source="get_num_likes", read_only=True)
class Meta:
model = Comment
fields = ("type", "author", "comment", "contentType", "published", "numLikes", "id")
# Override the default create function to deserialize the author
def create(self, validated_data):
author_data = validated_data.pop('author', None)
if author_data:
author = Author.objects.get(url=author_data['url'])
validated_data['author'] = author
comment = Comment.objects.create(**validated_data)
return comment
class PostSerializer(serializers.ModelSerializer):
type = serializers.CharField(default="post", read_only=True)
id = serializers.URLField(source="get_id", read_only=True)
contentType = serializers.CharField(source='content_type', required=False)
# https://www.tomchristie.com/rest-framework-2-docs/api-guide/serializers#dealing-with-nested-objects
comments = serializers.URLField(source='get_comment_url', required=False,read_only=True)
source = serializers.URLField(source='get_source_url', required=False,read_only=True)
origin = serializers.URLField(source='get_origin_url', required=False,read_only=True)
author = AuthorSerializer(read_only=False)
categories = serializers.SerializerMethodField()
numLikes = serializers.IntegerField(source="get_num_likes", read_only=True)
commentsSrc = serializers.SerializerMethodField()
class Meta:
model = Post
fields = ("type","id","url","title","source",
"origin","description","contentType",
"content","author","categories","comments","commentsSrc","numLikes",
"published","visibility","unlisted")
    # Override the default update function to update only certain fields
def update(self, instance, validated_data):
instance.title = validated_data.get("title", instance.title)
instance.description = validated_data.get("description", instance.description)
instance.content_type = validated_data.get("content_type", instance.content_type)
instance.content = validated_data.get("content", instance.content)
instance.published = validated_data.get("published", instance.published)
instance.visibility = validated_data.get("visibility", instance.visibility)
instance.unlisted = validated_data.get("unlisted", instance.unlisted)
instance.save()
return instance
# Override the default create function to deserialize the author
def create(self, validated_data):
author_data = validated_data.pop('author', None)
if author_data:
author = Author.objects.get_or_create(url=author_data['url'])[0]
validated_data['author'] = author
post = Post.objects.create(**validated_data)
return post
def get_categories(self, obj):
try:
json_list = json.loads(obj.categories)
except:
json_list = []
return json_list
def get_commentsSrc(self, obj):
comments_list = list(obj.comments.all())
comment_dict_list = CommentSerializer(comments_list, many=True).data
comments_src = {
'type':'comments',
'post': obj.get_id(),
'id': obj.get_comment_url(),
'comments': comment_dict_list
}
return comments_src
class LikeSerializer(serializers.ModelSerializer):
type = serializers.CharField(default="Like", read_only=True)
# https://www.tomchristie.com/rest-framework-2-docs/api-guide/serializers#dealing-with-nested-objects
author = AuthorSerializer(many=False, required=True)
class Meta:
model = Like
fields = ("summary","type","author","object")
# This will create or get a Like object
def create(self, validated_data):
author_data = validated_data.pop("author", None)
if author_data:
author = Author.objects.get(**author_data)
validated_data["author"] = author
like, created = Like.objects.get_or_create(**validated_data)
return like
class FriendRequestSerializer(serializers.ModelSerializer):
type = serializers.CharField(default="Follow", read_only=True)
actor = AuthorSerializer(many=False, required=True)
object = AuthorSerializer(many=False, required=True)
class Meta:
model = FriendRequest
fields = ("type", "summary", "actor", "object")
def create(self, validated_data):
actor_data = validated_data.pop("actor", None)
if actor_data:
actor = Author.objects.get(**actor_data)
validated_data["actor"] = actor
object_data = validated_data.pop("object", None)
if object_data:
object = Author.objects.get(**object_data)
validated_data["object"] = object
        friend_request = FriendRequest.objects.create(**validated_data)
print(friend_request)
return friend_request | StarcoderdataPython |
46422 | from PySide6.QtCore import Qt, QThread, Signal, QPoint, QRect, QLocale, QTranslator, QCoreApplication, QThreadPool, \
QObject, QRunnable
from PySide6.QtGui import QCursor, QPixmap, QIcon, QFont
from PySide6.QtWidgets import QApplication, QMessageBox
from gui_about import Ui_AboutWindow
from gui_main import Ui_MainWindow
from os.path import getsize, join
from packaging import version
import webbrowser
import subprocess
import requests
import img_res # skipcq: PYL-W0611
import json
import sys
import os
__version__ = "1.11.0"
def resource_path(relative_path):
"""Determine resource path if app is built or run natively."""
if hasattr(sys, 'frozen'):
return os.path.join(sys._MEIPASS, relative_path) # skipcq: PYL-W0212
return os.path.join(os.path.abspath('.'), relative_path)
def get_dir_size(dir_path):
"""Get directory size of installed apps."""
dir_size = 0
for root, _, files in os.walk(dir_path):
dir_size += sum([getsize(join(root, name)) for name in files])
return dir_size
class Logic():
def __init__(self):
about.label_version.setText(QCoreApplication.translate("Label", "Version") + f" {__version__}")
self.total_size = 0
self.is_link_menu = False
self.main_title = QCoreApplication.translate("Label", "Select the default Windows 10 apps to uninstall:\n(Hover over app names to view description)")
self.store_title = QCoreApplication.translate("Label", "Click on an app name to view it in Microsoft Store.")
self.refresh_title = QCoreApplication.translate("Label", "Refreshing list of installed apps...")
self.size_text = QCoreApplication.translate("Label", "MB")
self.github_dialog = QCoreApplication.translate("MessageBox", "Visit the PyDebloatX GitHub page?")
self.quit_dialog = QCoreApplication.translate("MessageBox", "Quit PyDebloatX?")
self.dialog_yes = QCoreApplication.translate("Button", "Yes")
self.dialog_no = QCoreApplication.translate("Button", "No")
self.dialog_ok = QCoreApplication.translate("Button", "OK")
self.success_text = QCoreApplication.translate("MessageBox", "All selected apps were successfully uninstalled.")
self.main_widgets = (ui.refresh_btn, ui.refresh_bind, ui.store_btn, ui.store_bind, ui.button_select_all, ui.button_deselect_all, ui.button_uninstall)
self.apps_dict = ui.apps_dict
ui.progressbar.setValue(0)
ui.progressbar.setMaximum(len(self.apps_dict))
ui.progressbar.setFont(ui.font)
ui.layout_widget_labels.adjustSize()
for layout in (ui.layout_checkboxes, ui.layout_checkboxes_2, ui.layout_checkboxes_3):
layout.addStretch()
layout.setSpacing(14)
for layout_widget in (ui.layout_widget_checkboxes, ui.layout_widget_checkboxes_2, ui.layout_widget_checkboxes_3):
layout_widget.adjustSize()
ui.button_uninstall.clicked.connect(self.uninstall)
ui.button_select_all.clicked.connect(self.select_all)
ui.button_deselect_all.clicked.connect(self.deselect_all)
ui.refresh_btn.clicked.connect(self.app_refresh)
ui.refresh_bind.activated.connect(self.app_refresh)
ui.store_btn.clicked.connect(self.store_menu)
ui.store_bind.activated.connect(self.store_menu)
ui.homepage_btn.clicked.connect(self.app_homepage)
ui.homepage_bind.activated.connect(self.app_homepage)
ui.about_btn.clicked.connect(self.app_about)
ui.about_bind.activated.connect(self.app_about)
ui.quit_btn.clicked.connect(self.app_quit)
ui.quit_bind.activated.connect(self.app_quit)
about.button_quit_about.clicked.connect(about.close)
for checkbox in ui.checkbox_list:
checkbox.clicked.connect(self.enable_buttons)
self.app_refresh()
self.check_updates()
def store_menu(self):
"""Toggle between Main view and Store view."""
widgets = (ui.layout_widget_buttons, ui.label_space, ui.label_size)
if self.is_link_menu:
self.is_link_menu = False
ui.label_info.setText(self.main_title)
ui.store_btn.setIcon(QIcon(':/icon/store_icon.png'))
for i in self.apps_dict:
i.setEnabled(False)
i.setChecked(False)
for i in self.installed_apps:
i.setEnabled(True)
for i in self.selected_apps:
i.setChecked(True)
self.enable_buttons()
for widget in widgets:
widget.show()
else:
self.is_link_menu = True
ui.label_info.setText(self.store_title)
ui.store_btn.setIcon(QIcon(':/icon/back_icon.png'))
for i in self.apps_dict:
i.setEnabled(True)
i.setChecked(True)
for widget in widgets:
widget.hide()
def check_updates(self):
"""Check for updates."""
self.check_updates_thread = CheckUpdates()
self.check_updates_thread.version_signal.connect(self.show_updates)
self.check_updates_thread.start()
def show_updates(self, latest_version):
"""Show updates."""
if version.parse(latest_version) > version.parse(__version__):
msg_update = QCoreApplication.translate("MessageBox", "PyDebloatX {0} is available.\n\nVisit download page?").format(latest_version)
if self.message_box(msg_update, 2) == QMessageBox.Yes:
webbrowser.open_new('https://github.com/Teraskull/PyDebloatX/releases')
def app_refresh(self):
"""Create threads to refresh list of installed apps."""
if self.is_link_menu:
self.store_menu()
self.installed_apps = []
self.progress = 0
for i in self.apps_dict:
i.setEnabled(False)
i.setChecked(False)
ui.label_refresh.show()
ui.label_info.hide()
ui.progressbar.show()
for widget in self.main_widgets:
widget.setEnabled(False)
QApplication.setOverrideCursor(QCursor(Qt.BusyCursor))
ui.label_refresh.setText(self.refresh_title)
self.check_thread = CheckApps(self.apps_dict)
self.check_thread.app_signal.connect(self.enable_installed)
self.check_thread.progress_signal.connect(self.update_progress)
self.check_thread.start()
def thread_finished(self):
"""Set up Main view after finishing a task."""
ui.progressbar.hide()
ui.label_refresh.hide()
ui.label_info.show()
ui.progressbar.setValue(0)
QApplication.setOverrideCursor(QCursor())
ui.label_info.setText(self.main_title)
for widget in (ui.refresh_btn, ui.refresh_bind, ui.store_btn, ui.store_bind):
widget.setEnabled(True)
self.enable_buttons()
def enable_installed(self, i):
"""Enable checkboxes while refreshing list of installed apps."""
i.setEnabled(True)
self.installed_apps.append(i)
self.enable_buttons()
def update_progress(self):
"""Update progress bar while refreshing list of installed apps."""
self.progress += 1
ui.progressbar.setValue(self.progress)
if self.progress >= len(self.apps_dict):
self.thread_finished()
def uninstall_progress(self, i):
"""Update progress bar and label while uninstalling selected apps."""
self.progress += 1
ui.progressbar.setValue(self.progress)
self.installed_apps.remove(i)
app_name = i.text().replace(' && ', ' & ')
apps_left = len(self.selected_apps) - self.progress + 1
ui.label_refresh.setText(QCoreApplication.translate("Label", "Uninstalling {0}, %n app(s) left...", "", apps_left).format(app_name))
ui.label_refresh.show()
if self.progress >= len(self.selected_apps):
self.thread_finished()
self.message_box(self.success_text)
def enable_buttons(self):
"""Enable buttons or open Microsoft Store when clicking checkboxes."""
if not self.is_link_menu:
self.total_size = 0
self.selected_apps = []
for i in self.installed_apps:
if i.isChecked():
self.selected_apps.append(i)
self.total_size += self.apps_dict[i]["size"]
ui.label_size.setText(f'{self.total_size:.2f} {self.size_text}')
ui.layout_widget_labels.adjustSize()
if any(i.isChecked() for i in self.installed_apps):
ui.button_uninstall.setDisabled(False)
ui.button_deselect_all.setDisabled(False)
else:
ui.button_uninstall.setDisabled(True)
ui.button_deselect_all.setDisabled(True)
ui.label_size.setText(f'{self.total_size} {self.size_text}')
if all(i.isChecked() for i in self.installed_apps):
ui.button_select_all.setDisabled(True)
else:
ui.button_select_all.setDisabled(False)
else:
for i in self.apps_dict:
if not i.isChecked():
i.setChecked(True)
webbrowser.open_new(f'ms-windows-store://pdp{self.apps_dict[i]["link"]}')
def message_box(self, message: str, buttons: int = 1) -> int:
'''
Message box with "Yes/No" or "OK" buttons. Defaults to "OK".\n
Parameters:\n
message (str): Message shown inside the message box.
buttons (int): Amount of buttons, 1 - "OK" button, 2 - "Yes/No" buttons.
Returns:\n
choice (int): ID of the clicked button.
'''
pixmap = QPixmap(resource_path('icon.ico')).scaledToWidth(35, Qt.SmoothTransformation)
msg_box = QMessageBox()
msg_box.setFont(ui.font)
msg_box.setText(message)
if buttons == 2:
msg_yes = msg_box.addButton(QMessageBox.Yes)
msg_no = msg_box.addButton(QMessageBox.No)
msg_yes.setText(self.dialog_yes)
msg_no.setText(self.dialog_no)
msg_yes.setProperty('class', 'button_yes')
msg_no.setProperty('class', 'button_no')
msg_box.setWindowFlags(Qt.Dialog | Qt.CustomizeWindowHint)
msg_box.setIconPixmap(pixmap)
with open(resource_path('style.css'), 'r') as file:
msg_box.setStyleSheet(file.read())
msg_box.move(ui.frameGeometry().center() - QRect(QPoint(), msg_box.sizeHint()).center())
choice = msg_box.exec_()
return choice
def app_homepage(self):
"""Open GitHub app homepage after confirmation."""
if self.message_box(self.github_dialog, 2) == QMessageBox.Yes:
webbrowser.open_new('https://github.com/Teraskull/PyDebloatX')
@staticmethod
def app_about():
"""Show 'About' window."""
about.setWindowModality(Qt.ApplicationModal)
about.move(ui.geometry().center() - about.rect().center())
about.show()
def app_quit(self):
"""Quit app after confirmation."""
if self.message_box(self.quit_dialog, 2) == QMessageBox.Yes:
app.quit()
def select_all(self):
"""Select all checkboxes for installed apps."""
for i in self.installed_apps:
if not i.isChecked():
i.setChecked(True)
self.enable_buttons()
def deselect_all(self):
"""Deselect all checkboxes for installed apps."""
for i in self.installed_apps:
if i.isChecked():
i.setChecked(False)
self.enable_buttons()
def uninstall(self):
"""Create threads to uninstall selected apps after confirmation."""
apps = len(self.selected_apps)
confirm_uninstall = QCoreApplication.translate("MessageBox", "Uninstall %n app(s)?", "", apps)
space_freed_text = QCoreApplication.translate("MessageBox", "MB of space will be freed.")
msg_uninstall = f"{confirm_uninstall}\n\n{self.total_size:.2f} {space_freed_text}"
if self.message_box(msg_uninstall, 2) == QMessageBox.Yes:
for widget in self.main_widgets:
widget.setEnabled(False)
ui.label_info.hide()
self.progress = 0
ui.progressbar.setMaximum(apps)
ui.progressbar.show()
self.new_thread_list = []
for item, i in enumerate(self.selected_apps):
i.setEnabled(False)
i.setChecked(False)
self.new_thread_list.append(UninstallApps(self.apps_dict, i))
self.new_thread_list[item].signals.progress_signal.connect(self.uninstall_progress)
self.newPoolThread = RunThreadPool(self.new_thread_list)
self.newPoolThread.start()
class CheckUpdates(QThread):
"""Check for updates and get the latest version number."""
version_signal = Signal(str)
def run(self):
try:
api_url = 'https://api.github.com/repos/Teraskull/PyDebloatX/releases/latest'
api_data = requests.get(api_url, timeout=(5, 0.7)).json()
# API rate limit exceeded (https://docs.github.com/rest/overview/resources-in-the-rest-api#rate-limiting)
if 'tag_name' in api_data:
latest_version = api_data['tag_name']
self.version_signal.emit(latest_version)
except requests.exceptions.RequestException:
pass
class CheckApps(QThread):
"""Refresh list of installed apps."""
progress_signal = Signal()
app_signal = Signal(object)
def __init__(self, apps_dict):
super().__init__()
self.apps_dict = apps_dict
def run(self):
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
x = subprocess.Popen(["powershell", "Get-AppxPackage -PackageTypeFilter Main | Select Name, InstallLocation | ConvertTo-JSON"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, startupinfo=si, text=True)
names_str = x.communicate()[0]
names_list = json.loads(names_str)
for i in self.apps_dict:
temp_name = self.apps_dict[i]["name"].strip("*")
self.apps_dict[i]["size"] = 0
flag = False
if temp_name != "Xbox":
for item in names_list:
name = item["Name"]
if name.find(temp_name, 0, len(name)) != -1:
flag = True
self.apps_dict[i]["size"] += get_dir_size(item["InstallLocation"]) / 1024 / 1024
break
else:
for item in names_list:
name = item["Name"]
if name.find(temp_name, 0, len(name)) != -1 and name.find("XboxGameCallableUI", 0, len(name)) == -1:
flag = True
self.apps_dict[i]["size"] += get_dir_size(item["InstallLocation"]) / 1024 / 1024
if flag:
self.app_signal.emit(i)
self.progress_signal.emit()
class RunThreadPool(QThread):
"""Run thread pool for uninstalling selected apps."""
def __init__(self, new_thread_list):
super().__init__()
self.new_thread_list = new_thread_list
def run(self):
pool = QThreadPool()
for new_thread in self.new_thread_list:
pool.start(new_thread)
pool.waitForDone()
class UninstallSignals(QObject):
"""PyQt signal emitting class for uninstalling apps."""
progress_signal = Signal(object)
class UninstallApps(QRunnable):
"""Uninstall selected apps."""
def __init__(self, apps_dict, i):
super().__init__()
self.signals = UninstallSignals()
self.apps_dict = apps_dict
self.i = i
def run(self):
package_name = self.apps_dict[self.i]["name"]
if "Xbox" in package_name:
package_name = "*Xbox* | Where-Object {$_.name -notmatch 'XboxGameCallableUI'}"
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
x = subprocess.Popen(
["powershell", f'try {{Get-AppxPackage {package_name} -OutVariable app | Remove-AppPackage -ea stop;[bool]$app}} catch {{$false}}'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, startupinfo=si
)
x.communicate()[0]
self.signals.progress_signal.emit(self.i)
if __name__ == '__main__':
QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
QApplication.setHighDpiScaleFactorRoundingPolicy(Qt.HighDpiScaleFactorRoundingPolicy.PassThrough)
app = QApplication(sys.argv)
app.setFont(QFont("Tahoma"))
locale = QLocale()
trans = QTranslator()
if trans.load(locale, "", "", resource_path("Language"), ".qm"):
app.installTranslator(trans)
about = Ui_AboutWindow()
about.setupUi()
ui = Ui_MainWindow()
ui.setupUi()
ui.show()
logic = Logic()
sys.exit(app.exec_())
| StarcoderdataPython |
9688572 | import sys
sys.path.append("..")
from interactive.audio.recorder import AudioRecorder
from interactive.audio.io import save_recording, load_recording
import time
import os
def print_m(message):
print(time.strftime('%X') + " " + str(message));
if __name__ == '__main__':
print_m("Hi!")
recording = AudioRecorder().record()
print_m("Recording length: " + str(float(len(recording.sound_data))/recording.bitrate));
file_name = raw_input("Type name of the recording: ")
if not file_name.lower().endswith(".wav"):
file_name += ".wav";
directory = os.path.join("data", "raw")
path = os.path.join("..", "data", "raw", file_name)
if not os.path.exists(directory):
os.makedirs(directory)
print_m("Saving to " + path)
save_recording(path, recording)
print_m("Good night")
| StarcoderdataPython |
320852 | import os
import sys
def create_dir(tournament_name,output_dir):
#Replace spaces with underscores for file naming
tournament_name=tournament_name.replace(" ","_")
#Replace any & with nothing
tournament_name=tournament_name.replace("&","")
#See if output_dir directory exists, if not, exit
if not os.path.isdir(output_dir):
print("'Output Directory' in 'inputs.txt' does not exist")
sys.exit()
#See if tournament directory exists, if not create it
if not os.path.isdir(output_dir+"/"+tournament_name):
print("Directory for tournament: {} has not been created yet, creating now".format(tournament_name))
os.mkdir(output_dir+"/"+tournament_name)
    # Create subdirectories for tracking the scores, keeping the entries, and tracking the competition between entries
if not os.path.isdir(output_dir+"/"+tournament_name+"/scores"):
os.mkdir(output_dir+"/"+tournament_name+"/scores")
if not os.path.isdir(output_dir+"/"+tournament_name+"/entries"):
os.mkdir(output_dir+"/"+tournament_name+"/entries")
if not os.path.isdir(output_dir+"/"+tournament_name+"/pool_results"):
os.mkdir(output_dir+"/"+tournament_name+"/pool_results")
#tournament directory path
tournament_dir=output_dir+"/"+tournament_name
return tournament_name, tournament_dir
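
if __name__ == '__main__':
    # Hedged usage sketch: "results" is a hypothetical, already-existing
    # output directory. Spaces become underscores and '&' is dropped, so this
    # creates results/My_Tournament__Cup plus its three subdirectories.
    name, path = create_dir("My Tournament & Cup", "results")
    print(name, path)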
| StarcoderdataPython |
51844 | from sqlalchemy import DateTime, String, ForeignKey, Integer, Column, Float
from sqlalchemy.orm import relationship
from . import Base
class AumHistory(Base):
"""
Map class for table AumHistory.
- **aum_id**: Integer, primary_key.
- **aum_datetime**: DateTime, not null.
- **aum**: Float(20, 8), not null.
- **ts_name**: String(150), not null, foreign_key(ts.ts_name).
Relationships:
- **ts**: TradingSystem instance. (Many-to-One)
"""
__tablename__ = "aum_history"
aum_id = Column(Integer, primary_key = True)
aum_datetime = Column(DateTime, nullable = False)
aum = Column(Float(precision = 20, scale = 8, asdecimal = True), nullable = False)
ts_name = Column(String(150), ForeignKey("ts.ts_name"), nullable = False)
ts = relationship("Ts")
def __repr__(self):
return "<AumHistory(datetime={}, aum={}, ts={})>".format(self.aum_datetime,
self.aum,
self.ts_name
)
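
# A minimal usage sketch (assumes a configured SQLAlchemy session named
# `session` and an existing Ts row named "demo_ts"; both are hypothetical):
#
#   from datetime import datetime
#   session.add(AumHistory(aum_datetime=datetime.utcnow(),
#                          aum=1000000.0, ts_name="demo_ts"))
#   session.commit()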
| StarcoderdataPython |
6518402 | <filename>deepy/layers/recurrent.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import numpy as np
import theano.tensor as T
from deepy.core.tensor_conversion import neural_computation
from deepy.utils import XavierGlorotInitializer, OrthogonalInitializer, Scanner
from deepy.core import env
import deepy.tensor as DT
from . import NeuralLayer
OUTPUT_TYPES = ["sequence", "one"]
INPUT_TYPES = ["sequence", "one"]
class RecurrentLayer(NeuralLayer):
__metaclass__ = ABCMeta
def __init__(self, name, state_names, hidden_size=100, input_type="sequence", output_type="sequence",
inner_init=None, outer_init=None,
gate_activation='sigmoid', activation='tanh',
steps=None, backward=False, mask=None,
additional_input_dims=None):
from deepy.core.neural_var import NeuralVariable
from deepy.tensor.activations import get_activation
super(RecurrentLayer, self).__init__(name)
self.state_names = state_names
self.main_state = state_names[0]
self.hidden_size = hidden_size
self._gate_activation = gate_activation
self._activation = activation
self.gate_activate = get_activation(self._gate_activation)
self.activate = get_activation(self._activation)
self._input_type = input_type
self._output_type = output_type
self.inner_init = inner_init if inner_init else OrthogonalInitializer()
self.outer_init = outer_init if outer_init else XavierGlorotInitializer()
self._steps = steps
self._mask = mask.tensor if type(mask) == NeuralVariable else mask
self._go_backwards = backward
self.additional_input_dims = additional_input_dims if additional_input_dims else []
if input_type not in INPUT_TYPES:
raise Exception("Input type of {} is wrong: {}".format(name, input_type))
if output_type not in OUTPUT_TYPES:
raise Exception("Output type of {} is wrong: {}".format(name, output_type))
@neural_computation
def step(self, step_inputs):
new_states = self.compute_new_state(step_inputs)
# apply mask for each step if `output_type` is 'one'
if step_inputs.get("mask"):
mask = step_inputs["mask"].dimshuffle(0, 'x')
for state_name in new_states:
new_states[state_name] = new_states[state_name] * mask + step_inputs[state_name] * (1 - mask)
return new_states
@abstractmethod
def compute_new_state(self, step_inputs):
"""
:type step_inputs: dict
:rtype: dict
"""
@abstractmethod
def merge_inputs(self, input_var, additional_inputs=None):
"""
Merge inputs and return a map, which will be passed to core_step.
:type input_var: T.var
:param additional_inputs: list
:rtype: dict
"""
@abstractmethod
def prepare(self):
pass
@neural_computation
def compute_step(self, state, lstm_cell=None, input=None, additional_inputs=None):
"""
Compute one step in the RNN.
:return: one variable for RNN and GRU, multiple variables for LSTM
"""
if not self.initialized:
input_dim = None
if input and hasattr(input.tag, 'last_dim'):
input_dim = input.tag.last_dim
self.init(input_dim)
input_map = self.merge_inputs(input, additional_inputs=additional_inputs)
input_map.update({"state": state, "lstm_cell": lstm_cell})
output_map = self.compute_new_state(input_map)
outputs = [output_map.pop("state")]
outputs += output_map.values()
for tensor in outputs:
tensor.tag.last_dim = self.hidden_size
if len(outputs) == 1:
return outputs[0]
else:
return outputs
@neural_computation
def get_initial_states(self, input_var, init_state=None):
"""
:type input_var: T.var
:rtype: dict
"""
initial_states = {}
for state in self.state_names:
if state != "state" or not init_state:
if self._input_type == 'sequence' and input_var.ndim == 2:
init_state = T.alloc(np.cast[env.FLOATX](0.), self.hidden_size)
else:
init_state = T.alloc(np.cast[env.FLOATX](0.), input_var.shape[0], self.hidden_size)
initial_states[state] = init_state
return initial_states
@neural_computation
def get_step_inputs(self, input_var, states=None, mask=None, additional_inputs=None):
"""
:type input_var: T.var
:rtype: dict
"""
step_inputs = {}
if self._input_type == "sequence":
if not additional_inputs:
additional_inputs = []
if mask:
step_inputs['mask'] = mask.dimshuffle(1, 0)
step_inputs.update(self.merge_inputs(input_var, additional_inputs=additional_inputs))
else:
# step_inputs["mask"] = mask.dimshuffle((1,0)) if mask else None
if additional_inputs:
step_inputs.update(self.merge_inputs(None, additional_inputs=additional_inputs))
if states:
for name in self.state_names:
step_inputs[name] = states[name]
return step_inputs
def compute(self, input_var, mask=None, additional_inputs=None, steps=None, backward=False, init_states=None, return_all_states=False):
from deepy.core.neural_var import NeuralVariable
if additional_inputs and not self.additional_input_dims:
self.additional_input_dims = map(lambda var: var.dim(), additional_inputs)
result_var = super(RecurrentLayer, self).compute(input_var,
mask=mask, additional_inputs=additional_inputs, steps=steps, backward=backward, init_states=init_states, return_all_states=return_all_states)
if return_all_states:
state_map = {}
for k in result_var.tensor:
state_map[k] = NeuralVariable(result_var.tensor[k], self.output_dim)
return state_map
else:
return result_var
def compute_tensor(self, input_var, mask=None, additional_inputs=None, steps=None, backward=False, init_states=None, return_all_states=False):
# prepare parameters
backward = backward if backward else self._go_backwards
steps = steps if steps else self._steps
mask = mask if mask else self._mask
if mask and self._input_type == "one":
raise Exception("Mask only works with sequence input")
# get initial states
init_state_map = self.get_initial_states(input_var)
if init_states:
for name, val in init_states.items():
if name in init_state_map:
init_state_map[name] = val
# get input sequence map
if self._input_type == "sequence":
# Move middle dimension to left-most position
# (sequence, batch, value)
if input_var.ndim == 3:
input_var = input_var.dimshuffle((1,0,2))
seq_map = self.get_step_inputs(input_var, mask=mask, additional_inputs=additional_inputs)
else:
init_state_map[self.main_state] = input_var
seq_map = self.get_step_inputs(None, mask=mask, additional_inputs=additional_inputs)
# scan
retval_map, _ = Scanner(
self.step,
sequences=seq_map,
outputs_info=init_state_map,
n_steps=steps,
go_backwards=backward
).compute()
# return main states
main_states = retval_map[self.main_state]
if self._output_type == "one":
if return_all_states:
return_map = {}
for name, val in retval_map.items():
return_map[name] = val[-1]
return return_map
else:
return main_states[-1]
elif self._output_type == "sequence":
if return_all_states:
return_map = {}
for name, val in retval_map.items():
return_map[name] = val.dimshuffle((1,0,2))
return return_map
else:
main_states = main_states.dimshuffle((1,0,2)) # ~ batch, time, size
# if mask: # ~ batch, time
# main_states *= mask.dimshuffle((0, 1, 'x'))
return main_states
class RNN(RecurrentLayer):
def __init__(self, hidden_size, **kwargs):
kwargs["hidden_size"] = hidden_size
super(RNN, self).__init__("RNN", ["state"], **kwargs)
@neural_computation
def compute_new_state(self, step_inputs):
xh_t, h_tm1 = map(step_inputs.get, ["xh_t", "state"])
if not xh_t:
xh_t = 0
h_t = self.activate(xh_t + T.dot(h_tm1, self.W_h) + self.b_h)
return {"state": h_t}
@neural_computation
def merge_inputs(self, input_var, additional_inputs=None):
if not additional_inputs:
additional_inputs = []
        all_inputs = ([input_var] if input_var is not None else []) + additional_inputs
h_inputs = []
for x, weights in zip(all_inputs, self.input_weights):
wi, = weights
h_inputs.append(T.dot(x, wi))
merged_inputs = {
"xh_t": sum(h_inputs)
}
return merged_inputs
def prepare(self):
self.output_dim = self.hidden_size
self.W_h = self.create_weight(self.hidden_size, self.hidden_size, "h", initializer=self.outer_init)
self.b_h = self.create_bias(self.hidden_size, "h")
self.register_parameters(self.W_h, self.b_h)
self.input_weights = []
if self._input_type == "sequence":
normal_input_dims = [self.input_dim]
else:
normal_input_dims = []
all_input_dims = normal_input_dims + self.additional_input_dims
for i, input_dim in enumerate(all_input_dims):
wi = self.create_weight(input_dim, self.hidden_size, "wi_{}".format(i+1), initializer=self.outer_init)
weights = [wi]
self.input_weights.append(weights)
self.register_parameters(*weights) | StarcoderdataPython |
9629948 | <filename>pyark/rest_client.py
import logging
import requests
import datetime
import abc
from furl import furl
import pyark.backoff_retrier as backoff_retrier
from pyark.errors import CvaServerError, CvaClientError
class RestClient(object):
_session = requests.Session()
def __init__(self, url_base, endpoint_base=None, retries=5):
self._url_base = url_base
self._endpoint_base = endpoint_base
self._headers = {
'Accept': 'application/json'
}
self._token = None
self._renewed_token = False
# decorates the REST verbs with retries
self._get = backoff_retrier.wrapper(self._get, retries)
self._post = backoff_retrier.wrapper(self._post, retries)
self._delete = backoff_retrier.wrapper(self._delete, retries)
def _build_url(self, endpoint):
f = furl(self._url_base)
segments = []
if self._endpoint_base:
segments = self._endpoint_base.split("/")
if isinstance(endpoint, (str,)):
endpoint = endpoint.split("/")
if isinstance(endpoint, (list,)):
segments = segments + endpoint
else:
segments.append(endpoint)
f.path.segments = segments
return f.url
def _set_authenticated_header(self, renew_token=False):
if not self._token or renew_token:
self._token = self._get_token()
self._headers["Authorization"] = "{token}".format(token=self._token)
@abc.abstractmethod
def _get_token(self):
raise ValueError("Not implemented")
def _post(self, endpoint, payload, session=True, verify=True, **params):
if endpoint is None or payload is None:
raise ValueError("Must define payload and endpoint before post")
url = self._build_url(endpoint)
if session:
response = self._session.post(url, json=payload, params=params, headers=self._headers)
else:
response = requests.post(url, json=payload, params=params, headers=self._headers)
request = "{method} {url}".format(
method="POST", url="{}?{}".format(url, "&".join(RestClient._build_parameters(params))))
logging.info(request)
if verify:
self._verify_response(response, request)
return response.json(), dict(response.headers)
def _get(self, endpoint, session=True, **params):
if endpoint is None:
raise ValueError("Must define endpoint before get")
url = self._build_url(endpoint)
if session:
response = self._session.get(url, params=params, headers=self._headers)
else:
response = requests.get(url, params=params, headers=self._headers)
request = "{method} {url}".format(
method="GET", url="{}?{}".format(url, "&".join(RestClient._build_parameters(params))))
logging.info(request)
self._verify_response(response, request)
return response.json(), dict(response.headers)
def _patch(self, endpoint, session=True, **params):
if endpoint is None:
raise ValueError("Must define endpoint before patch")
url = self._build_url(endpoint)
if session:
response = self._session.patch(url, params=params, headers=self._headers)
else:
response = requests.patch(url, params=params, headers=self._headers)
request = "{method} {url}".format(
method="PATCH", url="{}?{}".format(url, "&".join(RestClient._build_parameters(params))))
logging.info(request)
self._verify_response(response, request)
return response.json(), dict(response.headers)
def _delete(self, endpoint, **params):
if endpoint is None:
raise ValueError("Must define endpoint before get")
url = self._build_url(endpoint)
response = self._session.delete(url, params=params, headers=self._headers)
request = "{method} {url}".format(
method="DELETE", url="{}?{}".format(url, "&".join(RestClient._build_parameters(params))))
logging.info(request)
self._verify_response(response, request)
return response.json(), dict(response.headers)
@staticmethod
def _build_parameters(params):
parsed_params = []
for k, v in params.items():
if isinstance(v, list):
parsed_params.extend(["{}={}".format(k, e) for e in v])
else:
parsed_params.append("{}={}".format(k, v))
return parsed_params
def _verify_response(self, response, request):
logging.debug("{date} response status code {status}".format(
date=datetime.datetime.now(),
status=response.status_code)
)
if response.status_code != 200:
# first 403 renews the token, second 403 in a row fails
if response.status_code in (403, 401) and not self._renewed_token:
# renews the token if unauthorised
self._set_authenticated_header(renew_token=True)
self._renewed_token = True
# RequestException will trigger a retry and with the renewed token it may work
self.log_error(response, request)
raise requests.exceptions.RequestException(response=response)
# ValueError will not
if 500 <= response.status_code < 600:
self.log_error(response, request)
raise CvaServerError("{}:{}".format(response.status_code, response.text))
elif 400 <= response.status_code < 500:
if response.status_code != 404: # we want to hide 404 for empty results to the end user
self.log_error(response, request)
raise CvaClientError("{}:{}".format(response.status_code, response.text))
else:
self.log_error(response, request)
raise ValueError("{}:{}".format(response.status_code, response.text))
else:
# once a 200 response token is not anymore just renewed, it can be renewed again if a 403 arrives
self._renewed_token = False
def log_error(self, response, request):
logging.error(request)
logging.error("{} - {}".format(response.status_code, response.text))
| StarcoderdataPython |
6422780 | <filename>app/database/models/blog_tag.py
# coding: utf-8
from app.plugins import db
class BlogTag(db.Model):
__tablename__ = 'blog_tag'
__table_args__ = {'schema': 'onekki_site'}
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255)) | StarcoderdataPython |
6675792 | <filename>download_data.py
from src.data_utils import get_data_types, get_data_file_name, save_data
from src.download_utils import download_from_data_tracker
def main():
for data_type in get_data_types():
data = download_from_data_tracker(data_type)
if data is not None:
fname = get_data_file_name(data_type)
save_data(data, fname=fname)
return True
if __name__ == "__main__":
main()
| StarcoderdataPython |
79843 | import sys
import argparse
import os
import cv2
import yaml
from PIL import Image
from importlib.machinery import SourceFileLoader
import torch
from torch import nn
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import pandas
__filedir__ = os.path.dirname(os.path.realpath(__file__))
# network_module = SourceFileLoader(".", os.path.join(__filedir__, "network.py")).load_module()
import feature_graph.models.dtoid.network as network_module
class DTOIDWrapper(nn.Module):
def __init__(self, backend="cuda", no_filter_z=False):
super(DTOIDWrapper, self).__init__()
# Initialize the network
model = network_module.Network()
model.eval()
# model_path = os.path.join(__filedir__, "model.pth.tar")
model_path = "/home/qiaog/src/DTOID/model.pth.tar"
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint["state_dict"])
if backend == "cuda":
model = model.cuda()
self.model = model
self.backend = backend
self.no_filter_z = no_filter_z
self.preprocess = network_module.PREPROCESS
# self.model_directory = os.path.join(__filedir__, "templates")
self.model_directory = "/home/qiaog/src/DTOID/templates"
self.template_cache = {}
def clearCache(self):
del self.template_cache
self.template_cache = {}
def getTemplates(self, linemod_model):
'''
linemod_model: str of the linemod object ID ("01", "02", ...)
'''
if linemod_model in self.template_cache:
return
assert type(linemod_model) is str
model_name = "hinterstoisser_" + linemod_model
template_dir = os.path.join(self.model_directory, model_name)
output_file = "{}.yml".format(model_name)
#load text file
pose_file = os.path.join(template_dir, "poses.txt")
pose_file_np = pandas.read_csv(pose_file, sep=" ", header=None).values
pose_z_values = pose_file_np[:, 11]
# Template
global_template_list = []
template_paths = [x for x in os.listdir(template_dir) if len(x) == 12 and "_a.png" in x]
template_paths.sort()
preprocessed_templates = []
# features for all templates (240)
template_list = []
template_global_list = []
template_ratios_list = []
batch_size = 10
temp_batch_local = []
temp_batch_global = []
temp_batch_ratios = []
iteration = 0
for t in tqdm(template_paths):
# open template and template mask
template_im = cv2.imread(os.path.join(template_dir, t))[:, :, ::-1]
template = Image.fromarray(template_im)
template_mask = cv2.imread(os.path.join(template_dir, t.replace("_a", "_m")))[:, :, 0]
template_mask = Image.fromarray(template_mask)
# preprocess and concatenate
template = self.preprocess[1](template)
template_mask = self.preprocess[2](template_mask)
template = torch.cat([template, template_mask], dim=0)
if self.backend == "cuda":
template = template.cuda()
template_feature = self.model.compute_template_local(template.unsqueeze(0))
# Create mini-batches of templates
if iteration == 0:
temp_batch_local = template_feature
template_feature_global = self.model.compute_template_global(template.unsqueeze(0))
template_global_list.append(template_feature_global)
elif iteration % (batch_size) == 0:
template_list.append(temp_batch_local)
temp_batch_local = template_feature
elif iteration == (len(template_paths) - 1):
temp_batch_local = torch.cat([temp_batch_local, template_feature], dim=0)
template_list.append(temp_batch_local)
else:
temp_batch_local= torch.cat([temp_batch_local, template_feature], dim=0)
iteration += 1
self.template_cache[linemod_model] = (template_list, template_global_list, pose_z_values)
def forward(self, img_numpy, obj_id):
template_list, template_global_list, pose_z_values = self.template_cache[obj_id]
img_h, img_w, img_c = img_numpy.shape
img = Image.fromarray(img_numpy)
img = self.preprocess[0](img)
network_h = img.size(1)
network_w = img.size(2)
if self.backend == "cuda":
img = img.cuda()
top_k_num = 500
top_k_scores, top_k_bboxes, top_k_template_ids, seg_pred = self.model.forward_all_templates(
img.unsqueeze(0), template_list, template_global_list, topk=top_k_num)
pred_seg_np = seg_pred.cpu().numpy()
pred_scores_np = top_k_scores.cpu().numpy()
pred_bbox_np = top_k_bboxes.cpu().numpy()
pred_template_ids = top_k_template_ids[:, 0].long().cpu().numpy()
template_z_values = pose_z_values[pred_template_ids]
if not self.no_filter_z:
pred_w_np = pred_bbox_np[:, 2] - pred_bbox_np[:, 0]
pred_h_np = pred_bbox_np[:, 3] - pred_bbox_np[:, 1]
pred_max_dim_np = np.stack([pred_w_np, pred_h_np]).transpose().max(axis=1)
pred_z = (124 / pred_max_dim_np) * -template_z_values
# Filter based on predicted Z values
pred_z_conds = (pred_z > 0.4) & (pred_z < 2)
            pred_z_conds_ids = np.where(pred_z_conds)[0]
pred_scores_np = pred_scores_np[pred_z_conds_ids]
pred_bbox_np = pred_bbox_np[pred_z_conds_ids]
pred_template_ids = pred_template_ids[pred_z_conds_ids]
pred_z = pred_z[pred_z_conds_ids]
# Keep top 1 (eval)
pred_scores_np = pred_scores_np[:1]
pred_bbox_np = pred_bbox_np[:1]
pred_template_ids = pred_template_ids[:1]
pred_z = pred_z[:1]
pred_seg_np = pred_seg_np[:1]
output = {
"pred_bbox_np": pred_bbox_np,
"pred_scores_np": pred_scores_np,
"pred_seg_np": pred_seg_np,
"pred_template_ids": pred_template_ids,
"network_w": network_w,
"network_h": network_h,
"img_h": img_h,
"img_w": img_w,
}
return output
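# Illustrative usage (object id and image are placeholders; template data
# must exist under model_directory for the requested LINEMOD object):
#
#   detector = DTOIDWrapper(backend="cuda")
#   detector.getTemplates("01")               # pre-compute template features
#   out = detector.forward(rgb_image, "01")   # rgb_image: HxWx3 uint8 array
#   best_bbox = out["pred_bbox_np"][0]
#   best_score = out["pred_scores_np"][0]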
| StarcoderdataPython |
1908179 | import numpy as np
import cv2
import math
import euler
def getSmallestRotMatBetweenTwoDirections(w_directionBefore,w_directionAfter): #rotate frame (to keep the point constant)
w_rotVec=getSmallestRotVecBetweenTwoDirections(w_directionBefore,w_directionAfter)
return rotVecToRotMat(w_rotVec) #oldFrame_newFrame
def getSmallestRotVecBetweenTwoDirections(w_directionBefore,w_directionAfter):
#print(w_directionBefore)
#print(w_directionAfter)
#print(math.acos(np.dot(w_directionBefore,w_directionAfter)))
axis=crossAxis(w_directionBefore,w_directionAfter)
w_rotVec=axis*math.acos(np.dot(w_directionBefore,w_directionAfter))
return w_rotVec
def getAngleBetweenTwoDirection(w_directionBefore,w_directionAfter):
return math.acos(np.dot(w_directionBefore,w_directionAfter))
def getSmallestRotMatFromThreePoints(C,P1,P2): #C is center of rotation
C=C[:3]
P1=P1[:3]
P2=P2[:3]
worldA_directionBefore = normalize(P1 - C)
worldA_directionAfter = normalize(P2 - C)
oldFrame_newFrame=rotationHomo(getSmallestRotMatBetweenTwoDirections(worldA_directionBefore,worldA_directionAfter)) #tested
return oldFrame_newFrame
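# Quick sanity sketch (illustrative): rotating the unit x-axis onto the unit
# y-axis about the origin yields a rotation of magnitude pi/2 about +z.
#
#   R = getSmallestRotMatFromThreePoints(np.zeros(3),
#                                        np.array([1., 0., 0.]),
#                                        np.array([0., 1., 0.]))
#   # length(rotMatToRotVec(R[:3, :3])) is approximately np.pi / 2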
def combineRotVecAndTranslation(rotVec,translation):
return combineRotMatAndTranslation(rotVecToRotMat(rotVec), translation)
def getTransformation(xAxis,yAxis,zAxis,origin):
#t=np.zeros([3,4])
t=np.eye(4)
t[:3,0]=xAxis
t[:3,1]=yAxis
t[:3,2]=zAxis
t[:3,3]=origin[:3]
return t #3x4 transformation matrix (X,Y,Z,Origin) #change to 4x4 on 11/11/19
def rotMatToRotVec(rotMat):
rotVec,_=cv2.Rodrigues(rotMat)
return np.ravel(rotVec)
def rotVecToRotMat(rotVec):
rotMat,_=cv2.Rodrigues(rotVec)
return rotMat
def getRotVecBetweenFrames(w_a,w_b):
a_b = w_a[:3,:3].transpose() @ w_b[:3,:3]
return rotMatToRotVec(a_b)
def allKeyExist(d,listOfKeys):
return all(name in d for name in listOfKeys)
def length(v):
return np.linalg.norm(v)
def normalize(v):
if v.shape[0]==3:
return v/np.linalg.norm(v)
if v.shape[0]==4:
return normalize(v[:3])
def crossAxis(v1,v2):
return normalize(np.cross(v1,v2))
def homo(v):
if(len(v.shape)==1 and v.shape[0]==3):
return np.concatenate([v,np.array([1])],axis=0)
elif(len(v.shape)==2 and v.shape[0]==3 and v.shape[1]==4):
return np.concatenate([v,np.array([[0,0,0,1]])])
elif(len(v.shape)==2 and v.shape[0]==3 and v.shape[1]==3):
ans=np.identity(4)
ans[:3,:3]=v
return ans
else:
return v
def inverseHomo(T):
ans=np.zeros([4,4])
ans[3,3]=1
ans[:3,:3]=T[:3,:3].transpose()
ans[:3,3]= - ans[:3,:3] @ T[:3,3]
return ans
def combine3axis(A_xAxisB,A_yAxisB,A_ZaxisB):
A_Bframe=np.stack([A_xAxisB,A_yAxisB,A_ZaxisB],axis=-1)
return A_Bframe
def combineRotMatAndTranslation(rotMat,tran):
ans=homo(rotMat)
ans[:3,3]=tran
return ans
def translationHomo(tran):
return combineRotMatAndTranslation(np.eye(3),tran)
def rotationHomo(rotMat):
return combineRotMatAndTranslation(rotMat,np.zeros(3))
def doubleCrossAxis():
return 0
def segmentR_segmentM_orientation():
return 0
def rx(a):
return np.array([
[1,0,0],
[0,np.cos(a),-np.sin(a)],
[0,np.sin(a),np.cos(a)]
])
def ry(a):
return np.array([
[np.cos(a),0,np.sin(a)],
[0,1,0],
[-np.sin(a),0,np.cos(a)]
])
def rz(a):
return np.array([
[np.cos(a),-np.sin(a),0],
[np.sin(a),np.cos(a),0],
[0,0,1]
])
def eulerZXY(rotMat): #use this version
R=rotMat[:3,:3]
y,x,z=euler.R_to_euler(R,'yxz','static') #,second_sol=True)
    # The solution quality can occasionally be poor, so convert back to a
    # rotation matrix and check the orientation error in degrees.
R2 = rz(z)@rx(x)@ry(y) #it should be very close to R
Rdiff = R2.transpose() @ R
degDiff=length(rotMatToRotVec(Rdiff))*180/np.pi
if(degDiff>0.1):
print("*******",degDiff) #tested, no issue
a=wrap(z)*180/np.pi
b=wrap(x)*180/np.pi
c=wrap(y)*180/np.pi
return [a,b,c] #[Z,X,Y]
'''
def eulerZXY(rotMat):
R=rotMat[:3,:3]
z,x,y=euler.R_to_euler(R,'zxy','static') #,second_sol=True)
#I feel that the quality could be bad sometimes
# I should revert it back to rotMat can check orientation error in degree
R2 = ry(y)@rx(x)@rz(z) #it should be very close to R
Rdiff = R2.transpose() @ R
degDiff=length(rotMatToRotVec(Rdiff))*180/np.pi
if(degDiff>0.1):
print("*******",degDiff)
a=wrap(z)*180/np.pi
b=wrap(x)*180/np.pi
c=wrap(y)*180/np.pi
return [a,b,c] #[Z,X,Y]
'''
'''
def eulerZXY(rotMat):
#not smooth
#z,x,y=euler.R_to_euler_zxy(rotMat[:3,:3])
#return [z*180/np.pi,x*180/np.pi,y*180/np.pi]
#smooth but wrong direction
#a,b,c=euler.R_to_euler(rotMat[:3,:3].transpose(),'yxz')
#return [a*180/np.pi,b*180/np.pi,c*180/np.pi]
#weird angle still come out (look the same as the simplest one)
#a,b,c=euler.R_to_euler(rotMat[:3,:3].transpose(),'yxz')
#return [-c*180/np.pi,-b*180/np.pi,-a*180/np.pi] #[Z,X,Y]
#not smooth, looks like the first one (zxy original)
#y,x,z=euler.R_to_euler(rotMat[:3,:3].transpose(),'yxz')
#return [-z*180/np.pi,-x*180/np.pi,-y*180/np.pi]
#y,x,z=euler.R_to_euler(rotMat[:3,:3].transpose(),'yxz')
#return [x*180/np.pi,y*180/np.pi,z*180/np.pi]
#simplest one that should work
a,b,c=euler.R_to_euler(rotMat[:3,:3],'zxy','static') #,second_sol=True)
a=wrap(a)*180/np.pi
b=wrap(b)*180/np.pi
c=wrap(c)*180/np.pi
return [a,b,c] #[Z,X,Y]
#a,b,c=euler.R_to_euler(rotMat[:3,:3].transpose(),'zxy','rotating') #static or rotating
#a*=180/np.pi
#b*=180/np.pi
#c*=180/np.pi
#return [a,b,c] #[Z,X,Y]
'''
def wrap(a): #just to make number stay in a smaller range
if(a>np.pi):
return a-(2*np.pi)
if(a<-np.pi):
return a+(2*np.pi)
return a | StarcoderdataPython |
145239 | from botocore.exceptions import ClientError
import boto3
import json
import sys
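# Invocation sketch, inferred from the sys.argv parsing further down
# (the settings file maps environment names to a "project_name" entry):
#
#   python this_script.py <env> <settings.json>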
POLICY_DOCUMENT = {
    "Statement": [
        {
            "Action": ["dynamodb:Scan", "dynamodb:Query"],
            "Effect": "Allow",
            "Resource": "*"
        },
        {
            "Action": ["ec2:Describe*"],
            "Effect": "Allow",
            "Resource": "*"
        },
        {
            "Action": ["rds:Describe*"],
            "Effect": "Allow",
            "Resource": "*"
        },
        {
            "Action": ["s3:Get*", "s3:List*"],
            "Effect": "Allow",
            "Resource": "*"
        },
        {
            "Action": ["sdb:GetAttributes", "sdb:List*", "sdb:Select*"],
            "Effect": "Allow",
            "Resource": "*"
        },
        {
            "Action": ["sns:Get*", "sns:List*"],
            "Effect": "Allow",
            "Resource": "*"
        },
        {
            "Action": ["sqs:ListQueues", "sqs:GetQueueAttributes", "sqs:ReceiveMessage"],
            "Effect": "Allow",
            "Resource": "*"
        },
        {
            "Action": ["autoscaling:Describe*"],
            "Effect": "Allow",
            "Resource": "*"
        },
        {
            "Action": ["elasticloadbalancing:Describe*"],
            "Effect": "Allow",
            "Resource": "*"
        },
        {
            "Action": ["cloudwatch:Describe*", "cloudwatch:List*", "cloudwatch:Get*"],
            "Effect": "Allow",
            "Resource": "*"
        },
        {
            "Action": ["iam:Get*", "iam:List*"],
            "Effect": "Allow",
            "Resource": "*"
        }
    ]
}
LAMBDA_ASSUME_POLICY = {
"Version": "2012-10-17",
"Statement":[{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Action": "sts:AssumeRole"
},{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "apigateway.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
client = boto3.client('iam')
ROLE_NAME = 'awslimits'
try:
client.create_role(
RoleName=ROLE_NAME,
AssumeRolePolicyDocument=json.dumps(LAMBDA_ASSUME_POLICY),
)
except ClientError as exc:
if exc.response['Error']['Code'] != 'EntityAlreadyExists':
raise
client.put_role_policy(
RoleName=ROLE_NAME,
PolicyName=ROLE_NAME,
PolicyDocument=json.dumps(POLICY_DOCUMENT),
)
env, settings = sys.argv[1:]
settings = json.loads(open(settings).read())[env]
function_name = "-".join([settings['project_name'], env])
lambda_client = boto3.client("lambda", region_name='us-east-1')
try:
lambda_client.delete_function(
FunctionName=function_name,
)
except ClientError as exc:
if exc.response['Error']['Code'] != 'ResourceNotFoundException':
raise
apigateway_client = boto3.client("apigateway", region_name='us-east-1')
apis = apigateway_client.get_rest_apis()['items']
matching_api_ids = [api['id'] for api in apis if api['name'] == function_name]
for matching_api_id in matching_api_ids:
response = apigateway_client.delete_rest_api(
restApiId=matching_api_id
)
| StarcoderdataPython |
3336767 | #DESAFIO 038:
import time
n1 = float(input("Digite o primeiro número: "))
n2 = float(input("Digite o segundo número: "))
print("Calculando.. .")
time.sleep(2)
if n1 > n2:
print("O primeiro número é maior!")
elif n2 > n1:
print("O segundo número é maior!")
else:
print("Os números são iguais!")
| StarcoderdataPython |
6681373 | '''
Created on Apr 21, 2014
@author: Prateek
'''
import unittest
from mywork.task3 import *
class Test(unittest.TestCase):
def setUp(self):
pass
def testClassify(self):
pass
def testPopulate_datasets(self):
pass
def testCrossvalidate(self):
pass
def testFeatureExtractor(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | StarcoderdataPython |
3346871 | <reponame>QuDong/Algorithm4
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 28 11:27:34 2016
@author: dong.qu
"""
class Node():
def __init__(self, data):
self.data=data
self.left=None
self.right=None
self.hd = float("inf")
def btm_view_std(root):
if not root:
return
hd = 0
m = {} #map: dict
q = [root]
root.hd = 0
while q:
temp = q.pop(0)
hd = temp.hd
m[hd] = temp.data
if temp.left:
temp.left.hd = hd - 1
q.append(temp.left)
if temp.right:
temp.right.hd = hd + 1
q.append(temp.right)
return [m[k] for k in sorted(m.keys())]
def lvl_travser(root):
q = [root]
while q:
temp = q.pop(0)
print(temp.data)
if temp.left:
q.append(temp.left)
if temp.right:
q.append(temp.right)
#=========================================================
# below is my way of doing this, but results are slightly different
def btm_view(root):
lvldict = {}
lvl = 0
dfs(root, lvl, lvldict)
return [lvldict[k] for k in sorted(lvldict.keys())]
def dfs(root, lvl, lvldict):
if root:
dfs(root.left, lvl-1, lvldict)
dfs(root.right, lvl+1, lvldict)
if lvl not in lvldict:
lvldict[lvl] = root.data
"""
20
/ \
8 22
/ \ / \
5 3 4 25
/ \
10 14
"""
root = Node(20)
root.left = Node(8)
root.right = Node(22)
root.left.left = Node(5)
root.left.right = Node(3)
root.left.right.left = Node(10)
root.left.right.right = Node(14)
root.right.left = Node(4)
root.right.right = Node(25)
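# For this tree the two views differ at horizontal distance 0, because the
# DFS variant keeps the first value seen per distance while the BFS variant
# keeps the last one:
#   btm_view(root)     -> [5, 10, 3, 14, 25]
#   btm_view_std(root) -> [5, 10, 4, 14, 25]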
print(btm_view(root))
print(btm_view_std(root)) | StarcoderdataPython |
3470531 | <reponame>maydewd/stoq-plugins-public<filename>connector/stdout/stdout/stdout.py
# Copyright 2014-2015 PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Overview
========
Sends content to STDOUT
"""
from stoq.plugins import StoqConnectorPlugin
class StdoutConnector(StoqConnectorPlugin):
def __init__(self):
super().__init__()
def activate(self, stoq):
self.stoq = stoq
super().activate()
def save(self, payload, **kwargs):
"""
Print results to STDOUT
:param bytes payload: Content to be printed to STDOUT
:param **kwargs kwargs: Additional attributes (unused)
"""
        if isinstance(payload, dict):
print(self.stoq.dumps(payload))
else:
print(payload)
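# Sketch of the connector in isolation (plugin wiring is normally handled by
# the stoQ framework itself; the Stoq import path below is an assumption):
#
#   from stoq.core import Stoq
#   connector = StdoutConnector()
#   connector.activate(Stoq())
#   connector.save({"result": "ok"})    # dicts are serialized via stoq.dumps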
| StarcoderdataPython |
1784884 | <filename>orientacao_objetos/direcao.py
"""
A direção terá a responsabilidade de controlar a direção. Ela oferece os seguintes atributos:
1) Valor de direção com valores possíveis: Norte, Sul, Leste, Oeste.
2) Método girar_a_direita
3) Método girar_a_esquerda
"""
class Direcao:
def __init__(self):
self.valor = "Norte"
def girar_a_direita(self):
"""Deve alterar o valor virando a direita.
Deverá alterar o valor da direção para a próxima direção olhando a sua direita,
por exemplo:
>>> direcao = Direcao()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Sul'
"""
direcoes = {"Norte": "Leste", "Leste": "Sul", "Sul": "Oeste", "Oeste": "Norte"}
self.valor = direcoes.get(self.valor)
def girar_a_esquerda(self):
"""Deve alterar o valor virando a esquerda.
Deverá alterar o valor da direção para a próxima direção a sua esquerda,
por exemplo:
>>> direcao = Direcao()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Sul'
"""
direcoes = {"Norte": "Oeste", "Oeste": "Sul", "Sul": "Leste", "Leste": "Norte"}
self.valor = direcoes.get(self.valor)
| StarcoderdataPython |
6414990 | import pytest
import os
import sys
BASEDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.isdir(os.path.join(BASEDIR, 'depend_test_framework')):
os.environ['PATH'] += ":" + os.path.join(BASEDIR, 'tests')
sys.path.insert(0, BASEDIR)
from depend_test_framework.env import Env
def test_env():
e = Env()
e.set_data('a.c', 1)
assert e['a']['c'].data == 1
assert e.get_data('a.c').data == 1
assert e.struct_table() == '{ a|False: { c|True: {},},}'
e.set_data('a.c', False)
assert e.struct_table() == '{}'
e2 = Env()
e2.set_data('a.b', 1)
e.set_data('a.c', 1)
assert not e2 <= e
assert not e2 >= e
assert e2 is not e
e2.set_data('a.c', 1)
assert not e2 <= e
assert e2 >= e
assert e2.__repr__() == "<Env path='' data='None'>"
assert e2['a']['c'].__repr__() == "<Env path='a.c' data='1'>"
e3 = Env()
e3.set_data('a.c', False)
assert not e3.get_data('a.c').data
e4 = Env()
e4.set_data('d', True)
e3.set_data('a.c', e4)
assert e3.struct_table() == "{ a|False: { c|False: { d|True: {},},},}"
assert e3.get_data('a.c.d').__repr__() == "<Env path='a.c.d' data='True'>"
| StarcoderdataPython |
6409998 | <reponame>Oltanis/undergrad_MC_course
"""Read data associated with test Ising model in ising.py"""
# To ignore pylint numpy "no-member" errors...
# pylint: disable=E1101
import re
import numpy
import inputs.obs
import inputs.util
from inputs.util import Label
from inputs.util import Observable
from inputs.ensemble import EnsembleNVT
from inputs.parameter import Parameter
import htk.util
from htk.histogram import BetaReweighter
from htk.histogram import KTReweighter
from htk.histogram import Reweighter
class IsingModelData(inputs.obs.ObservableData):
"""DataSet object implementation for NVT Ising model data
It is expected that the data are read from a single file
which may be provided to the contructor.
"""
def __init__(self, filename=None):
"""
Create an Ising model data set (in the NVT ensemble)
"""
super(IsingModelData, self).__init__(EnsembleNVT())
if filename is not None:
self.load(filename)
def load(self, filename=None):
"""Load data from file"""
# Parameters
f = open(filename, "r")
with f:
line = f.readline()
line = f.readline()
match = re.search(r" (\d+)$", line)
n = int(match.group(0))
v = 1.0*n*n
line = f.readline()
match = re.search(r" (\w+.\w+)$", line)
j = float(match.group(0))
line = f.readline()
match = re.search(r" (\w+.\w+)$", line)
h = float(match.group(0))
line = f.readline()
match = re.search(r" (\w+.\w+)$", line)
kT = float(match.group(0))
# Load the parameters
self.add_parameter(n*n, Label("N", "Number of spins", None))
self.add_parameter(kT, Label("kT", "Temperature", "k_bT"))
self.add_parameter(v, Label("V", "Volume", "sites"))
self.add_parameter(j, Label("J", "Coupling constant", "k_bT"))
self.add_parameter(h, Label("H", "External field", "k_bT"))
self.data_source = filename
self.data_type = "Ising Model (2d) " + str(n) + "x" + str(n)
# Load the observable data
data = numpy.loadtxt(filename, skiprows=9)
tdata = data[:, 0]
sdata = data[:, 1]
mdata = data[:, 2]
# Form the total energy (per site)
edata = sdata.copy()
edata[:] = - j*sdata[:] - h*mdata[:]
tobs = Observable(tdata, Label("t", "Time", "MC Sweeps"))
sobs = Observable(sdata, Label("S", "Interaction Energy", "k_bT/site"))
mobs = Observable(mdata, Label("M", "Magnetisation", "k_bT/site"))
eobs = Observable(edata, Label("E", "Total Energy", "k_bT/site"))
self.add_observable(tobs, independent_variable=True)
self.add_observable(sobs)
self.add_observable(mobs)
self.add_observable(eobs)
# Reweighters
# Reweighting always takes place via the total energy
# (system, not per site), so introduce a factor of the
# volume
vparam = self.parameter("v")
tparam = self.parameter("kt")
hparam = self.parameter("h")
beta = Parameter(1.0/kT, Label("beta", "Inverse Energy", "1/k_bT"))
rbeta = BetaReweighter("beta", beta, vparam, eobs)
rkt = KTReweighter("kt", tparam, vparam, eobs)
# To reweight wrt external field, a factor of v/kT is
# required as we have magnetistation per site
alpha = Parameter(v/kT, Label("a", "Volume/k_bT", "sites/k_bT"))
rh = Reweighter("h", hparam, alpha, mobs)
self.add_reweighter(rbeta)
self.add_reweighter(rkt)
self.add_reweighter(rh)
def reweight_cv(self, ktnew):
"""A convenience to reweight C_V to a series of new temperatures
Arguments:
ktnew (float or numpy.ndarray): the new temperatures
Returns:
            Specific heat capacity
"""
volume = self.parameter("V")
e0 = self.observable("e").data
e1 = volume*e0[:]
e2 = e1[:]*e1[:]
try:
cvnew = []
for kt in ktnew:
e1r = self.reweighter("kt").reweight_obs(e1, kt)
e2r = self.reweighter("kt").reweight_obs(e2, kt)
cv = htk.util.nvt_cv(e1r, e2r, kt, volume)
cvnew.append(cv)
cvnew = numpy.array(cvnew)
except TypeError:
e1r = self.reweighter("kt").reweight_obs(e1, ktnew)
e2r = self.reweighter("kt").reweight_obs(e2, ktnew)
cvnew = htk.util.nvt_cv(e1r, e2r, ktnew, volume)
return cvnew
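# Sketch of intended use (the data file must follow the layout parsed by
# load() above, and the htk package must be importable):
#
#   data = IsingModelData("ising_32x32.dat")      # hypothetical file name
#   kts = numpy.linspace(2.0, 2.5, 11)
#   cv = data.reweight_cv(kts)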
| StarcoderdataPython |
6642390 | # Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Unit tests for client module."""
# pylint: disable=protected-access
import unittest
import mock
from ggrc.integrations.synchronization_jobs import assessment_sync_job
from ggrc.integrations import integrations_errors, constants
from ggrc.integrations.synchronization_jobs import sync_utils
class BaseClientTest(unittest.TestCase):
"""Tests basic functions."""
def test_collect_assessment_issues(self):
"""Tests collection issues associated with Assessments."""
assessment1_mock = mock.MagicMock(id=1, status='In Review')
issue1_mock = mock.MagicMock(
issue_tracked_obj=assessment1_mock,
component_id='1',
issue_id='t1',
issue_type='bug1',
issue_priority='P1',
issue_severity='S1',
due_date=None
)
issue2_mock = mock.MagicMock(
issue_tracked_obj=None,
component_id=None,
issue_id='t2',
issue_type='bug2',
issue_priority='P3',
issue_severity='S3',
due_date=None
)
filter_mock = mock.MagicMock()
filter_mock.return_value.order_by.return_value.all.return_value = [
issue1_mock,
issue2_mock,
]
with mock.patch.multiple(
sync_utils.models.IssuetrackerIssue,
query=mock.MagicMock(filter=filter_mock)
):
actual = sync_utils.collect_issue_tracker_info("Assessment")
self.assertEquals(actual, {
't1': {
'object_id': 1,
'component_id': '1',
'state': {
'status': 'In Review',
'type': 'bug1',
'priority': 'P1',
'severity': 'S1',
'due_date': None,
},
}
})
def test_iter_issue_batches(self):
"""Tests fetching issues from Issue Tracer in batches."""
cli_mock = mock.MagicMock()
cli_mock.search.side_effect = iter([
{
'issues': [
{
'issueId': 't1',
'issueState': {
'status': 'FIXED',
'type': 'bug1',
'priority': 'P1',
'severity': 'S1',
'custom_fields': [{
'name': 'Due Date',
'value': '2018-09-13',
'type': 'Date',
'display_string': 'Due Date',
}],
'ccs': []
},
},
{
'issueId': 't2',
'issueState': {
'status': 'FIXED',
'type': 'bug2',
'priority': 'P2',
'severity': 'S2',
'ccs': []
},
},
],
},
])
with mock.patch.object(sync_utils.issues, 'Client', return_value=cli_mock):
actual = list(sync_utils.iter_issue_batches([1, 2, 3]))
self.assertEquals(actual, [
{
't1': {
'status': 'FIXED',
'type': 'bug1',
'priority': 'P1',
'severity': 'S1',
'custom_fields': [{
'name': 'Due Date',
'value': '2018-09-13',
'type': 'Date',
'display_string': 'Due Date',
}],
'ccs': []
},
't2': {
'status': 'FIXED',
'type': 'bug2',
'priority': 'P2',
'severity': 'S2',
'custom_fields': [],
'ccs': []
},
},
])
self.assertEqual(cli_mock.search.call_args_list, [
mock.call({
'issue_ids': [1, 2, 3],
'page_size': 100,
}),
])
def test_iter_issue_batches_error(self):
"""Tests handling error fetching issues from Issue Tracer in batches."""
cli_mock = mock.MagicMock()
cli_mock.search.side_effect = integrations_errors.HttpError('Test')
with mock.patch.object(sync_utils.issues, 'Client', return_value=cli_mock):
actual = list(sync_utils.iter_issue_batches([1, 2, 3]))
self.assertEqual(actual, [])
def test_update_issue(self):
"""Tests updating issue."""
cli_mock = mock.MagicMock()
self.assertIsNotNone(sync_utils.update_issue(cli_mock, 1, 'params'))
cli_mock.update_issue.assert_called_once_with(1, 'params')
def test_update_issue_with_retry(self):
"""Tests updating issue with retry."""
cli_mock = mock.MagicMock()
exception = integrations_errors.HttpError('Test', status=429)
cli_mock.update_issue.side_effect = iter([
exception,
exception,
exception,
exception,
None,
])
with mock.patch.object(sync_utils.time, 'sleep') as sleep_mock:
sync_utils.update_issue(cli_mock, 1, 'params')
self.assertEqual(cli_mock.update_issue.call_args_list, [
mock.call(1, 'params'),
mock.call(1, 'params'),
mock.call(1, 'params'),
mock.call(1, 'params'),
mock.call(1, 'params'),
])
self.assertEqual(sleep_mock.call_args_list, [
mock.call(1),
mock.call(1),
mock.call(1),
mock.call(1),
])
def test_update_issue_with_raise(self):
"""Tests updating issue with raising an exception."""
cli_mock = mock.MagicMock()
exception = integrations_errors.HttpError('Test', status=429)
cli_mock.update_issue.side_effect = iter([
exception,
exception,
exception,
exception,
exception,
])
with mock.patch.object(sync_utils.time, 'sleep') as sleep_mock:
with self.assertRaises(integrations_errors.HttpError) as exc_mock:
sync_utils.update_issue(cli_mock, 1, 'params')
self.assertEqual(exc_mock.exception.status, 429)
self.assertEqual(cli_mock.update_issue.call_args_list, [
mock.call(1, 'params'),
mock.call(1, 'params'),
mock.call(1, 'params'),
mock.call(1, 'params'),
mock.call(1, 'params'),
])
self.assertEqual(sleep_mock.call_args_list, [
mock.call(1),
mock.call(1),
mock.call(1),
mock.call(1),
])
def test_sync_issue_tracker_statuses(self): # pylint: disable=invalid-name
"""Tests issue synchronization flow."""
assessment_issues = {
'1': {
'object_id': 1,
'state': {
'status': 'In Review',
'type': 'BUG1',
'priority': 'P1',
'severity': 'S1',
'due_date': None,
},
},
'2': {
'object_id': 2,
'state': {
'status': 'Not Started',
'type': 'BUG2',
'priority': 'P2',
'severity': 'S2',
'due_date': None,
},
},
}
batches = [
{
1: {
'status': 'FIXED',
'type': 'BUG1',
'priority': 'P1',
'severity': 'S1',
'custom_fields': [],
},
},
{
2: {
'status': 'FIXED',
'type': 'BUG2',
'priority': 'P2',
'severity': 'S2',
'custom_fields': [],
},
3: {
'status': 'FIXED',
'type': 'BUG2',
'priority': 'P2',
'severity': 'S2',
'custom_fields': [],
},
},
]
cli_mock = mock.MagicMock()
cli_patch = mock.patch.object(
sync_utils.issues, 'Client', return_value=cli_mock)
with cli_patch, mock.patch.multiple(
sync_utils,
iter_issue_batches=mock.MagicMock(
return_value=iter(batches)),
update_issue=mock.DEFAULT
):
with mock.patch.object(sync_utils,
"collect_issue_tracker_info",
return_value=assessment_issues):
assessment_sync_job.sync_assessment_attributes()
iter_calls = sync_utils.iter_issue_batches.call_args_list
self.assertEqual(len(iter_calls), 1)
self.assertItemsEqual(iter_calls[0][0][0], ['1', '2'])
sync_utils.update_issue.assert_called_once_with(cli_mock, '2', {
'status': 'ASSIGNED',
'type': 'BUG2',
'priority': 'P2',
'severity': 'S2',
'ccs': [],
'component_id': None
})
def test_due_date_equals(self):
"""Due date current and issue tracker equals."""
custom_fields_payload = [
{
"name": constants.CUSTOM_FIELDS_DUE_DATE,
"value": "2018-10-10",
"type": "DATE",
"display_string": constants.CUSTOM_FIELDS_DUE_DATE
}
]
custom_fields_issuetracker = [
{
constants.CUSTOM_FIELDS_DUE_DATE: "2018-10-10"
},
{
"field1": "value1"
},
{
"field2": "value2"
}
]
due_dates_equals, remove_custom_fields = \
assessment_sync_job._compare_custom_fields(
custom_fields_payload,
custom_fields_issuetracker
)
self.assertTrue(due_dates_equals)
self.assertFalse(remove_custom_fields)
def test_due_date_not_equals(self):
"""Due date current and issue tracker not equals."""
custom_fields_payload = [
{
"name": constants.CUSTOM_FIELDS_DUE_DATE,
"value": "2018-10-10",
"type": "DATE",
"display_string": constants.CUSTOM_FIELDS_DUE_DATE
}
]
custom_fields_issuetracker = [
{
constants.CUSTOM_FIELDS_DUE_DATE: "2018-11-10"
},
{
"field1": "value1"
},
{
"field2": "value2"
}
]
due_dates_equals, remove_custom_fields = \
assessment_sync_job._compare_custom_fields(
custom_fields_payload,
custom_fields_issuetracker
)
self.assertFalse(due_dates_equals)
self.assertFalse(remove_custom_fields)
def test_due_date_is_empty(self):
"""Due date is empty for issue tracker."""
custom_fields_payload = [
{
"name": constants.CUSTOM_FIELDS_DUE_DATE,
"value": "2018-10-10",
"type": "DATE",
"display_string": constants.CUSTOM_FIELDS_DUE_DATE
}
]
custom_fields_issuetracker = [
{
"field1": "value1"
},
{
"field2": "value2"
}
]
due_dates_equals, remove_custom_fields = \
assessment_sync_job._compare_custom_fields(
custom_fields_payload,
custom_fields_issuetracker
)
self.assertTrue(due_dates_equals)
self.assertTrue(remove_custom_fields)
def test_custom_fields_is_empty(self):
"""Custom fields for issue tracker is empty."""
custom_fields_payload = [
{
"name": constants.CUSTOM_FIELDS_DUE_DATE,
"value": "2018-10-10",
"type": "DATE",
"display_string": constants.CUSTOM_FIELDS_DUE_DATE
}
]
custom_fields_issuetracker = []
due_dates_equals, remove_custom_fields = \
assessment_sync_job._compare_custom_fields(
custom_fields_payload,
custom_fields_issuetracker
)
self.assertTrue(due_dates_equals)
self.assertTrue(remove_custom_fields)
def test_group_ccs(self):
"""Group ccs from system and issutracker."""
ccs_payload = [
"<EMAIL>",
"<EMAIL>",
"<EMAIL>"
]
ccs_issuetracker = [
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>"
]
ccs_grouped = \
assessment_sync_job._group_ccs_with_issuetracker(
ccs_payload,
ccs_issuetracker
)
expected_result = [
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>"
]
self.assertListEqual(ccs_grouped, expected_result)
def test_group_ccs_wh_system(self):
"""Group ccs w/h ccs from system."""
ccs_payload = []
ccs_issuetracker = [
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>"
]
ccs_grouped = \
assessment_sync_job._group_ccs_with_issuetracker(
ccs_payload,
ccs_issuetracker
)
self.assertListEqual(ccs_issuetracker, ccs_grouped)
def test_group_ccs_wh_issuetracker(self):
"""Group ccs w/h ccs from issuetracker."""
ccs_payload = [
"<EMAIL>",
"<EMAIL>",
"<EMAIL>"
]
ccs_issuetracker = []
ccs_grouped = \
assessment_sync_job._group_ccs_with_issuetracker(
ccs_payload,
ccs_issuetracker
)
self.assertListEqual(ccs_payload, ccs_grouped)
| StarcoderdataPython |
5178284 | from django.forms.widgets import URLInput
from core.models import Resource
class ResourceInput(URLInput):
"""
URLInput for Resource objects.
"""
def format_value(self, value):
"""
Return a value as it should appear when rendered in a template.
Currently, value might be an int, the primary key to a Resource object.
format_value looks up that Resource and returns its URL. This is dumb
and convoluted, and I should just store the URL directly in each
Nomination or Claim.
"""
if isinstance(value, int):
return Resource.objects.get(id=value).url
else:
return value
| StarcoderdataPython |
1984638 | <reponame>IsaacJT/genologics<gh_stars>0
#!/usr/bin/env python
from __future__ import print_function
from os.path import isdir
import os
import sys
from unittest import TestCase
from genologics.epp import EppLogger
file_path = os.path.realpath(__file__)
test_dir_path = os.path.dirname(file_path)
tmp_dir_path = test_dir_path + '/nose_tmp_output'
CWD = os.getcwd()
class TestLog(TestCase):
def setUp(self):
"""Create temporary dir if necessary,
otherwise clear contents of it"""
if not isdir(tmp_dir_path):
os.mkdir(tmp_dir_path)
self.tearDown()
os.chdir(test_dir_path)
def tearDown(self):
"""remove temporary output files"""
for d in os.listdir(tmp_dir_path):
d_path = os.path.join(tmp_dir_path,d)
try:
os.remove(d_path)
            except OSError:
for f in os.listdir(d_path):
f_path = os.path.join(d_path,f)
os.remove(f_path)
os.rmdir(d_path)
assert os.listdir(tmp_dir_path) == []
def test_stderr(self):
""" Stderr should be printed to stderr and logged"""
tmp_file = os.path.join(tmp_dir_path,'tmp_log')
saved_stderr = sys.stderr
tmp_stderr = os.path.join(tmp_dir_path,'tmp_stderr')
with open(tmp_stderr,'w') as sys.stderr:
with EppLogger(tmp_file, prepend=False) as epp_logger:
print('stderr nosetest', file=sys.stderr)
sys.stderr = saved_stderr
with open(tmp_stderr,'r') as stderr:
stream_lines = stderr.readlines()
assert 'stderr nosetest' in stream_lines[-1]
with open(tmp_file,'r') as log_file:
log_lines = log_file.readlines()
assert 'stderr nosetest' in log_lines[-1]
def test_stdout(self):
""" Stdout should be logged but not printed"""
tmp_file = os.path.join(tmp_dir_path,'tmp_log')
saved_stdout = sys.stdout
tmp_stdout = os.path.join(tmp_dir_path,'tmp_stdout')
with open(tmp_stdout,'w') as sys.stdout:
with EppLogger(tmp_file, prepend=False) as epp_logger:
print('stdout nosetest', file=sys.stdout)
sys.stdout = saved_stdout
with open(tmp_stdout,'r') as stdout:
stream_lines = stdout.readlines()
assert not stream_lines
with open(tmp_file,'r') as log_file:
log_lines = log_file.readlines()
assert 'stdout nosetest' in log_lines[-1]
def test_exception(self):
""" Exceptions should be printed and logged"""
# Hard to test, if exceptions are caught in a try statement,
# they will not be printed...
pass
| StarcoderdataPython |
1906651 | <gh_stars>1-10
import pytest
from botus_receptus import config
def test_load(tmp_path):
c = tmp_path / 'config.toml'
c.write_text(
'''[bot]
bot_name = "botty"
discord_api_key = "API_KEY"'''
)
bot_config = config.load(str(c))
assert bot_config['bot_name'] == 'botty'
assert bot_config['discord_api_key'] == 'API_KEY'
assert 'logging' in bot_config
assert bot_config.get('logging', {}).get('log_file') == 'botty.log'
def test_load_logging_config(tmp_path):
c = tmp_path / 'config.toml'
c.write_text(
'''[bot]
bot_name = "botty"
discord_api_key = "API_KEY"
[bot.logging]
log_file = "botty-log.log"
log_to_console = true
log_level = "warning"'''
)
bot_config = config.load(str(c))
assert 'logging' in bot_config
assert bot_config.get('logging', {}).get('log_file') == 'botty-log.log'
assert bot_config.get('logging', {}).get('log_to_console')
assert bot_config.get('logging', {}).get('log_level') == 'warning'
def test_load_no_bot_section(tmp_path):
c = tmp_path / 'config.toml'
c.write_text(
'''[foo]
bot_name = "botty"
discord_api_key = "API_KEY"
'''
)
with pytest.raises(
config.ConfigException, match='"bot" section not in config file'
):
config.load(str(c))
def test_load_no_bot_name(tmp_path):
c = tmp_path / 'config.toml'
c.write_text(
'''[bot]
discord_api_key = "API_KEY"
'''
)
with pytest.raises(
config.ConfigException, match='"bot_name" not specified in the config file'
):
config.load(str(c))
def test_load_no_api_key(tmp_path):
c = tmp_path / 'config.toml'
c.write_text(
'''[bot]
bot_name = "botty"
'''
)
with pytest.raises(
config.ConfigException,
match='"discord_api_key" not specified in the config file',
):
config.load(str(c))
| StarcoderdataPython |
97309 | <reponame>akhiljalan/ctsp-open-data
import numpy as np
import os
def load_embeddings(filename):
'''
Function source: Berkeley CS 194-129, Homework 3.
'''
vocab = []
embed = []
with open(filename, 'r', encoding="utf-8") as f:
for line in f.readlines():
row = line.strip().split(' ')
vocab.append(row[0])
embed.append(row[1:])
embed = np.asarray(embed).astype(np.float32)
return vocab, embed
def load_glove():
'''
Function source: Berkeley CS 194-129, Homework 3.
'''
# Load the GloVe vectors into numpy
glove_filepath = os.path.join('datasets', 'glove.6B.50d.txt')
vocab, embed = load_embeddings(glove_filepath)
vocab_size = len(vocab)
embed_dim = len(embed[0])
assert vocab_size > 0, "The vocabulary shouldn't be empty; did you download the GloVe weights?"
    print('Loaded %d %d-dimensional embeddings.' % (vocab_size, embed_dim))
    return vocab, embed
def get_embed_for_words(words, vocab, embed):
    word_embeds = []
    fin_words = []
    for i in range(len(vocab)):
        for word in list(words):  # iterate over a copy; words is mutated below
            if word == vocab[i]:
                print('Word {} is in the dataset'.format(vocab[i]))
                fin_words.append(word)
                word_embeds.append(embed[i])
                words.remove(vocab[i])
    return fin_words, word_embeds
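# Illustrative wiring of the helpers above (GloVe file path as in load_glove):
#
#   vocab, embed = load_embeddings(os.path.join('datasets', 'glove.6B.50d.txt'))
#   words, vecs = get_embed_for_words(['cat', 'dog'], vocab, embed)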
def compute_sim_matrix(category_embeds, tag_embed_matrix):
    # convert dtype to float from string.
    category_embeds = category_embeds.astype(np.float32)
    tag_embed_matrix = tag_embed_matrix.astype(np.float32)
    # normalize your embeddings by l2 norm. saves an extra step later
    normalized_tag_embeds = tag_embed_matrix * (1 / np.linalg.norm(tag_embed_matrix, axis=0))
    normalized_category_embeds = category_embeds * np.expand_dims(1 / np.linalg.norm(category_embeds, axis=1), -1)
    # Compute matmul and masking.
    sim_matrix = np.matmul(normalized_category_embeds, normalized_tag_embeds)
    masked_sim_matrix = sim_matrix * (sim_matrix >= 0)
    return masked_sim_matrix
def compute_total_score(sim_matrix):
num_entries = sim_matrix.shape[0] * sim_matrix.shape[1]
return np.sum(sim_matrix) / num_entries | StarcoderdataPython |
182900 | import logging_functions as lf
import time
import randomized_record
import config
CHECK_CONFIGURATION = False
TARGET_ACTIVITIES = [
"com.google.android.gm/.ConversationListActivityGmail",
"com.google.android.gm/.EmlViewerActivityGmail",
"com.google.android.gm/.GmailActivity",
"com.google.android.gm/com.android.mail.ui.settings.PublicPreferenceActivity",
"com.google.android.gm/.ui.MailboxSelectionActivityGmail",
"com.google.android.gm/.CreateShortcutActivityGmail",
"com.google.android.gm/.CreateShortcutActivityGoogleMail",
"com.google.android.gm/com.android.mail.ui.MailActivity",
"com.google.android.gm/.ComposeActivityGmailExternal",
"com.google.android.gm/.ui.MailActivityGmail",
"com.google.android.gm/.ui.MailActivityGmail",
"com.google.android.gm/.ComposeActivityGmailExternal",
#"com.amazon.mShop.android.shopping/com.amazon.mShop.details.ProductDetailsActivity",
#"com.amazon.mShop.android.shopping/com.amazon.mShop.android.home.PublicUrlActivity",
#"com.amazon.mShop.android.shopping/com.amazon.mShop.search.RetailSearchFragmentActivity",
#"com.amazon.mShop.android.shopping/com.amazon.mShop.search.SearchActivity",
]
def launch_activity(activity_target):
return lf.adb("am start -n " + activity_target, get_output=True)
def start_stop_activity(activity_target):
package_name = activity_target.split("/")[0]
print("Package name of " + activity_target + ": " + package_name)
lf.trigger_new_event(activity_target)
launch_activity(activity_target)
time.sleep(config.DELAY_AFTER_LAUNCH)
lf.kill_app(package_name)
time.sleep(config.DELAY_AFTER_KILL)
def check_config():
failed_launches = []
for activity in TARGET_ACTIVITIES:
print("\nAttempt to launch " + activity)
output = launch_activity(activity)
print(output)
if "SecurityException" in output:
failed_launches.append(activity)
time.sleep(1)
print("\n" + str(len(failed_launches)) + " failed launches occured:", failed_launches, "\n")
def main():
print("Record activity launches\n")
lf.start_logging_procedure()
if not CHECK_CONFIGURATION:
randomized_record.acquire_data_randomized(TARGET_ACTIVITIES, config.records_per_app(), start_stop_activity)
else:
check_config()
lf.stop_logging_app()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
4910336 | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2021 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA-Workflow-Engine-Snakemake utilities."""
from reana_commons.publisher import WorkflowStatusPublisher
from reana_commons.utils import build_progress_message
from snakemake.jobs import Job
def publish_workflow_start(
workflow_uuid: str, publisher: WorkflowStatusPublisher, job: Job
):
"""Publish to MQ the start of the workflow."""
job_count = len([rule for rule in job.dag.rules if not rule.norun])
total_jobs = {"total": job_count, "job_ids": []}
status_running = 1
publisher.publish_workflow_status(
workflow_uuid,
status=status_running,
message={"progress": build_progress_message(total=total_jobs)},
)
def publish_job_submission(
workflow_uuid: str, publisher: WorkflowStatusPublisher, reana_job_id: str
):
"""Publish to MQ the job submission."""
running_jobs = {"total": 1, "job_ids": [reana_job_id]}
status_running = 1
publisher.publish_workflow_status(
workflow_uuid,
status=status_running,
message={"progress": build_progress_message(running=running_jobs)},
)
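# Sketch of how these helpers are meant to be wired together (the publisher
# construction below is illustrative; real connection details come from
# reana-commons configuration):
#
#   publisher = WorkflowStatusPublisher()
#   publish_workflow_start(workflow_uuid, publisher, first_job)
#   ...
#   publish_job_submission(workflow_uuid, publisher, reana_job_id)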
| StarcoderdataPython |
3460005 | <reponame>AlexKouzy/ethnicity-facts-and-figures-publisher
from tests.functional.pages import HomePage
def test_site_search_form_action_updated_by_javascript(driver, live_server):
home_page = HomePage(driver, live_server)
home_page.get()
assert home_page.is_current()
expected_url = f"http://localhost:{live_server.port}/search"
assert driver.find_element_by_id("search-form").get_attribute("action") == expected_url
def test_can_search_from_homepage_with_javascript(driver, live_server):
home_page = HomePage(driver, live_server)
home_page.get()
assert home_page.is_current()
home_page.search_site("health")
| StarcoderdataPython |
9640618 | """ Suppose, you are a scientist at NASA. You were supposed to make a dictionary which was supposed to have the city as
one of the keys. The city key is a string with a value of String stored against it. But your product manager understood his mistake,
he thinks location would be a better word if used instead of city.It happened because, most of the employees joining NASA are not from cities
anymore but also in sub urban areas. Location will make the data more accurate.
Write a program to rename a key “city” to a “ location” in the following dictionary.
sample_dict = {
"name": "<NAME>",
"age":70,
"salary": 80000,
"city": "Chittagong"
}
"""
sample_dict = {
"name": "<NAME>",
"age": 70,
"salary": 80000,
"city": "Chittagong"
}
sample_dict_2 = {
"name": "obama",
"age": 60,
"salary": 90000,
"city": "Dhaka"
}
sample_dict_3 = {
"name": "Trump",
"age": 85,
"salary": 30000,
"city": "Uganda"
}
sample_dict_4 = {
"name": "Biden",
"age": 85,
"salary": 100000,
"city": "W.Dc"
}
sample_list = [sample_dict, sample_dict_2, sample_dict_3, sample_dict_4]
def rename_key(sample_dict_to_change):
    if "city" in sample_dict_to_change:
sample_dict_to_change["location"] = sample_dict_to_change.pop("city")
return sample_dict_to_change
def a(list_of_dicts):
""" a() method will modify key of dictionaries and returns modified list of dictionaries """
new_list_of_dicts = []
for dict in list_of_dicts:
new_list_of_dicts.append(rename_key(dict))
return list_of_dicts
print(rename_key(sample_dict))
print(a(sample_list))
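# After these two calls every dictionary in sample_list carries a "location"
# key in place of "city"; rename_key is a no-op for dicts already renamed.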
| StarcoderdataPython |
6627692 | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
from django.db import models
class Member(models.Model):
class Meta:
db_table = u'members'
verbose_name = u'member'
verbose_name_plural = u'members'
member_id = models.AutoField(primary_key=True)
legacy_id = models.TextField(blank=True) # This field type is a guess.
full_name = models.CharField(max_length=255, blank=True)
given_names = models.CharField(max_length=255, blank=True)
family_name = models.CharField(max_length=255, blank=True)
join_date = models.DateField(null=True, blank=True)
city = models.CharField(max_length=255, blank=True)
state = models.CharField(max_length=255, blank=True)
region = models.CharField(max_length=255, blank=True)
country = models.CharField(max_length=255)
chapter = models.CharField(max_length=100, blank=True)
chapter_id = models.TextField(blank=True) # This field type is a guess.
occupation = models.CharField(max_length=100, blank=True)
citizen = models.TextField() # This field type is a guess.
password = models.CharField(max_length=255, blank=True)
last_changed = models.DateTimeField(null=True, blank=True)
def __unicode__(self):
return u" %s (%s) " % (self.full_name, self.member_id, )
class Chapter(models.Model):
chapter_code = models.CharField(max_length=4, primary_key=True, db_column=u'Chapter_Code') # x.
chapter_descr = models.CharField(max_length=50, db_column=u'Chapter_Descr') # x.
class Meta:
db_table = u'chapter'
class Chapters(models.Model):
# TODO this is not member_id? seems more like a normal pk for ChapterInfo
member_id = models.CharField(max_length=255, primary_key=True) # This field type is a guess.
name = models.CharField(max_length=255, blank=True)
legacy_status = models.CharField(max_length=1, blank=True)
code = models.CharField(max_length=4, blank=True)
contact = models.CharField(max_length=255, blank=True)
rep_id = models.TextField(blank=True) # This field type is a guess.
url = models.CharField(max_length=255, blank=True)
meeting_city = models.CharField(max_length=255, blank=True)
contact_html = models.TextField(blank=True)
contact_text = models.TextField(blank=True)
meeting_text = models.TextField(blank=True)
size = models.CharField(max_length=255, blank=True)
events = models.TextField(blank=True)
comments = models.TextField(blank=True)
fees = models.CharField(max_length=255, blank=True)
display = models.TextField() # This field type is a guess.
class Meta:
db_table = u'chapters'
class CommentsAuthors(models.Model):
id = models.CharField(max_length=12, primary_key=True, db_column=u'Id') # x.
last_name = models.CharField(max_length=50, db_column=u'Last_Name') # x.
first_name = models.CharField(max_length=50, db_column=u'First_Name') # x.
country = models.CharField(max_length=3, db_column=u'Country') # x.
pin = models.TextField(db_column=u'PIN') # x. This field type is a guess.
class Meta:
db_table = u'comments_authors'
class Country(models.Model):
country_code = models.CharField(max_length=2, primary_key=True, db_column=u'Country_Code') # x.
country_descr = models.CharField(max_length=50, db_column=u'Country_Descr') # x.
country_flag = models.CharField(max_length=4, db_column=u'Country_Flag', blank=True) # x.
class Meta:
db_table = u'country'
class Tournament(models.Model):
tournament_code = models.CharField(max_length=20, primary_key=True, db_column=u'Tournament_Code')
description = models.TextField(db_column='Tournament_Descr')
tournament_date = models.DateField(db_column=u'Tournament_Date')
elab_date = models.DateField(db_column=u'Elab_Date')
city = models.CharField(max_length=30, db_column=u'City')
state = models.CharField(max_length=2, db_column=u'State_Code', blank=True)
rounds = models.IntegerField(db_column='Rounds')
total_players = models.IntegerField(db_column='Total_Players')
wall_list = models.TextField(db_column='Wallist')
def __str__(self):
return "%s - on %s with %d players" % (self.tournament_code, self.tournament_date, self.total_players)
def __unicode__(self):
if self.description:
if len(self.description) > 40:
return u'%s...' % self.description[0:37]
return u'%s' % self.description
else:
return u'%s' % self.pk
class Meta:
db_table= u'tournaments'
verbose_name = u'tournament'
verbose_name_plural = u'tournaments'
class Game(models.Model):
game_id = models.AutoField(primary_key=True, db_column=u'Game_ID') # x. This field type is a guess.
game_date = models.DateField(db_column=u'Game_Date') # x.
round = models.TextField(db_column=u'Round') # x. This field type is a guess.
color_1 = models.CharField(max_length=1, db_column=u'Color_1') # x.
rank_1 = models.CharField(max_length=3, db_column=u'Rank_1') # x.
color_2 = models.CharField(max_length=1, db_column=u'Color_2') # x.
rank_2 = models.CharField(max_length=3, db_column=u'Rank_2') # x.
handicap = models.IntegerField(db_column=u'Handicap') # x. This field type is a guess.
komi = models.FloatField(db_column=u'Komi') # x. This field type is a guess.
result = models.CharField(max_length=1, db_column=u'Result') # x.
sgf_code = models.CharField(max_length=26, db_column=u'Sgf_Code', blank=True) # x.
online = models.TextField(db_column=u'Online', blank=True) # x. This field type is a guess.
exclude = models.TextField(db_column=u'Exclude', blank=True) # x. This field type is a guess.
rated = models.TextField(db_column=u'Rated', blank=True) # x. This field type is a guess.
elab_date = models.DateField(db_column=u'Elab_Date') # x.
tournament_code = models.ForeignKey(Tournament, related_name='games_in_tourney', db_column=u'Tournament_Code') # .
pin_player_1 = models.ForeignKey(Member, db_column=u'Pin_Player_1', related_name='games_as_p1')
pin_player_2 = models.ForeignKey(Member, db_column=u'Pin_Player_2', related_name='games_as_p2')
class Meta:
db_table = u'games'
verbose_name = u'game'
verbose_name_plural = u'games'
def __unicode__(self):
return u"Tournament %s Round %s, %s vs %s" % (self.tournament_code,
self.round, self.pin_player_1, self.pin_player_2)
def __str__(self):
return str(self.__unicode__())
def player_other_than(self, one_player):
""" returns the member of the other player. """
return self.pin_player_2 if (one_player == self.pin_player_1) else self.pin_player_1
def winner(self):
if self.result == self.color_1:
return self.pin_player_1
if self.result == self.color_2:
return self.pin_player_2
raise ValueError('Result %r matches neither player color' % self.result)
def won_by(self, p1):
return self.winner() == p1
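# Illustrative use of the helpers above (game and p1 are hypothetical objects):
#   game.winner()               # Member playing the color stored in `result`
#   game.player_other_than(p1)  # p1's opponent in this game
#   game.won_by(p1)             # True iff p1 is the winner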
class Rating(models.Model):
pin_player = models.ForeignKey(Member, db_column=u'Pin_Player', related_name='ratings_set', primary_key=True)
tournament = models.ForeignKey(Tournament, db_column=u'Tournament_Code', related_name='ratings_set')
rating = models.FloatField(db_column=u'Rating') # x. This field type is a guess.
sigma = models.FloatField(db_column=u'Sigma') # x. This field type is a guess.
elab_date = models.DateField(db_column=u'Elab_Date')
class Meta:
db_table = u'ratings'
class MembersRegions(models.Model):
region_id = models.CharField(max_length=255, primary_key=True) # This field type is a guess.
region = models.CharField(max_length=255, blank=True)
states = models.CharField(max_length=255, blank=True)
class Meta:
db_table = u'members_regions'
class Membership(models.Model):
mtype = models.CharField(max_length=8, primary_key=True, db_column=u'MType') # x.
membership_type = models.CharField(max_length=35, db_column=u'Membership_Type') # x.
class Meta:
db_table = u'membership'
| StarcoderdataPython |
5029851 | <filename>cbuildbot/afdo.py
# -*- coding: utf-8 -*-
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the various utilities to build Chrome with AFDO.
For a description of AFDO see gcc.gnu.org/wiki/AutoFDO.
"""
from __future__ import print_function
import collections
import datetime
import glob
import json
import os
import re
import sys
from chromite.lib import constants, cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import (failures_lib, git, gs, osutils, path_util,
timeout_util)
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# AFDO-specific constants.
AFDO_SUFFIX = '.afdo'
COMPRESSION_SUFFIX = '.bz2'
# Chrome URL where AFDO data is stored.
_gsurls = {}
AFDO_CHROOT_ROOT = os.path.join('%(build_root)s', constants.DEFAULT_CHROOT_DIR)
AFDO_LOCAL_DIR = os.path.join('%(root)s', 'tmp')
AFDO_BUILDROOT_LOCAL = AFDO_LOCAL_DIR % {'root': AFDO_CHROOT_ROOT}
CHROME_ARCH_VERSION = '%(package)s-%(arch)s-%(version)s'
CHROME_PERF_AFDO_FILE = '%s.perf.data' % CHROME_ARCH_VERSION
CHROME_AFDO_FILE = '%s%s' % (CHROME_ARCH_VERSION, AFDO_SUFFIX)
CHROME_ARCH_RELEASE = '%(package)s-%(arch)s-%(release)s'
LATEST_CHROME_AFDO_FILE = 'latest-%s%s' % (CHROME_ARCH_RELEASE, AFDO_SUFFIX)
CHROME_DEBUG_BIN = os.path.join('%(root)s', 'build/%(board)s/usr/lib/debug',
'opt/google/chrome/chrome.debug')
# regex to find AFDO file for specific architecture within the ebuild file.
CHROME_EBUILD_AFDO_REGEX = (
r'^(?P<bef>AFDO_FILE\["%s"\]=")(?P<name>.*)(?P<aft>")')
# and corresponding replacement string.
CHROME_EBUILD_AFDO_REPL = r'\g<bef>%s\g<aft>'
GSURL_BASE_BENCH = 'gs://chromeos-prebuilt/afdo-job/llvm'
GSURL_BASE_CWP = 'gs://chromeos-prebuilt/afdo-job/cwp/chrome'
GSURL_BASE_RELEASE = 'gs://chromeos-prebuilt/afdo-job/release-merged'
GSURL_CHROME_PERF = os.path.join(GSURL_BASE_BENCH,
CHROME_PERF_AFDO_FILE + COMPRESSION_SUFFIX)
GSURL_CHROME_AFDO = os.path.join(GSURL_BASE_BENCH,
CHROME_AFDO_FILE + COMPRESSION_SUFFIX)
GSURL_LATEST_CHROME_AFDO = os.path.join(GSURL_BASE_BENCH,
LATEST_CHROME_AFDO_FILE)
GSURL_CHROME_DEBUG_BIN = os.path.join(
GSURL_BASE_BENCH, CHROME_ARCH_VERSION + '.debug' + COMPRESSION_SUFFIX)
AFDO_GENERATE_LLVM_PROF = '/usr/bin/create_llvm_prof'
# AFDO data is considered stale only when BOTH of the following two limits
# are exceeded. For example, AFDO data that was generated 20 days ago but is
# only 5 builds old is still considered valid.
# How old can the AFDO data be? (in days).
AFDO_ALLOWED_STALE_DAYS = 42
# How old can the AFDO data be? (in difference of builds).
AFDO_ALLOWED_STALE_BUILDS = 7
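# A worked example of the staleness rule above (ages are hypothetical): a
# profile that is 20 days and 5 builds old stays fresh, since staleness
# requires exceeding BOTH limits:
#   stale = (age_days > AFDO_ALLOWED_STALE_DAYS and
#            builds_behind > AFDO_ALLOWED_STALE_BUILDS)
#   # (20 > 42 and 5 > 7) -> False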
# How old can the Kernel AFDO data be? (in days).
KERNEL_ALLOWED_STALE_DAYS = 42
# How old can the Kernel AFDO data be before the sheriff gets notified? (in days).
KERNEL_WARN_STALE_DAYS = 14
# Set of boards that can generate the AFDO profile (can generate 'perf'
# data with LBR events). Currently, it needs to be a device that has
# at least 4GB of memory.
#
# This must be consistent with the definitions in autotest.
AFDO_DATA_GENERATORS_LLVM = ('chell',)  # One-element tuple; a bare ('chell') is just a string.
AFDO_ALERT_RECIPIENTS = [
'<EMAIL>'
]
KERNEL_PROFILE_URL = 'gs://chromeos-prebuilt/afdo-job/cwp/kernel/'
KERNEL_PROFILE_LS_PATTERN = '*/*.gcov.xz'
KERNEL_PROFILE_NAME_PATTERN = (
r'([0-9]+\.[0-9]+)/R([0-9]+)-([0-9]+)\.([0-9]+)-([0-9]+)\.gcov\.xz')
KERNEL_PROFILE_MATCH_PATTERN = (
r'^AFDO_PROFILE_VERSION="R[0-9]+-[0-9]+\.[0-9]+-[0-9]+"$')
KERNEL_PROFILE_WRITE_PATTERN = 'AFDO_PROFILE_VERSION="R%d-%d.%d-%d"'
KERNEL_EBUILD_ROOT = os.path.join(
constants.SOURCE_ROOT, 'src/third_party/chromiumos-overlay/sys-kernel')
# Kernel versions for which we can no longer generate AFDO profiles,
# e.g. because there are too few samples.
KERNEL_SKIP_AFDO_UPDATE = ['3.8', '3.14']
GSURL_CWP_SUBDIR = {
'silvermont': '',
'airmont': 'airmont',
'broadwell': 'broadwell',
}
# Relative weights we should use when merging our 'release' profiles. The
# counters in our benchmark/cwp profiles end up being multiplied by these
# numbers, so they can technically be anything, but we have them sum to 100 for
# ease of understanding.
_RELEASE_BENCHMARK_MERGE_WEIGHT = 25
_RELEASE_CWP_MERGE_WEIGHT = 75
# Filename pattern of CWP profiles for Chrome
CWP_CHROME_PROFILE_NAME_PATTERN = r'R%s-%s.%s-%s' + AFDO_SUFFIX + '.xz'
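# e.g. chromeos-chrome-amd64-77.0.3849.0_rc-r1.afdo.bz2 (hypothetical name
# matching the regex below)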
BENCHMARK_PROFILE_NAME_RE = re.compile(
r"""
^chromeos-chrome-amd64-
(\d+)\. # Major
(\d+)\. # Minor
(\d+)\. # Build
(\d+) # Patch
(?:_rc)?-r(\d+) # Revision
(-merged)?\.
afdo(?:\.bz2)?$ # We don't care about the presence of .bz2,
# so we use the ignore-group '?:' operator.
""", re.VERBOSE)
BenchmarkProfileVersion = collections.namedtuple(
'BenchmarkProfileVersion',
['major', 'minor', 'build', 'patch', 'revision', 'is_merged'])
class MissingAFDOData(failures_lib.StepFailure):
"""Exception thrown when necessary AFDO data is missing."""
class MissingAFDOMarkers(failures_lib.StepFailure):
"""Exception thrown when necessary ebuild markers for AFDO are missing."""
class UnknownKernelVersion(failures_lib.StepFailure):
"""Exception thrown when the Kernel version can't be inferred."""
class NoValidProfileFound(failures_lib.StepFailure):
"""Exception thrown when there is no valid profile found."""
def CompressAFDOFile(to_compress, buildroot):
"""Compress file used by AFDO process.
Args:
to_compress: File to compress.
buildroot: buildroot where to store the compressed data.
Returns:
Name of the compressed data file.
"""
local_dir = AFDO_BUILDROOT_LOCAL % {'build_root': buildroot}
dest = os.path.join(local_dir, os.path.basename(to_compress)) + \
COMPRESSION_SUFFIX
cros_build_lib.CompressFile(to_compress, dest)
return dest
def UncompressAFDOFile(to_decompress, buildroot):
"""Decompress file used by AFDO process.
Args:
to_decompress: File to decompress.
buildroot: buildroot where to store the decompressed data.
"""
local_dir = AFDO_BUILDROOT_LOCAL % {'build_root': buildroot}
basename = os.path.basename(to_decompress)
dest_basename = basename.rsplit('.', 1)[0]
dest = os.path.join(local_dir, dest_basename)
cros_build_lib.UncompressFile(to_decompress, dest)
return dest
def GSUploadIfNotPresent(gs_context, src, dest):
"""Upload a file to GS only if the file does not exist.
Will not generate an error if the file already exists in GS; it will
only emit a warning.
I could use GSContext.Copy(src,dest,version=0) here but it does not seem
to work for large files. Using GSContext.Exists(dest) instead. See
crbug.com/395858.
Args:
gs_context: GS context instance.
src: File to copy.
dest: Destination location.
Returns:
True if file was uploaded. False otherwise.
"""
if gs_context.Exists(dest):
logging.warning('File %s already in GS', dest)
return False
else:
gs_context.Copy(src, dest, acl='public-read')
return True
def GetAFDOPerfDataURL(cpv, arch):
"""Return the location URL for the AFDO per data file.
Build the URL for the 'perf' data file given the release and architecture.
Args:
cpv: The package_info.CPV object for chromeos-chrome.
arch: architecture we're going to build Chrome for.
Returns:
URL of the location of the 'perf' data file.
"""
# The file name of the perf data is based only on the chrome version.
# The test case that produces it does not know anything about the
# revision number.
# TODO(llozano): perf data filename should include the revision number.
version_number = cpv.version_no_rev.split('_')[0]
chrome_spec = {
'package': cpv.package,
'arch': arch,
'version': version_number
}
return GSURL_CHROME_PERF % chrome_spec
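# For instance, with hypothetical values package='chromeos-chrome',
# arch='amd64' and version '77.0.3849.0', the function above yields:
#   gs://chromeos-prebuilt/afdo-job/llvm/chromeos-chrome-amd64-77.0.3849.0.perf.data.bz2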
def CheckAFDOPerfData(cpv, arch, gs_context):
"""Check whether AFDO perf data exists for the given architecture.
Check if 'perf' data file for this architecture and release is available
in GS.
Args:
cpv: The package_info.CPV object for chromeos-chrome.
arch: architecture we're going to build Chrome for.
gs_context: GS context to retrieve data.
Returns:
True if AFDO perf data is available. False otherwise.
"""
url = GetAFDOPerfDataURL(cpv, arch)
if not gs_context.Exists(url):
logging.info('Could not find AFDO perf data at %s', url)
return False
logging.info('Found AFDO perf data at %s', url)
return True
def WaitForAFDOPerfData(cpv,
arch,
buildroot,
gs_context,
timeout=constants.AFDO_GENERATE_TIMEOUT):
"""Wait for AFDO perf data to show up (with an appropriate timeout).
Wait for AFDO 'perf' data to show up in GS and copy it into a temp
directory in the buildroot.
Args:
cpv: CPV object for Chrome.
arch: architecture we're going to build Chrome for.
buildroot: buildroot where AFDO data should be stored.
gs_context: GS context to retrieve data.
timeout: How long to wait total, in seconds.
Returns:
True if found the AFDO perf data before the timeout expired.
False otherwise.
"""
try:
timeout_util.WaitForReturnTrue(
CheckAFDOPerfData,
func_args=(cpv, arch, gs_context),
timeout=timeout,
period=constants.SLEEP_TIMEOUT)
except timeout_util.TimeoutError:
logging.info('Could not find AFDO perf data before timeout')
return False
url = GetAFDOPerfDataURL(cpv, arch)
dest_dir = AFDO_BUILDROOT_LOCAL % {'build_root': buildroot}
dest_path = os.path.join(dest_dir, url.rsplit('/', 1)[1])
gs_context.Copy(url, dest_path)
UncompressAFDOFile(dest_path, buildroot)
logging.info('Retrieved AFDO perf data to %s', dest_path)
return True
def _BuildrootToWorkDirs(buildroot):
chroot_root = AFDO_CHROOT_ROOT % {'build_root': buildroot}
local_dir = AFDO_LOCAL_DIR % {'root': chroot_root}
in_chroot_local_dir = AFDO_LOCAL_DIR % {'root': ''}
return chroot_root, local_dir, in_chroot_local_dir
def _EnumerateMostRecentProfiles(gs_context, milestones, glob_url,
parse_profile_name):
"""Enumerates the most recent AFDO profiles for the given Chrome releases.
Args:
gs_context: How we talk to gs://
milestones: A list of ints; each one is a major Chrome version. We'll
try to get the most recent profile for each of these.
glob_url: A URL to query gsutil with.
parse_profile_name: A callable that transforms a profile's filename into
an object that:
- is orderable such that |a < b| implies that |a| is an older profile
than |b|
- has a |major| attribute that indicates Chrome's major version number
Alternatively, if it returns None, we skip the given profile.
Returns:
A dict of {milestone_number: latest_profile_gs_url}. The keys of this
dict are a (not necessarily strict) subset of the values in |milestones|.
"""
profile_listing = gs_context.List(glob_url)
if not profile_listing:
raise ValueError('No profiles matched %s' % glob_url)
parsed_profiles = []
for profile in profile_listing:
url = profile.url
parsed = parse_profile_name(os.path.basename(url))
if parsed is not None:
parsed_profiles.append((parsed, url))
newest = {}
for version in milestones:
profiles = [(v, url) for v, url in parsed_profiles if v.major == version]
if not profiles:
continue
_, url = max(profiles)
newest[version] = url
return newest
def _EnumerateMostRecentCWPProfiles(gs_context, milestones):
"""Enumerates the most recent CWP AFDO profiles for Chrome releases.
See _EnumerateMostRecentProfiles for info about args/return value.
"""
profile_suffix = AFDO_SUFFIX + '.xz'
glob_url = os.path.join(GSURL_BASE_CWP, '*' + profile_suffix)
# e.g. R75-3729.38-1554716539.afdo.xz
profile_name_re = re.compile(
r"""
^R(\d+)- # Major
(\d+)\. # Build
(\d+)- # Patch
(\d+) # Clock; breaks ties sometimes.
\.afdo\.xz$
""", re.VERBOSE)
ProfileVersion = collections.namedtuple('ProfileVersion',
['major', 'build', 'patch', 'clock'])
def parse_profile_name(url_basename):
match = profile_name_re.match(url_basename)
if not match:
raise ValueError('Unparseable CWP profile name: %s' % url_basename)
return ProfileVersion(*[int(x) for x in match.groups()])
return _EnumerateMostRecentProfiles(gs_context, milestones, glob_url,
parse_profile_name)
def _ParseBenchmarkProfileName(profile_name):
match = BENCHMARK_PROFILE_NAME_RE.match(profile_name)
if not match:
raise ValueError('Unparseable benchmark profile name: %s' % profile_name)
groups = match.groups()
version_groups = groups[:-1]
is_merged = groups[-1]
return BenchmarkProfileVersion(
*[int(x) for x in version_groups], is_merged=bool(is_merged))
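# A quick sketch of the parser above on a sample (hypothetical) name:
#   _ParseBenchmarkProfileName('chromeos-chrome-amd64-77.0.3849.0_rc-r1.afdo')
#   -> BenchmarkProfileVersion(major=77, minor=0, build=3849, patch=0,
#                              revision=1, is_merged=False)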
def _EnumerateMostRecentBenchmarkProfiles(gs_context, milestones):
"""Enumerates the most recent benchmark AFDO profiles for Chrome releases.
See _EnumerateMostRecentProfiles for info about args/return value.
"""
profile_suffix = AFDO_SUFFIX + COMPRESSION_SUFFIX
glob_url = os.path.join(GSURL_BASE_BENCH, '*' + profile_suffix)
def parse_profile_name(url_basename):
parsed = _ParseBenchmarkProfileName(url_basename)
# We don't want to merge a merged profile; merged profiles are primarily
# for stability, and we have CWP to provide us that.
return None if parsed.is_merged else parsed
return _EnumerateMostRecentProfiles(gs_context, milestones, glob_url,
parse_profile_name)
def GenerateReleaseProfileMergePlan(gs_context, milestones):
"""Generates a plan to merge release profiles for Chrome milestones.
Args:
gs_context: How we talk to gs://
milestones: A list of ints; Chrome milestones
Returns:
A tuple (a, b), where:
- |b| is a dict of {milestone: (cwp_profile, benchmark_profile)}, where
|benchmark_profile| and |cwp_profile| are paths in gs:// that point to
the most recent benchmark and CWP profiles for |milestone|.
- |a| is a sorted list of milestones that aren't present in |b|, but are
present in |milestones|.
"""
benchmark_profiles = _EnumerateMostRecentBenchmarkProfiles(
gs_context, milestones)
cwp_profiles = _EnumerateMostRecentCWPProfiles(gs_context, milestones)
planned_merges = {
version: (cwp_profiles[version], benchmark_profile)
for version, benchmark_profile in benchmark_profiles.items()
if version in cwp_profiles
}
skipped = sorted(set(milestones) - set(planned_merges))
return skipped, planned_merges
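# Sketch of the return shape (milestones and URLs are hypothetical):
#   skipped, plan = GenerateReleaseProfileMergePlan(gs_context, [74, 75])
#   # plan == {75: ('gs://...cwp.../R75-....afdo.xz',
#   #               'gs://...llvm/chromeos-chrome-amd64-75....afdo.bz2')}
#   # skipped == [74], i.e. no matching profile pair was found for M74.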
def ExecuteReleaseProfileMergePlan(gs_context, buildroot, merge_plan):
"""Generates release profiles, given a release profile merge plan.
Args:
gs_context: How we talk to gs://
buildroot: Our buildroot
merge_plan: The second result of GenerateReleaseProfileMergePlan. This
determines the profiles we pull and merge.
"""
_, work_dir, chroot_work_dir = _BuildrootToWorkDirs(buildroot)
def path_pair(suffix):
outside_chroot = os.path.join(work_dir, suffix)
in_chroot = os.path.join(chroot_work_dir, suffix)
return in_chroot, outside_chroot
chroot_work_dir, work_dir = path_pair('afdo_data_merge')
def copy_profile(gs_path, local_path):
assert local_path.endswith('.afdo'), local_path
assert not gs_path.endswith('.afdo'), gs_path
compression_suffix = os.path.splitext(gs_path)[1]
temp_path = local_path + compression_suffix
gs_context.Copy(gs_path, temp_path)
cros_build_lib.UncompressFile(temp_path, local_path)
merge_results = {}
for version, (cwp_profile, benchmark_profile) in merge_plan.items():
chroot_benchmark_path, benchmark_path = path_pair('benchmark.afdo')
copy_profile(benchmark_profile, benchmark_path)
chroot_cwp_path, cwp_path = path_pair('cwp.afdo')
copy_profile(cwp_profile, cwp_path)
chroot_merged_path, merged_path = path_pair('m%d.afdo' % version)
merge_weights = [
(chroot_cwp_path, _RELEASE_CWP_MERGE_WEIGHT),
(chroot_benchmark_path, _RELEASE_BENCHMARK_MERGE_WEIGHT),
]
_MergeAFDOProfiles(merge_weights, chroot_merged_path, use_compbinary=True)
comp_merged_path = merged_path + COMPRESSION_SUFFIX
cros_build_lib.CompressFile(merged_path, comp_merged_path)
merge_results[version] = comp_merged_path
return merge_results
def UploadReleaseProfiles(gs_context, run_id, merge_plan, merge_results):
"""Uploads the profiles in merge_results to our release profile bucket.
Args:
gs_context: Our GS context
run_id: A unique identifier for this run. Generally recommended to be the
number of seconds since the unix epoch, or something similarly difficult
to 'collide' with other runs. This is used in paths to guarantee
uniqueness.
merge_plan: The merge plan that generated the given |merge_results|. Only
used to write to a metadata file, so we know what went into this profile.
merge_results: A map describing the profiles to upload; you can get one
from ExecuteReleaseProfileMergePlan.
"""
gs_url_base = os.path.join(GSURL_BASE_RELEASE, run_id)
def copy_file_to_gs(local_path, remote_path):
# Note that version=0 implies that we'll never overwrite anything. If
# run_id is truly unique, this should never make a difference.
gs_context.Copy(local_path, remote_path, acl='public-read', version=0)
for version, profile in merge_results.items():
suffix = os.path.splitext(profile)[1]
assert suffix != '.afdo', 'All profiles should be compressed.'
output_path = os.path.join(gs_url_base,
'profiles/m%d.afdo%s' % (version, suffix))
copy_file_to_gs(profile, output_path)
# Write a map describing the profiles that have been uploaded. Not
# compressed, because it's expected to be <500 bytes. At the time of writing,
# no automated system relies on these; we just write them so it's easier to
# understand what 'gs://path/to/profiles/m75.afdo' actually consists of.
temp_dir = osutils.GetGlobalTempDir()
meta_file_path = os.path.join(temp_dir, 'meta.json')
osutils.WriteFile(meta_file_path, json.dumps(merge_plan))
copy_file_to_gs(meta_file_path, os.path.join(gs_url_base, 'meta.json'))
def _MergeAFDOProfiles(chroot_profile_list,
chroot_output_profile,
use_compbinary=False):
"""Merges the given profile list.
Args:
chroot_profile_list: a list of (profile_path, profile_weight).
Profile_weight is an int that tells us how to weight the profile compared
to everything else.
chroot_output_profile: where to store the result profile.
use_compbinary: whether to use the new compressed binary AFDO profile
format.
"""
if not chroot_profile_list:
raise ValueError('Need profiles to merge')
# A regular llvm-profdata command looks like:
# llvm-profdata merge [-sample] -output=/path/to/output input1 [input2
# [input3 ...]]
#
# Alternatively, we can specify inputs by `-weighted-input=A,file`, where A
# is a multiplier of the sample counts in the profile.
merge_command = [
'llvm-profdata',
'merge',
'-sample',
'-output=' + chroot_output_profile,
]
merge_command += [
'-weighted-input=%d,%s' % (weight, name)
for name, weight in chroot_profile_list
]
if use_compbinary:
merge_command.append('-compbinary')
cros_build_lib.run(
merge_command, enter_chroot=True, capture_output=True, print_cmd=True)
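# For example (paths and weights are hypothetical), merging a CWP profile at
# weight 75 with a benchmark profile at weight 25 runs roughly:
#   llvm-profdata merge -sample -output=/tmp/merged.afdo \
#       -weighted-input=75,/tmp/cwp.afdo -weighted-input=25,/tmp/benchmark.afdo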
def _RemoveIndirectCallTargetsFromProfile(chroot_input_path,
chroot_output_path):
"""Removes indirect call targets from the given profile.
Args:
chroot_input_path: the profile in the chroot to remove indirect call
targets from.
chroot_output_path: where to drop the new profile. May be the same name as
chroot_input_path.
"""
removal_script = ('/mnt/host/source/src/third_party/toolchain-utils/'
'afdo_redaction/remove_indirect_calls.py')
def UniqueChrootFilePath(path):
# If this fires, we can do stuff with tempfile. I don't think it
# will, and I like path names without a lot of garbage in them.
if os.path.exists(path_util.FromChrootPath(path)):
raise ValueError('Path %r already exists; refusing to overwrite.' % path)
return path
input_as_txt = UniqueChrootFilePath(chroot_input_path + '.txt')
# This is mostly here because yapf has ugly formatting when we do
# foo(['a long list that', 'wraps on multiple', 'lines'], kwarg=1, kwarg=2)
def RunCommand(cmd):
cros_build_lib.run(cmd, enter_chroot=True, print_cmd=True)
RunCommand([
'llvm-profdata',
'merge',
'-sample',
'-output=' + input_as_txt,
'-text',
chroot_input_path,
])
output_as_txt = UniqueChrootFilePath(chroot_output_path + '.txt')
RunCommand([
removal_script,
'--input=' + input_as_txt,
'--output=' + output_as_txt,
])
# FIXME: Maybe want to use compbinary here.
RunCommand([
'llvm-profdata',
'merge',
'-sample',
'-output=' + chroot_output_path,
output_as_txt,
])
def _CompressAndUploadAFDOProfileIfNotPresent(gs_context, buildroot, gsurl_base,
profile_to_upload_path):
"""Compresses and potentially uploads the given profile."""
compressed_path = CompressAFDOFile(profile_to_upload_path, buildroot)
compressed_basename = os.path.basename(compressed_path)
gs_target = os.path.join(gsurl_base, compressed_basename)
uploaded = GSUploadIfNotPresent(gs_context, compressed_path, gs_target)
return uploaded
def CreateAndUploadMergedAFDOProfile(gs_context,
buildroot,
unmerged_name,
recent_to_merge=5,
max_age_days=14):
"""Create a merged AFDO profile from recent AFDO profiles and upload it.
If the upload would overwrite an existing merged file, this skips the upload.
Args:
gs_context: GS Context
buildroot: The build root
unmerged_name: name of the AFDO profile we've just uploaded. No profiles
whose names are lexicographically ordered after this are candidates for
selection.
recent_to_merge: The maximum number of profiles to merge
max_age_days: Don't merge profiles older than max_age_days days old.
Returns:
A (str, bool) of:
- The name of a merged profile in GSURL_BASE_BENCH if the AFDO profile is
a candidate for merging. Otherwise, None.
- Whether we uploaded a merged profile.
"""
_, work_dir, chroot_work_dir = _BuildrootToWorkDirs(buildroot)
profile_suffix = AFDO_SUFFIX + COMPRESSION_SUFFIX
merged_suffix = '-merged'
glob_url = os.path.join(GSURL_BASE_BENCH, '*' + profile_suffix)
benchmark_listing = gs_context.List(glob_url, details=True)
unmerged_version = _ParseBenchmarkProfileName(unmerged_name)
def get_ordered_mergeable_profiles(benchmark_listing):
"""Returns a list of mergeable profiles ordered by increasing version."""
profile_versions = [(_ParseBenchmarkProfileName(os.path.basename(x.url)), x)
for x in benchmark_listing]
# Exclude merged profiles, because merging merged profiles into merged
# profiles is likely bad.
candidates = [(version, x)
for version, x in profile_versions
if unmerged_version >= version and not version.is_merged]
candidates.sort()
return [x for _, x in candidates]
benchmark_profiles = get_ordered_mergeable_profiles(benchmark_listing)
if not benchmark_profiles:
logging.warning('Skipping merged profile creation: no merge candidates '
'found')
return None, False
base_time = benchmark_profiles[-1].creation_time
time_cutoff = base_time - datetime.timedelta(days=max_age_days)
merge_candidates = [
p for p in benchmark_profiles if p.creation_time >= time_cutoff
]
merge_candidates = merge_candidates[-recent_to_merge:]
# This should never happen, but be sure we're not merging a profile into
# itself anyway. It's really easy for that to silently slip through, and can
# lead to overrepresentation of a single profile, which just causes more
# noise.
assert len(set(p.url for p in merge_candidates)) == len(merge_candidates)
# Merging a profile into itself is pointless.
if len(merge_candidates) == 1:
logging.warning('Skipping merged profile creation: we only have a single '
'merge candidate.')
return None, False
chroot_afdo_files = []
for candidate in merge_candidates:
# It would be slightly less complex to just name these off as
# profile-1.afdo, profile-2.afdo, ... but the logs are more readable if we
# keep the basename from gs://.
candidate_name = os.path.basename(candidate.url)
candidate_uncompressed_name = candidate_name[:-len(COMPRESSION_SUFFIX)]
copy_from = candidate.url
copy_to = os.path.join(work_dir, candidate_name)
copy_to_uncompressed = os.path.join(work_dir, candidate_uncompressed_name)
chroot_file = os.path.join(chroot_work_dir, candidate_uncompressed_name)
gs_context.Copy(copy_from, copy_to)
cros_build_lib.UncompressFile(copy_to, copy_to_uncompressed)
chroot_afdo_files.append(chroot_file)
afdo_basename = os.path.basename(chroot_afdo_files[-1])
assert afdo_basename.endswith(AFDO_SUFFIX)
afdo_basename = afdo_basename[:-len(AFDO_SUFFIX)]
raw_merged_basename = 'raw-' + afdo_basename + merged_suffix + AFDO_SUFFIX
chroot_raw_merged_output_path = os.path.join(chroot_work_dir,
raw_merged_basename)
# Weight all profiles equally.
_MergeAFDOProfiles([(profile, 1) for profile in chroot_afdo_files],
chroot_raw_merged_output_path)
profile_to_upload_basename = afdo_basename + merged_suffix + AFDO_SUFFIX
profile_to_upload_path = os.path.join(work_dir, profile_to_upload_basename)
chroot_profile_to_upload_path = os.path.join(chroot_work_dir,
profile_to_upload_basename)
_RemoveIndirectCallTargetsFromProfile(chroot_raw_merged_output_path,
chroot_profile_to_upload_path)
result_basename = os.path.basename(profile_to_upload_path)
return result_basename, _CompressAndUploadAFDOProfileIfNotPresent(
gs_context, buildroot, GSURL_BASE_BENCH, profile_to_upload_path)
def PatchChromeEbuildAFDOFile(ebuild_file, profiles):
"""Patch the Chrome ebuild with the dictionary of {arch: afdo_file} pairs.
Args:
ebuild_file: path of the ebuild file within the chroot.
profiles: {source: afdo_file} pairs to put into the ebuild.
"""
original_ebuild = path_util.FromChrootPath(ebuild_file)
modified_ebuild = '%s.new' % original_ebuild
patterns = {}
repls = {}
markers = {}
for source in profiles.keys():
patterns[source] = re.compile(CHROME_EBUILD_AFDO_REGEX % source)
repls[source] = CHROME_EBUILD_AFDO_REPL % profiles[source]
markers[source] = False
with open(original_ebuild, 'r') as original:
with open(modified_ebuild, 'w') as modified:
for line in original:
for source in profiles.keys():
matched = patterns[source].match(line)
if matched:
markers[source] = True
modified.write(patterns[source].sub(repls[source], line))
break
else: # line without markers, just copy it.
modified.write(line)
for source, found in markers.items():
if not found:
raise MissingAFDOMarkers('Chrome ebuild file does not have appropriate '
'AFDO markers for source %s' % source)
os.rename(modified_ebuild, original_ebuild)
def UpdateManifest(ebuild_file, ebuild_prog='ebuild'):
"""Regenerate the Manifest file.
Args:
ebuild_file: path to the ebuild file
ebuild_prog: the ebuild command; can be board specific
"""
gen_manifest_cmd = [ebuild_prog, ebuild_file, 'manifest', '--force']
cros_build_lib.run(gen_manifest_cmd, enter_chroot=True, print_cmd=True)
def CommitIfChanged(ebuild_dir, message):
"""If there are changes to ebuild or Manifest, commit them.
Args:
ebuild_dir: the path to the directory of ebuild in the chroot
message: commit message
"""
# Check if anything changed compared to the previous version.
modifications = git.RunGit(
ebuild_dir, ['status', '--porcelain', '-uno'],
capture_output=True,
print_cmd=True).output
if not modifications:
logging.info('AFDO info for the ebuilds did not change. '
'Nothing to commit')
return
git.RunGit(ebuild_dir, ['commit', '-a', '-m', message], print_cmd=True)
def UpdateChromeEbuildAFDOFile(board, profiles):
"""Update chrome ebuild with the dictionary of {arch: afdo_file} pairs.
Modifies the Chrome ebuild to set the appropriate AFDO file for each
given architecture. Regenerates the associated Manifest file and
commits the new ebuild and Manifest.
Args:
board: board we are building Chrome for.
profiles: {arch: afdo_file} pairs to put into the ebuild.
These are profiles from selected benchmarks.
"""
# Find the Chrome ebuild file.
equery_prog = 'equery'
ebuild_prog = 'ebuild'
if board:
equery_prog += '-%s' % board
ebuild_prog += '-%s' % board
equery_cmd = [equery_prog, 'w', 'chromeos-chrome']
ebuild_file = cros_build_lib.run(
equery_cmd, enter_chroot=True, stdout=True).output.rstrip()
# Patch the ebuild file with the names of the available afdo_files.
PatchChromeEbuildAFDOFile(ebuild_file, profiles)
# Also patch the 9999 ebuild. This is necessary because the uprev
# process starts from the 9999 ebuild file and then compares to the
current version to see if the uprev is really necessary. We don't
# want the names of the available afdo_files to show as differences.
# It also allows developers to do USE=afdo_use when using the 9999
# ebuild.
ebuild_9999 = os.path.join(
os.path.dirname(ebuild_file), 'chromeos-chrome-9999.ebuild')
PatchChromeEbuildAFDOFile(ebuild_9999, profiles)
UpdateManifest(ebuild_9999, ebuild_prog)
ebuild_dir = path_util.FromChrootPath(os.path.dirname(ebuild_file))
CommitIfChanged(ebuild_dir, 'Update profiles and manifests for Chrome.')
def VerifyLatestAFDOFile(afdo_release_spec, buildroot, gs_context):
"""Verify that the latest AFDO profile for a release is suitable.
Find the latest AFDO profile file for a particular release and check
that it is not too stale. The latest AFDO profile name for a release
can be found in a file in GS under the name
latest-chrome-<arch>-<release>.afdo.
Args:
afdo_release_spec: architecture and release to find the latest AFDO
profile for.
buildroot: buildroot where AFDO data should be stored.
gs_context: GS context to retrieve data.
Returns:
The first return value is the name of the AFDO profile file found, or
None if no suitable profile was found.
The second return value indicates whether the profile found is expired.
It is False when no profile is found.
"""
latest_afdo_url = GSURL_LATEST_CHROME_AFDO % afdo_release_spec
# Check if latest-chrome-<arch>-<release>.afdo exists.
try:
latest_detail = gs_context.List(latest_afdo_url, details=True)
except gs.GSNoSuchKey:
logging.info('Could not find latest AFDO info file %s', latest_afdo_url)
return None, False
# Then get the name of the latest valid AFDO profile file.
local_dir = AFDO_BUILDROOT_LOCAL % {'build_root': buildroot}
latest_afdo_file = LATEST_CHROME_AFDO_FILE % afdo_release_spec
latest_afdo_path = os.path.join(local_dir, latest_afdo_file)
gs_context.Copy(latest_afdo_url, latest_afdo_path)
cand = osutils.ReadFile(latest_afdo_path).strip()
cand_build = int(cand.split('.')[2])
curr_build = int(afdo_release_spec['build'])
# Verify the AFDO profile file is not too stale.
mod_date = latest_detail[0].creation_time
curr_date = datetime.datetime.now()
allowed_stale_days = datetime.timedelta(days=AFDO_ALLOWED_STALE_DAYS)
if ((curr_date - mod_date) > allowed_stale_days and
(curr_build - cand_build) > AFDO_ALLOWED_STALE_BUILDS):
logging.info('Found latest AFDO info file %s but it is too old',
latest_afdo_url)
return cand, True
return cand, False
def GetBenchmarkProfile(cpv, _source, buildroot, gs_context):
"""Try to find the latest suitable AFDO profile file.
Try to find the latest AFDO profile generated for current release
and architecture. If there is none, check the previous release (mostly
in case we have just branched).
Args:
cpv: cpv object for Chrome.
_source: benchmark source for which we are looking (unused here).
buildroot: buildroot where AFDO data should be stored.
gs_context: GS context to retrieve data.
Returns:
Name of latest suitable AFDO profile file if one is found.
None otherwise.
"""
# Currently, benchmark based profiles can only be generated on amd64.
arch = 'amd64'
version_numbers = cpv.version.split('.')
current_release = version_numbers[0]
current_build = version_numbers[2]
afdo_release_spec = {
'package': cpv.package,
'arch': arch,
'release': current_release,
'build': current_build
}
afdo_file, expired = VerifyLatestAFDOFile(afdo_release_spec, buildroot,
gs_context)
if afdo_file and not expired:
return afdo_file
# The profile found in the current release is too old. This is clearly a
# sign of a problem, so don't try to find another one in a previous branch.
if expired:
return None
# Could not find suitable AFDO file for the current release.
# Let's see if there is one from the previous release.
previous_release = str(int(current_release) - 1)
prev_release_spec = {
'package': cpv.package,
'arch': arch,
'release': previous_release,
'build': current_build
}
afdo_file, expired = VerifyLatestAFDOFile(prev_release_spec, buildroot,
gs_context)
if expired:
return None
return afdo_file
def UpdateLatestAFDOProfileInGS(cpv, arch, buildroot, profile_name, gs_context):
"""Updates our 'latest profile' file in GS to point to `profile_name`.
Args:
cpv: cpv object for Chrome.
arch: architecture for which we are looking for AFDO profile.
buildroot: buildroot where AFDO data should be stored.
profile_name: Name of the profile to point the 'latest profile' file at.
gs_context: GS context.
"""
_, local_dir, _ = _BuildrootToWorkDirs(buildroot)
# Create latest-chrome-<arch>-<release>.afdo pointing to the name
# of the AFDO profile file and upload to GS.
current_release = cpv.version.split('.')[0]
afdo_release_spec = {
'package': cpv.package,
'arch': arch,
'release': current_release
}
latest_afdo_file = LATEST_CHROME_AFDO_FILE % afdo_release_spec
latest_afdo_path = os.path.join(local_dir, latest_afdo_file)
osutils.WriteFile(latest_afdo_path, profile_name)
gs_context.Copy(
latest_afdo_path,
GSURL_LATEST_CHROME_AFDO % afdo_release_spec,
acl='public-read')
def GenerateAFDOData(cpv, arch, board, buildroot, gs_context):
"""Generate AFDO profile data from 'perf' data.
Given the 'perf' profile, generate an AFDO profile using create_llvm_prof.
It also creates a latest-chrome-<arch>-<release>.afdo file pointing
to the generated AFDO profile.
Uploads the generated data to GS for retrieval by the chrome ebuild
file when doing an 'afdo_use' build.
It is possible the generated data has previously been uploaded to GS,
in which case this routine will not upload the data again. Uploading
it again may cause verification failures for the ebuild file referencing
the previous contents of the data.
Args:
cpv: cpv object for Chrome.
arch: architecture for which we are looking for AFDO profile.
board: board we are building for.
buildroot: buildroot where AFDO data should be stored.
gs_context: GS context to retrieve/store data.
Returns:
Name of the AFDO profile file generated if successful, and whether or not
it was uploaded.
"""
CHROME_UNSTRIPPED_NAME = 'chrome.unstripped'
version_number = cpv.version
afdo_spec = {'package': cpv.package, 'arch': arch, 'version': version_number}
chroot_root, local_dir, in_chroot_local_dir = _BuildrootToWorkDirs(buildroot)
# Upload compressed chrome debug binary to GS for triaging purposes.
# TODO(llozano): This simplifies things in case of need of triaging
# problems but is it really necessary?
debug_bin = CHROME_DEBUG_BIN % {'root': chroot_root, 'board': board}
comp_debug_bin_path = CompressAFDOFile(debug_bin, buildroot)
GSUploadIfNotPresent(gs_context, comp_debug_bin_path,
GSURL_CHROME_DEBUG_BIN % afdo_spec)
# create_llvm_prof demands that the name of the profiled binary exactly match
# the name of the unstripped binary, or that it be named 'chrome.unstripped'.
# So create a symbolic link with the appropriate name.
local_debug_sym = os.path.join(local_dir, CHROME_UNSTRIPPED_NAME)
in_chroot_debug_bin = CHROME_DEBUG_BIN % {'root': '', 'board': board}
osutils.SafeUnlink(local_debug_sym)
os.symlink(in_chroot_debug_bin, local_debug_sym)
# Call the create_llvm_prof tool to generate an AFDO profile from the 'perf'
# profile and upload it to GS. This must be called from within the chroot
# since the tool was built inside the chroot.
debug_sym = os.path.join(in_chroot_local_dir, CHROME_UNSTRIPPED_NAME)
# The name of the 'perf' file is based only on the version of chrome. The
# revision number is not included.
afdo_spec_no_rev = {
'package': cpv.package,
'arch': arch,
'version': cpv.version_no_rev.split('_')[0]
}
perf_afdo_file = CHROME_PERF_AFDO_FILE % afdo_spec_no_rev
perf_afdo_path = os.path.join(in_chroot_local_dir, perf_afdo_file)
afdo_file = CHROME_AFDO_FILE % afdo_spec
afdo_path = os.path.join(in_chroot_local_dir, afdo_file)
afdo_cmd = [
AFDO_GENERATE_LLVM_PROF,
'--binary=%s' % debug_sym,
'--profile=%s' % perf_afdo_path,
'--out=%s' % afdo_path
]
cros_build_lib.run(
afdo_cmd, enter_chroot=True, capture_output=True, print_cmd=True)
afdo_local_path = os.path.join(local_dir, afdo_file)
comp_afdo_path = CompressAFDOFile(afdo_local_path, buildroot)
uploaded_afdo_file = GSUploadIfNotPresent(gs_context, comp_afdo_path,
GSURL_CHROME_AFDO % afdo_spec)
return afdo_file, uploaded_afdo_file
def CanGenerateAFDOData(board):
"""Does this board has the capability of generating its own AFDO data?."""
return board in AFDO_DATA_GENERATORS_LLVM
def FindLatestProfile(target, versions):
"""Find latest profile that is usable by the target.
Args:
target: the target version
versions: a list of versions, sorted in increasing order
Returns:
latest profile that is older than the target
"""
candidates = [x for x in versions if tuple(x) < tuple(target)]
if len(candidates) == 0:
return None
return candidates[-1]
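# A minimal sketch of the selection rule (version lists are hypothetical):
#   FindLatestProfile([62, 9901, 21, 0],
#                     [[61, 9752, 0, 0], [62, 9900, 1, 0], [63, 10032, 0, 0]])
#   -> [62, 9900, 1, 0]  # the newest entry strictly older than the target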
def PatchKernelEbuild(filename, version):
"""Update the AFDO_PROFILE_VERSION string in the given kernel ebuild file.
Args:
filename: name of the ebuild
version: e.g., [61, 9752, 0, 0]
"""
contents = []
for line in osutils.ReadFile(filename).splitlines():
if re.match(KERNEL_PROFILE_MATCH_PATTERN, line):
contents.append(KERNEL_PROFILE_WRITE_PATTERN % tuple(version) + '\n')
else:
contents.append(line + '\n')
osutils.WriteFile(filename, contents, atomic=True)
def CWPProfileToVersionTuple(url):
"""Convert a CWP profile url to a version tuple
Args:
url: for example, gs://chromeos-prebuilt/afdo-job/cwp/chrome/
R65-3325.65-1519323840.afdo.xz
Returns:
A list of [milestone, major, minor, timestamp]
"""
fn_mat = (
CWP_CHROME_PROFILE_NAME_PATTERN % tuple(r'([0-9]+)' for _ in range(0, 4)))
fn_mat = fn_mat.replace('.', '\\.')  # Escape literal dots in the pattern.
return [int(x) for x in re.match(fn_mat, os.path.basename(url)).groups()]
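# e.g., for the URL shown in the docstring above:
#   CWPProfileToVersionTuple('gs://chromeos-prebuilt/afdo-job/cwp/chrome/'
#                            'R65-3325.65-1519323840.afdo.xz')
#   -> [65, 3325, 65, 1519323840]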
def GetCWPProfile(cpv, source, _buildroot, gs_context):
"""Try to find the latest suitable AFDO profile file for cwp.
Try to find the latest AFDO profile generated for current release
and architecture.
Args:
cpv: cpv object for Chrome.
source: profile source
_buildroot: buildroot where AFDO data should be stored (unused here).
gs_context: GS context to retrieve data.
Returns:
Name of latest suitable AFDO profile file if one is found.
None otherwise.
"""
ver_mat = r'([0-9]+)\.[0-9]+\.([0-9]+)\.([0-9]+)_rc-r[0-9]+'
target = [int(x) for x in re.match(ver_mat, cpv.version).groups()]
# Check 2 most recent milestones.
#
# When a branch just happens, the milestone of master increases by 1. There
# will be no profile from that milestone until a dev release is pushed for a
# short period of time. Therefore, a profile from previous branches must be
# picked instead.
#
# Originally, we search toward root in the branch tree for a profile. Now we
# prefer to look at the previous milestone if there's no profile from current
# milestone, because:
#
# 1. dev channel has few samples. The profile quality is much better from
# beta, which is always in a branch.
#
# 2. Master is actually closer to the branch tip than to the branch point,
# assuming that most of the changes on a branch are cherry-picked from
# master.
versions = []
for milestone in (target[0], target[0] - 1):
gs_ls_url = os.path.join(
GSURL_BASE_CWP, GSURL_CWP_SUBDIR[source],
CWP_CHROME_PROFILE_NAME_PATTERN % (milestone, '*', '*', '*'))
try:
res = gs_context.List(gs_ls_url)
versions.extend(CWPProfileToVersionTuple(x) for x in [r.url for r in res])
except gs.GSNoSuchKey:
pass
if not versions:
logging.info('profile not found for: %s', cpv.version)
return None
# crbug.com/984153: Sort the CWP profiles only by (milestone, timestamp)
versions.sort(key=lambda x: (x[0], x[3]))
cand = FindLatestProfile(target, versions)
# reconstruct the filename and strip .xz
return (CWP_CHROME_PROFILE_NAME_PATTERN % tuple(cand))[:-3]
def GetAvailableKernelProfiles():
"""Get available profiles on specified gsurl.
Returns:
a dictionary that maps kernel version, e.g. "4_4" to a list of
[milestone, major, minor, timestamp]. E.g,
[62, 9901, 21, 1506581147]
"""
gs_context = gs.GSContext()
gs_ls_url = os.path.join(KERNEL_PROFILE_URL, KERNEL_PROFILE_LS_PATTERN)
gs_match_url = os.path.join(KERNEL_PROFILE_URL, KERNEL_PROFILE_NAME_PATTERN)
try:
res = gs_context.List(gs_ls_url)
except gs.GSNoSuchKey:
logging.info('gs files not found: %s', gs_ls_url)
return {}
all_matches = [re.match(gs_match_url, x.url) for x in res]
matches = [x for x in all_matches if x]
versions = {}
for m in matches:
versions.setdefault(m.group(1), []).append([int(x) for x in m.groups()[1:]])
for v in versions:
# crbug.com/984153: Sort the kernel profiles only by (milestone, timestamp)
versions[v].sort(key=lambda x: (x[0], x[3]))
return versions
def FindKernelEbuilds():
"""Find all ebuilds that specify AFDO_PROFILE_VERSION.
The only assumption is that the ebuild files are named as the match pattern
in kver(). If it fails to recognize the ebuild filename, an error will be
thrown.
equery is not used because that would require enumerating the boards, which
is no easier than enumerating the kernel versions or ebuilds.
Returns:
a list of (ebuilds, kernel rev)
"""
def kver(ebuild):
matched = re.match(r'.*/chromeos-kernel-([0-9]+_[0-9]+)-.+\.ebuild$',
ebuild)
if matched:
return matched.group(1).replace('_', '.')
raise UnknownKernelVersion(
'Kernel version cannot be inferred from ebuild filename "%s".' % ebuild)
for fn in glob.glob(os.path.join(KERNEL_EBUILD_ROOT, '*', '*.ebuild')):
for line in osutils.ReadFile(fn).splitlines():
if re.match(KERNEL_PROFILE_MATCH_PATTERN, line):
yield (fn, kver(fn))
break
def ProfileAge(profile_version):
"""Tell the age of profile_version in days.
Args:
profile_version: [chrome milestone, cros major, cros minor, timestamp]
e.g., [61, 9752, 0, 1500000000]
Returns:
Age of profile_version in days.
"""
return (datetime.datetime.utcnow() -
datetime.datetime.utcfromtimestamp(profile_version[3])).days
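# For example (timestamps are hypothetical): if the current UTC time is
# 1500086400, exactly one day after the profile's timestamp, then
#   ProfileAge([61, 9752, 0, 1500000000]) -> 1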
PROFILE_SOURCES = {
'benchmark': GetBenchmarkProfile,
'silvermont': GetCWPProfile,
'airmont': GetCWPProfile,
'broadwell': GetCWPProfile,
}
| StarcoderdataPython |
12817445 | #
# Copyright (c) 2017. EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import configparser
import logging
import os
import posixpath
import zipfile
import paramiko
from paramiko.ssh_exception import PasswordRequiredException
import common
from common import node_types
from common_exceptions import HostOfUnknownTypeException
from java_properties import JavaProperties
from util import lazy_property
logger = logging.getLogger(__name__)
def check_if_password_is_required(key_path):
try:
paramiko.RSAKey.from_private_key_file(key_path, None)
return False
except PasswordRequiredException:
return True
def _strip_tar_extensions(filename):
if filename.endswith(".tar.gz"):
return filename[:-7]
if filename.endswith(".tgz"):
return filename[:-4]
return filename
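# e.g. (hypothetical archive names):
#   _strip_tar_extensions('zookeeper-3.4.10.tar.gz') -> 'zookeeper-3.4.10'
#   _strip_tar_extensions('kafka_2.11-1.0.0.tgz')    -> 'kafka_2.11-1.0.0'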
class Configuration(object):
def __init__(self, statistics_host, server_hosts, client_hosts, port, tests_package_path,
username, password, servers_per_test, public_key_path, private_key_path,
password_required, zookeeper_download_url, zookeeper_hosts, kafka_download_url,
kafka_hosts, full_execution_time):
super().__init__()
self.statistics_host = statistics_host
self.server_hosts = server_hosts
self.client_hosts = client_hosts
self.port = port
self.tests_package_path = tests_package_path
self.username = username
self.password = password
self.servers_per_test = servers_per_test
self.public_key_path = public_key_path
self.private_key_path = private_key_path
self.password_required = password_required
self.zookeeper_download_url = zookeeper_download_url
self.zookeeper_hosts = zookeeper_hosts
self.kafka_download_url = kafka_download_url
self.kafka_hosts = kafka_hosts
self.full_execution_time = full_execution_time
@property
def file_server(self):
return self.statistics_host
@lazy_property
def remote_home(self):
return posixpath.join("/home", self.username)
@lazy_property
def zookeeper_dist_name(self):
return self.zookeeper_download_url.rsplit("/", 1)[1]
@lazy_property
def zookeeper_dir_name(self):
return posixpath.join(self.remote_home, _strip_tar_extensions(self.zookeeper_dist_name))
@lazy_property
def kafka_dist_name(self):
return self.kafka_download_url.rsplit("/", 1)[1]
@lazy_property
def kafka_dir_name(self):
return posixpath.join(self.remote_home, _strip_tar_extensions(self.kafka_dist_name))
def determine_node_type(self, host):
if host in self.server_hosts:
return node_types.IGNITE_SERVER
if host in self.client_hosts:
return node_types.IGNITE_CLIENT
if host in self.kafka_hosts:
return node_types.KAFKA_BROKER
if host in self.zookeeper_hosts:
return node_types.ZOOKEEPER_SERVER
if host == self.statistics_host:
return node_types.STATISTICS_HOST
raise HostOfUnknownTypeException("host {} not present in passed config".format(host))
@classmethod
def read(cls, config_filepath):
parser = configparser.ConfigParser(
allow_no_value=True,
interpolation=configparser.ExtendedInterpolation())
parser.read(config_filepath)
config = parser["load.test"]
password = config.get("Password")
tests_package_path = config["TestsPackagePath"]
properties = _read_settings_from_java_properties(tests_package_path)
full_execution_time = (int(properties["load.tests.warmup.period"]) +
int(properties["load.tests.execution.time"]))
full_execution_time /= 1000 # Duration in load tests properties is written in milliseconds.
statistics_host = properties[common.ganglia_host_property_name]
public_key_path = config.get("PublicKeyPath")
if public_key_path is not None and not os.path.exists(public_key_path):
public_key_path = None
private_key_path = config.get("PrivateKeyPath")
password_required = check_if_password_is_required(private_key_path)
if private_key_path is None and password is None:
logger.warning("No password or private key provided, ssh logins are likely to fail.")
return cls(
username=config["User"],
password=password,
server_hosts=_get_config_list(config, "ServerHosts"),
client_hosts=_get_config_list(config, "ClientHosts"),
statistics_host=statistics_host,
port=int(config.get("Port", 22)),
servers_per_test=_get_config_list(config, "ServersPerTest", int),
tests_package_path=tests_package_path,
public_key_path=public_key_path,
private_key_path=private_key_path,
password_required=password_required,
zookeeper_download_url=config["ZookeeperDownloadUrl"],
zookeeper_hosts=_get_config_list(config, "ZookeeperHosts"),
kafka_download_url=config["KafkaDownloadUrl"],
kafka_hosts=_get_config_list(config, "KafkaHosts"),
full_execution_time=full_execution_time)
def _read_settings_from_java_properties(tests_package_path):
with zipfile.ZipFile(tests_package_path) as tests_package:
byte_text = tests_package.read(common.load_test_properties_path)
return JavaProperties.read(byte_text.decode().splitlines())
def _get_config_list(config, comma_separated_list_name, cls=str):
return [cls(item.strip()) for item in config[comma_separated_list_name].split(",")]
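# e.g., given a hypothetical config entry `ServersPerTest = 1, 2, 4`:
#   _get_config_list(config, 'ServersPerTest', int) -> [1, 2, 4]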
| StarcoderdataPython |
6594659 | from tkinter import *
root = Tk()
root.geometry("300x300")
root.resizable(0, 0)
b1 = Button(root, text="Click", font=("Arial", 20), fg="blue", bg="red")
b1.pack()
root.mainloop() | StarcoderdataPython
214428 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""generate json desc for SigmoidCrossEntropyWithLogits"""
from ._utils import Expander, ExpanderInfoValidator as VLD
@VLD.check_all_formats_same
class SigmoidCrossEntropyWithLogits(Expander):
"""SigmoidCrossEntropyWithLogits expander"""
def _expand(self, graph_builder):
logits, label = self.inputs
# Calculate sigmoid_cross_entropy_with_logits(logits, label)
# formula is: -(label * log(sigmoid(logits)) + (1 - label) * log(1 - sigmoid(logits)))
const_one = graph_builder.value(logits.dtype, 1.0)
neg_x = graph_builder.emit('Neg', [logits])
exp_neg_x = graph_builder.emit('Exp', [neg_x])
add_exp = graph_builder.emit('Add', [const_one, exp_neg_x])
p = graph_builder.emit('RealDiv', [const_one, add_exp])
one_sub_p = graph_builder.emit('Sub', [const_one, p])
one_sub_label = graph_builder.emit('Sub', [const_one, label])
log_p = graph_builder.emit('Log', [p])
log_one_sub_p = graph_builder.emit('Log', [one_sub_p])
res_tmp_1 = graph_builder.emit('Mul', [one_sub_label, log_one_sub_p])
res_tmp_2 = graph_builder.emit('Mul', [label, log_p])
res_tmp = graph_builder.emit('Add', [res_tmp_1, res_tmp_2])
res = graph_builder.emit('Neg', [res_tmp])
return res
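# A NumPy reference of the expanded formula above, handy for sanity-checking
# the emitted graph (illustrative only; not part of the expander):
#   import numpy as np
#   def sigmoid_ce_with_logits(logits, label):
#       p = 1.0 / (1.0 + np.exp(-logits))  # sigmoid(x) = 1 / (1 + e^-x)
#       return -(label * np.log(p) + (1.0 - label) * np.log(1.0 - p))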
| StarcoderdataPython |