| repo_name | path | language | license | size |
|---|---|---|---|---|
import os
UPPERLETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
LETTERS_AND_SPACE = UPPERLETTERS + UPPERLETTERS.lower() + " \t\n"
def loadDictionary():
path = os.path.split(os.path.realpath(__file__))
englishWords = {}
with open(path[0] + "/dictionary.txt") as dictionaryFile:
for word in dictionaryFile.read().split("\n"):
englishWords[word] = None
return englishWords
ENGLISH_WORDS = loadDictionary()
def getEnglishCount(message):
message = message.upper()
message = removeNonLetters(message)
possibleWords = message.split()
if possibleWords == []:
return 0.0
matches = 0
for word in possibleWords:
if word in ENGLISH_WORDS:
matches += 1
return float(matches) / len(possibleWords)
def removeNonLetters(message):
lettersOnly = []
for symbol in message:
if symbol in LETTERS_AND_SPACE:
lettersOnly.append(symbol)
return "".join(lettersOnly)
def isEnglish(message, wordPercentage=20, letterPercentage=85):
"""
>>> isEnglish('Hello World')
True
>>> isEnglish('llold HorWd')
False
"""
wordsMatch = getEnglishCount(message) * 100 >= wordPercentage
numLetters = len(removeNonLetters(message))
messageLettersPercentage = (float(numLetters) / len(message)) * 100
lettersMatch = messageLettersPercentage >= letterPercentage
return wordsMatch and lettersMatch
if __name__ == "__main__":
import doctest
doctest.testmod()
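# A quick illustration of the two thresholds (a sketch, not part of the original
# module): 'Hello World' passes because 2/2 words are dictionary words (100% >= 20%)
# and every character is a letter or space (100% >= 85%), while the scrambled
# 'llold HorWd' fails because neither token is a dictionary word.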
| TheAlgorithms/Python | strings/detecting_english_programmatically.py | Python | mit | 1,495 |
import argparse
import csv
import logging
import math
import os
from datetime import date
import numpy
import pandas
from statsmodels.api import OLS
from matplotlib import pyplot
from btplatform import PositionAdjuster, process_strategy, BacktestHistory
from meanrevert import MeanReversionStrategy, PortfolioDataCollector, StrategyDataCollector, \
MeanReversionStrategyRunner
from pricetools import load_prices
def backtest_strategy(start_date, end_date, strategy_runner, symbols, prices_path):
securities = ['PCX/' + symbol for symbol in symbols]
prices_by_security = dict()
close_prices = pandas.DataFrame()
max_start_date = start_date
min_end_date = end_date
for security in securities:
exchange, security_code = security.split('/')
prices_df = load_prices(prices_path, exchange, security_code)
prices_by_security[security] = prices_df
if max_start_date is not None:
max_start_date = max(max_start_date, prices_df.index.min())
else:
max_start_date = prices_df.index.min()
if min_end_date is not None:
min_end_date = min(min_end_date, prices_df.index.max())
else:
min_end_date = prices_df.index.max()
close_prices[security] = prices_df['close adj']
close_prices.reset_index(inplace=True)
logging.info('considering date range: %s through %s' % (max_start_date, min_end_date))
for security in securities:
truncate_start_date = prices_by_security[security].index >= max_start_date
truncate_end_date = prices_by_security[security].index <= min_end_date
prices_by_security[security] = prices_by_security[security][truncate_start_date & truncate_end_date]
data_collector = StrategyDataCollector(strategy_runner.get_strategy_name())
process_strategy(securities, strategy_runner, data_collector, prices_by_security)
return data_collector
def backtest_portfolio(portfolios, starting_equity, start_date, end_date, prices_path, step_size, max_net_position,
max_gross_position, max_risk_scale, warmup_period):
data_collector = PortfolioDataCollector()
for lookback_period, portfolio, strategy_name in portfolios:
securities = portfolio.split('/')
strategy = MeanReversionStrategy(securities, int(lookback_period), name=strategy_name)
position_adjuster = PositionAdjuster(securities, strategy.get_strategy_name(), max_net_position,
max_gross_position, max_risk_scale,
starting_equity,
step_size)
strategy_runner = MeanReversionStrategyRunner(securities, strategy, warmup_period, position_adjuster)
data_collection = backtest_strategy(start_date, end_date, strategy_runner, securities, prices_path)
data_collector.add_equity(starting_equity)
target_quantities = data_collection.get_target_quantities(strategy.get_strategy_name())
fills = position_adjuster.get_fills()
data_collector.add_strategy_data(securities, target_quantities, fills)
return data_collector
def chart_backtest(start_date, end_date, securities, prices_path, lookback_period,
step_size, start_equity,
max_net_position, max_gross_position, max_risk_scale, warmup_period):
pyplot.style.use('ggplot')
strategy = MeanReversionStrategy(securities, int(lookback_period))
position_adjuster = PositionAdjuster(securities, strategy.get_strategy_name(), max_net_position, max_gross_position,
max_risk_scale,
start_equity,
step_size)
strategy_runner = MeanReversionStrategyRunner(securities, strategy, warmup_period, position_adjuster)
data_collection = backtest_strategy(start_date, end_date, strategy_runner, securities, prices_path)
backtest_history = BacktestHistory(position_adjuster.get_fills(), start_equity)
logging.info('fit quality: %s', fit_quality(backtest_history.get_equity() - start_equity))
backtest_history.get_equity().plot(linewidth=2.)
backtest_history.get_gross_net_position().plot(linewidth=2.)
pyplot.gca().get_yaxis().get_major_formatter().set_useOffset(False)
data_collection.get_factors(','.join(securities)).plot(linewidth=2., subplots=True)
styles = {'level_inf': 'm--', 'level_sup': 'b--', 'signal': 'k-'}
data_collection.get_bollinger(','.join(securities)).plot(linewidth=2., subplots=False, style=styles)
pyplot.show()
def fit_quality(df):
regr_df = df.reset_index()
day_nanos = 24 * 60 * 60 * 1E9
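    # Regress equity against elapsed days (OLS without an intercept) to measure
    # how close the equity curve is to a straight line.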
nanos = regr_df['date'] - regr_df['date'].min()
df2 = pandas.DataFrame(data=[nanos.astype(int) / day_nanos, regr_df['equity']]).transpose()
ols2 = OLS(df2['equity'], df2['date'])
result = ols2.fit()
return {'p-value F-test': result.f_pvalue, 'r-squared': result.rsquared, 'p-value x': result.pvalues[0]}
def create_summary(strategy_name, backtest_history, closed_trades):
mean_trade = closed_trades['pnl'].mean()
worst_trade = closed_trades['pnl'].min()
count_trades = closed_trades['pnl'].count()
max_drawdown = backtest_history.get_drawdown().max()['equity']
final_equity = backtest_history.get_equity()['equity'][-1]
summary = {
'strategy': strategy_name,
'sharpe_ratio': backtest_history.get_sharpe_ratio(),
'average_trade': mean_trade,
'worst_trade': worst_trade,
'count_trades': count_trades,
'max_drawdown_pct': max_drawdown,
'final_equity': final_equity
}
return summary
def load_portfolios(portfolios_filename):
portfolios = list()
with open(portfolios_filename) as csv_file:
reader = csv.reader(csv_file)
for row in reader:
if len(row) == 0:
continue
if row[0].startswith('#'):
continue
portfolios.append(row)
logging.info('loaded portfolios: %s' % str(portfolios))
return portfolios
def main(args):
# TODO arg line
warmup_period = 10
prices_path = args.prices_path
start_date = date(int(args.start_yyyymmdd[:4]), int(args.start_yyyymmdd[4:6]), int(args.start_yyyymmdd[6:8]))
end_date = date(int(args.end_yyyymmdd[:4]), int(args.end_yyyymmdd[4:6]), int(args.end_yyyymmdd[6:8]))
if args.display_single is not None:
securities = args.display_single.split('/')
chart_backtest(start_date, end_date, securities, prices_path, lookback_period=args.lookback_period,
step_size=args.step_size, start_equity=args.starting_equity,
max_net_position=args.max_net_position,
max_gross_position=args.max_gross_position,
max_risk_scale=args.max_risk_scale, warmup_period=warmup_period)
elif args.portfolio is not None:
portfolios = load_portfolios(args.portfolio)
step_size = args.step_size
starting_equity = args.starting_equity
max_net_position = args.max_net_position
max_gross_position = args.max_gross_position
max_risk_scale = args.max_risk_scale
data_collector = backtest_portfolio(portfolios, starting_equity, start_date, end_date, prices_path, step_size,
max_net_position, max_gross_position, max_risk_scale, warmup_period)
backtest_history = BacktestHistory(data_collector.fills_df, data_collector.starting_equity)
backtest_history.trades_pnl.to_pickle(os.sep.join([args.trades_pnl_path, 'trades_pnl.pkl']))
trades = backtest_history.get_trades()
holdings = backtest_history.get_holdings()
equity = backtest_history.get_equity()
target_df = data_collector.new_targets
positions = holdings[['date', 'security', 'total_qty']].groupby(['date', 'security']).sum().unstack().ffill()
latest_holdings = holdings.pivot_table(index='date', columns='security', values='total_qty',
aggfunc=numpy.sum).tail(1).transpose()
latest_holdings.columns = ['quantity']
starting_equity = equity.iloc[0]
ending_equity = equity.iloc[-1]
days_interval = equity.index[-1] - equity.index[0]
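        # Annualize the daily Sharpe ratio assuming roughly 250 trading days per year.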
sharpe_ratio = math.sqrt(250) * equity.pct_change().mean() / equity.pct_change().std()
logging.info('sharpe ratio: %.2f', sharpe_ratio)
annualized_return = 100 * (numpy.power(ending_equity / starting_equity, 365 / days_interval.days) - 1)
logging.info('annualized return: %.2f percent' % annualized_return)
logging.info('trades:\n%s', trades.tail(10).transpose())
logging.info('positions:\n%s', positions.tail(10).transpose())
logging.info('new target quantities:\n%s' % (target_df))
target_trades = (target_df - latest_holdings.transpose()).transpose().dropna()
logging.info('future trades:\n%s' % target_trades.round())
elif args.display_portfolio is not None:
portfolios = load_portfolios(args.display_portfolio)
pyplot.style.use('ggplot')
trades_pnl_path = os.sep.join([args.trades_pnl_path, 'trades_pnl.pkl'])
logging.info('loading data from path: %s', os.path.abspath(trades_pnl_path))
trades_pnl_df = pandas.read_pickle(trades_pnl_path)
backtest_history = BacktestHistory(trades_pnl_df)
backtest_history.set_start_equity(len(portfolios) * args.starting_equity)
pnl_data = backtest_history.trades_pnl[['strategy', 'date', 'realized_pnl', 'unrealized_pnl']]
by_strategy_date = pnl_data.groupby(by=['strategy', 'date'])
by_strategy_date.sum().apply(sum, axis=1).unstack().transpose().plot(linewidth=2., subplots=True, layout=(-1, 2))
holdings = backtest_history.get_holdings()
equity = backtest_history.get_equity()
benchmark = load_prices(prices_path, 'PCX', 'SPY')
equity_df = benchmark[['close adj']].join(equity).dropna()
equity_df.columns = ['benchmark', 'equity']
equity_df['benchmark'] = (equity_df['benchmark'].pct_change() + 1.).cumprod() * equity_df.head(1)[
'equity'].min()
equity_df.plot(linewidth=2.)
logging.info('fit quality: %s', fit_quality(equity - args.starting_equity))
by_security_pos = holdings.pivot_table(index='date', columns='security', values='market_value',
aggfunc=numpy.sum)
by_security_pos.plot(linewidth=2.)
positions_aggregated_net = holdings.groupby('date')['market_value'].sum()
positions_aggregated_gross = holdings.groupby('date')['market_value'].agg(lambda x: numpy.abs(x).sum())
positions_net_gross = numpy.array([positions_aggregated_net, positions_aggregated_gross]).transpose()
positions_aggregated = pandas.DataFrame(index=positions_aggregated_net.index,
data=positions_net_gross,
columns=['net', 'gross'])
positions_aggregated = positions_aggregated.join(equity * 3.0)
positions_aggregated.rename(columns={'equity': 'margin_warning'}, inplace=True)
positions_aggregated = positions_aggregated.join(equity * 4.0)
positions_aggregated.rename(columns={'equity': 'margin_violation'}, inplace=True)
positions_aggregated.plot(linewidth=2., subplots=False)
pyplot.show()
elif args.batch is not None:
# backtest batch
portfolios_path = args.batch
logging.info('processing batch: %s', os.path.abspath(portfolios_path))
with open(portfolios_path) as portfolios_file:
portfolios = [line.strip().split(',') for line in portfolios_file.readlines()]
results = list()
for symbols in portfolios:
strategy = MeanReversionStrategy(symbols, int(args.lookback_period))
position_adjuster = PositionAdjuster(symbols, strategy.get_strategy_name(), args.max_net_position,
args.max_gross_position,
args.max_risk_scale,
args.starting_equity,
args.step_size)
strategy_runner = MeanReversionStrategyRunner(symbols, strategy, warmup_period, position_adjuster)
backtest_strategy(start_date, end_date, strategy_runner, symbols, prices_path)
backtest_history = BacktestHistory(position_adjuster.get_fills(), args.starting_equity)
backtest_data = fit_quality(backtest_history.get_equity() - args.starting_equity)
closed_trades = position_adjuster.get_strategy_trades(closed_only=True)
backtest_data.update(create_summary(strategy.get_strategy_name(), backtest_history, closed_trades))
results.append(backtest_data)
result_df = pandas.DataFrame(results).set_index('strategy')
result_df.to_csv('backtest-results.csv')
print(result_df)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format='%(asctime)s:%(name)s:%(levelname)s:%(message)s')
file_handler = logging.FileHandler('backtest.log', mode='w')
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s')
file_handler.setFormatter(formatter)
logging.getLogger().addHandler(file_handler)
logging.info('starting script')
parser = argparse.ArgumentParser(description='Backtesting prototype.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--start-yyyymmdd', type=str, help='backtest start date', default='20130101')
parser.add_argument('--end-yyyymmdd', type=str, help='backtest end date', default=date.today().strftime('%Y%m%d'))
parser.add_argument('--display-single', type=str, help='display strategy composed of comma-separated securities')
parser.add_argument('--display-portfolio', type=str, help='display aggregated portfolio from specified file')
parser.add_argument('--portfolio', type=str, help='display aggregated portfolio from specified file')
parser.add_argument('--batch', type=str, help='processes strategies in batch mode')
parser.add_argument('--lookback-period', type=int, help='lookback period', default=200)
parser.add_argument('--step-size', type=int, help='deviation unit measured in number of standard deviations',
default=2)
parser.add_argument('--starting-equity', type=float,
help='amount of equity allocated to each strategy (for one risk step)', default=8000)
parser.add_argument('--actual-equity', type=float, help='total equity available for trading')
parser.add_argument('--max-net-position', type=float,
help='max allowed net position for one step, measured as a fraction of equity', default=0.4)
parser.add_argument('--max-gross-position', type=float,
help='max allowed gross position by step, measured as a fraction of equity', default=2.)
parser.add_argument('--max-risk-scale', type=int, help='max number of steps', default=3)
parser.add_argument('--prices-path', type=str, help='path to prices data', default='data')
parser.add_argument('--trades-pnl-path', type=str, help='path to trades pnl data', default='.')
args = parser.parse_args()
pandas.set_option('expand_frame_repr', False)
main(args)
# dev: --start-yyyymmdd 20170101 --end-yyyymmdd 20170313
| chris-ch/omarket | python-lab/backtest.py | Python | apache-2.0 | 15,929 |
from pyfunk.monads import Monad
class Identity(Monad):
pass
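# A sketch of intended use, assuming pyfunk's Monad exposes the usual of/fmap
# interface (not verified against the library):
#   Identity.of(3).fmap(lambda x: x + 1)  # -> Identity(4)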
| danceasarxx/pyfunk | pyfunk/monads/identity.py | Python | gpl-3.0 | 66 |
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,os,json,urllib,urlparse
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import workers
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['torba.se']
self.base_link = 'http://torba.se'
self.search_mv_link = '/movies/autocomplete?order=relevance&title=%s'
self.search_tv_link = '/series/autocomplete?order=relevance&title=%s'
self.tv_link = '/series/%s/%s/%s'
self.mv_link = '/v/%s'
def movie(self, imdb, title, localtitle, year):
try:
query = self.search_mv_link % (urllib.quote_plus(title))
query = urlparse.urljoin(self.base_link, query)
r = client.request(query, XHR=True)
r = json.loads(r)
t = cleantitle.get(title)
r = [(i['slug'], i['title'], i['year']) for i in r]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == str(i[2])][0]
url = r.encode('utf-8')
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
try:
query = self.search_tv_link % (urllib.quote_plus(tvshowtitle))
query = urlparse.urljoin(self.base_link, query)
r = client.request(query, XHR=True)
r = json.loads(r)
t = cleantitle.get(tvshowtitle)
r = [(i['slug'], i['title'], i['year']) for i in r]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == str(i[2])][0]
url = r.encode('utf-8')
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
if url == None: return
url = '%s/%01d/%01d' % (url, int(season), int(episode))
url = url.encode('utf-8')
return url
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
try: url = self.tv_link % re.findall('(.+?)/(\d*)/(\d*)$', url)[0]
except: url = self.mv_link % url
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
url = client.parseDOM(r, 'a', ret='href', attrs = {'class': 'video-play.+?'})[0]
url = re.findall('(?://|\.)streamtorrent\.tv/.+?/([0-9a-zA-Z/]+)', url)[0]
u = 'https://streamtorrent.tv/api/torrent/%s.json' % url
r = client.request(u)
r = json.loads(r)
r = [i for i in r['files'] if 'streams' in i and len(i['streams']) > 0][0]
r = [{'height': i['height'], 'stream_id': r['_id'], 'vid_id': url} for i in r['streams']]
links = []
links += [{'quality': '1080p', 'url': urllib.urlencode(i)} for i in r if int(i['height']) >= 1080]
links += [{'quality': 'HD', 'url': urllib.urlencode(i)} for i in r if 720 <= int(i['height']) < 1080]
links += [{'quality': 'SD', 'url': urllib.urlencode(i)} for i in r if int(i['height']) <= 720]
links = links[:3]
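            # Only the first three candidates are kept; the list is ordered best quality first.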
for i in links: sources.append({'source': 'torba.se', 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False, 'autoplay': False})
return sources
except:
return sources
def resolve(self, url):
try:
m3u8 = [
'#EXTM3U',
'#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",DEFAULT=YES,AUTOSELECT=YES,NAME="Stream 1",URI="{audio_stream}"',
'',
'#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=0,NAME="{stream_name}",AUDIO="audio"',
'{video_stream}'
]
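            # HLS master-playlist template: one audio rendition group plus a single
            # video variant; the {audio_stream}, {stream_name} and {video_stream}
            # placeholders are filled in via str.format further down.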
query = urlparse.parse_qs(url)
query = dict([(key, query[key][0]) if query[key] else (key, '') for key in query])
auth = 'http://streamtorrent.tv/api/torrent/%s/%s.m3u8?json=true' % (query['vid_id'], query['stream_id'])
r = client.request(auth)
r = json.loads(r)
try: url = r['url']
except: url = None
if not url == None:
def dialog(url):
try: self.disableScraper = control.yesnoDialog('To watch this video visit from any device', '[COLOR skyblue]%s[/COLOR]' % url, '', 'Torba', 'Cancel', 'Settings')
except: pass
workers.Thread(dialog, url).start()
control.sleep(3000)
for i in range(100):
try:
if not control.condVisibility('Window.IsActive(yesnoDialog)'): break
r = client.request(auth)
r = json.loads(r)
try: url = r['url']
except: url = None
if url == None: break
workers.Thread(dialog, url).start()
control.sleep(3000)
except:
pass
if self.disableScraper:
control.openSettings(query='2.0')
return ''
control.execute('Dialog.Close(yesnoDialog)')
if not url == None: return
stream_name = '%sp' % (query['height'])
video_stream = r[stream_name]
if not 'audio' in r: return video_stream
audio_stream = r['audio']
content = ('\n'.join(m3u8)).format(**{'audio_stream': audio_stream, 'stream_name': stream_name, 'video_stream': video_stream})
path = os.path.join(control.dataPath, 'torbase.m3u8')
control.makeFile(control.dataPath) ; control.deleteFile(path)
file = control.openFile(path, 'w') ; file.write(content) ; file.close()
return path
except:
return
| viranch/exodus | resources/lib/sources/torba.py | Python | gpl-3.0 | 6,763 |
# -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2001 - 2020 Massimo Gerardi all rights reserved.
#
# Author: Massimo Gerardi massimo.gerardi@gmail.com
#
# Copyright (c) 2020 Qsistemi.com. All rights reserved.
#
# Viale Giorgio Ribotta, 11 (Roma)
# 00144 Roma (RM) - Italy
# Phone: (+39) 06.87.163
#
#
# See the COPYING file for the software licensing terms.
#
# www.qsistemi.com - italy@qsistemi.com
import cfg
from reportlab.lib.pagesizes import *
def layout():
return portrait(A4)
def testata (c,row):
c.setFont('Helvetica-Bold',12)
if str(row['tipo_ord'])=='PC' :
c.drawString(308,646,_("OFFERTA CLIENTE"))
if str(row['tipo_ord'])=='OC' :
c.drawString(308,646,_("ORDINE CLIENTE"))
c.setFont('Helvetica',12)
c.drawString(466,617,str(row['data_ord']))
c.drawString(360,617,str(row['num_ord']))
c.drawString(143,655,str(row['piva']))
c.setFont('Helvetica-Bold',12)
c.drawString(313,723,str(row['rag_soc']))
c.drawString(313,706,str(row['indi']))
c.drawString(313,690,str(row['cap_zona_pr']))
c.setFont('Helvetica',12)
c.drawRightString(536,744,str(row['cod_cf']))
c.drawString(119,641,str(row['dpag']))
if cfg.logofinc=='1':
c.drawImage(cfg.path_img+'/logo1.jpg',25,740,180,85)
if str(row['banca'])!='':
c.setFont('Helvetica-Bold',10)
c.drawString(40,622,"Banca: ")
#c.drawString(40,620," Check Cin Abi Cab Conto")
c.drawString(40,610,"IBAN: ")
c.setFont('Helvetica',10)
c.drawString(80,622,str(row['banca']))
c.drawString(80,610,str(row['iban']))
elif cfg.logofinc=='2':
c.drawImage(cfg.path_img+'/logo2.jpg',25,745,80,70)
c.setFont('Helvetica-Bold',14)
c.drawString(110,780,str(row['rsaz']))
c.setFont('Helvetica',10)
if str(row['ind1'])!="":
c.drawString(39,720,"Sede Legale: " + str(row['ind_cap_loc_pr']))
c.drawString(39,710,"Sede Amm.: " + str(row['ind_cap_loc_pr1']))
c.drawString(39,700,str(row['tel_faxaz']))
c.drawString(39,690,str(row['emailaz']))
c.drawString(39,680,str(row['piva_cfaz']))
else:
c.drawString(39,720,str(row['ind_cap_loc_pr']))
c.drawString(39,710,str(row['tel_faxaz']))
c.drawString(39,700,str(row['emailaz']))
c.drawString(39,690,str(row['piva_cfaz']))
if str(row['banca'])!='':
c.setFont('Helvetica-Bold',10)
c.drawString(40,622,"Banca: ")
#c.drawString(40,620," Check Cin Abi Cab Conto")
c.drawString(40,610,"IBAN: ")
c.setFont('Helvetica',10)
c.drawString(80,622,str(row['banca']))
c.drawString(80,610,str(row['iban']))
elif cfg.logofinc=='3':
c.drawImage(cfg.path_img+'/logo3.jpg',39,740,180,85)
#c.setFont('Helvetica-Bold',14)
#c.drawString(110,800,str(row['rsaz']))
c.setFont('Helvetica',10)
if str(row['ind1'])!="":
c.drawString(39,725,"Sede Legale: " + str(row['ind_cap_loc_pr']))
c.drawString(39,715,"Sede Amm.: " + str(row['ind_cap_loc_pr1']))
c.drawString(39,705,str(row['tel_faxaz']))
c.drawString(39,695,str(row['emailaz']))
c.drawString(39,685,str(row['piva_cfaz']))
else:
c.drawString(39,725,str(row['ind_cap_loc_pr']))
c.drawString(39,715,str(row['tel_faxaz']))
c.drawString(39,705,str(row['emailaz']))
c.drawString(39,695,str(row['piva_cfaz']))
if str(row['banca'])!='':
c.setFont('Helvetica-Bold',10)
c.drawString(40,622,"Banca: ")
#c.drawString(40,620," Check Cin Abi Cab Conto")
c.drawString(40,610,"IBAN: ")
c.setFont('Helvetica',10)
c.drawString(80,622,str(row['banca']))
c.drawString(80,610,str(row['iban']))
elif cfg.logofinc=='4':
#c.drawImage(cfg.path_img+'/logo4.jpg',39,740,180,85)
##c.drawImage(cfg.path_img+'/logo4.jpg',39,740,260,85)
c.drawImage(cfg.path_img+'/logo4.jpg',39,740,260,75)
c.setFont('Helvetica-Bold',14)
c.drawString(39,730,str(row['rsaz']))
c.setFont('Helvetica',10)
if str(row['ind1'])!="":
c.drawString(39,715,"Sede Legale: " + str(row['ind_cap_loc_pr']))
c.drawString(39,705,"Sede Amm.: " + str(row['ind_cap_loc_pr1']))
c.drawString(39,695,str(row['tel_faxaz']))
c.drawString(39,685,str(row['emailaz']))
c.drawString(39,675,str(row['piva_cfaz']))
else:
c.drawString(39,715,str(row['ind_cap_loc_pr']))
c.drawString(39,703,"Telefono: " + str(row['telaz']))
c.drawString(39,691,str(row['emailaz']))
c.drawString(39,678,str(row['piva_cfaz']))
if str(row['banca'])!='':
c.setFont('Helvetica-Bold',10)
c.drawString(40,622,"Banca: ")
#c.drawString(40,620," Check Cin Abi Cab Conto")
c.drawString(40,610,"IBAN: ")
c.setFont('Helvetica',10)
c.drawString(80,622,str(row['banca']))
c.drawString(80,610,str(row['iban']))
else:
c.setFont('Helvetica-Bold',14)
c.drawString(39,800,str(row['rsaz']))
c.setFont('Helvetica',10)
if str(row['ind1'])!="":
c.drawString(39,785,"Sede Legale: " + str(row['ind_cap_loc_pr']))
c.drawString(39,770,"Sede Amm.: " + str(row['ind_cap_loc_pr1']))
c.drawString(39,755,str(row['tel_faxaz']))
c.drawString(39,740,str(row['emailaz']))
c.drawString(39,725,str(row['piva_cfaz']))
else:
c.drawString(39,785,str(row['ind_cap_loc_pr']))
c.drawString(39,770,str(row['tel_faxaz']))
c.drawString(39,755,str(row['emailaz']))
c.drawString(39,740,str(row['piva_cfaz']))
if str(row['banca'])!='':
c.setFont('Helvetica-Bold',10)
c.drawString(40,622,"Banca: ")
#c.drawString(40,620," Check Cin Abi Cab Conto")
c.drawString(40,610,"IBAN: ")
c.setFont('Helvetica',10)
c.drawString(80,622,str(row['banca']))
c.drawString(80,610,str(row['iban']))
def struttura (c):
c.rect(307,761,254,-92,1,0)
c.rect(36,600,525,-415,1,0)
c.rect(110,600,0,-415,1,0)
c.rect(330,600,0,-415,1,0)
c.rect(370,600,0,-415,1,0)
c.rect(460,600,0,-415,1,0)
c.rect(504,600,0,-415,1,0)
c.rect(37,575,525,0,1,0)
c.setFont('Helvetica-Bold',10)
c.drawString(41,583,_("Cod.Articolo"))
c.drawString(115,583,_("Descrizione Articolo"))
c.setFont('Helvetica-Bold',12)
c.drawString(319,616,_("Num. "))
c.drawString(39,655,_("P.Iva/Cod. Fisc. :"))
c.setFont('Helvetica-Bold',10)
c.drawString(342,583,_("Q.ta`"))
c.drawString(376,583,_("Prezzo Unitario"))
c.drawString(477,584,_("Iva"))
c.drawString(515,583,_("Importo"))
c.drawString(52,156,_("Totale merce"))
c.drawString(413,156,_("Valuta"))
c.drawString(136,156,_("Imponibile"))
c.drawString(204,156,_("Iva"))
c.drawString(320,156,_("Importo Iva"))
c.drawString(231,156,_("Descrizione"))
c.rect(36,177,526,-95,1,0)
c.setFont('Helvetica',11)
c.drawString(313,744,_("Spett.le Ditta"))
c.rect(307,634,254,-26,1,0)
c.setFont('Helvetica-Bold',12)
c.drawString(436,616,_("del "))
#c.drawString(308,646,"PREVISIONE DI ORDINE")
c.setFont('Helvetica-Bold',10)
c.drawString(51,118,_("Scadenze :"))
c.drawString(52,105,_("Importi :"))
c.drawString(470,156,_("Totale Documento"))
c.setFont('Helvetica-Bold',12)
c.drawString(39,641,_("Pagamento :"))
c.drawString(39,618,_("Banca :"))
c.setFont('Helvetica',12)
c.drawString(418,139,_("Euro"))
def corpo (c,row,Y):
c.setFont('Helvetica',10)
c.drawString(41,Y,str(row['codart']))
c.drawString(115,Y,str(row['descriz']))
c.drawRightString(364,Y,str(row['qt_ord']))
c.drawRightString(453,Y,str(row['prez_un']))
c.drawString(480,Y,str(row['alivaart']))
c.drawRightString(554,Y,str(row['tot_riga']))
def dettaglioiva (c,row,Y):
c.setLineWidth(1)
c.setFont('Times-Roman',12)
c.setFont('Helvetica',12)
c.drawString(235,Y,str(row['d_des']))
c.drawString(141,Y,str(row['d_imp']))
c.drawString(203,Y,str(row['d_iva']))
def calce (c,row):
c.drawString(68,140,str(row['tot_merce']))
c.drawRightString(532,140,str(row['tot_ord']))
c.drawString(325,140,str(row['tot_iva']))
def querycorpo ():
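    # Order lines: unit price, quantity and discount percentage, plus the
    # resulting line total (price * qty, net of the sc1 discount).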
return ''' select round(prez_un, 6) as prez_un ,
round(qt_ord, 6) as qt_ord, round(sc1,6) as sc1,
(substr(descriz,0,35)) as descriz, cod as codart, aliva as alivaart,
(round((prez_un * qt_ord),6) - round((prez_un * qt_ord * (round(sc1,6)/100)),6)) as tot_riga
from ordi2 where anno = "%s" and tipo_ord = "%s"
and num_ord = %s '''
def querytestata ():
return ''' select tipo_ord, stt_ord,rag_soc, piva, data_ord, num_ord, cod_cf, tpag,
cap_zona_pr, indi, tabgen.descriz as dpag
from
(select stt_ord, rag_soc, indiriz_cap_zona, cap_zona_pr, indi,
data_ord, num_ord, cod_cf, tpag, anag.piva as piva, tipo_ord
from
(select(rag_soc1||" "||rag_soc2) as rag_soc, (indiriz) as indi,
(indiriz||" "||cap||" "||zona||" "||localit)as indiriz_cap_zona,
(cap||" "||zona||" "||localit||" "||pr )as cap_zona_pr,
ordi1.pagam as tpag, data_ord, num_ord, cod_age, cod_cf, stt_ord,
ordi1.cod_cf as codcf, tipo_ord
from ordi1 where anno = "%s" and tipo_ord = "%s"
and num_ord = %s),anag
where anag.cod = codcf), tabgen
where tabgen.cod = "PAGAM" and tabgen.valore = tpag'''
def querydettaglioiva ():
return ''' select d_imp, d_iva, imp_iva, tabgen.descriz as d_des
from
(select sum(ordi2.tot_riga) as d_imp, ordi2.aliva as d_iva,
(sum(ordi2.tot_riga) * round(ordi2.aliva,2)/100) as imp_iva
from ordi2 where anno = "%s" and tipo_ord = "%s"
and num_ord = %s group by aliva) as ordi, tabgen
where tabgen.cod = "ALIVA" and tabgen.valore = d_iva '''
def querycalce ():
return ''' select sum(ordi.d_imp) as tot_merce,
sum(ordi.imp_iva) as tot_iva,
sum(ordi.d_imp) + sum(ordi.imp_iva) as tot_ord
from
(select sum(ordi2.tot_riga) as d_imp,
(sum(ordi2.tot_riga) * round(ordi2.aliva,2)/100) as imp_iva
from ordi2
where anno = "%s" and tipo_ord = "%s" and num_ord = %s
group by aliva) as ordi '''
def fontcorpo ():
return 12
def Ycorpo ():
return 553
def fontdettaglioiva ():
return 12
def Ydettaglioiva ():
return 140
def fineSeq ():
return 195
| phasis/phasis | phasis/finc/oc.py | Python | gpl-2.0 | 10,870 |
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
- remove 'unfinished' translation items
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'ekwicoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def postprocess_translations():
print('Postprocessing...')
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts') or filename == SOURCE_LANG:
continue
filepath = os.path.join(LOCALE_DIR, filename)
with open(filepath, 'rb') as f:
data = f.read()
# remove non-allowed control characters
data = re.sub('[\x00-\x09\x0b\x0c\x0e-\x1f]', '', data)
data = data.split('\n')
# strip locations from non-origin translation
        # location tags are used to guide translators; they are not necessary for compilation
# TODO: actually process XML instead of relying on Transifex's one-tag-per-line output format
data = [line for line in data if not '<location' in line]
with open(filepath, 'wb') as f:
f.write('\n'.join(data))
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| KaSt/ekwicoin | contrib/devtools/update-translations.py | Python | mit | 2,335 |
"""
python version compatibility code
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import functools
import inspect
import re
import sys
from contextlib import contextmanager
import py
import six
from six import text_type
import _pytest
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
_PY3 = sys.version_info > (3, 0)
_PY2 = not _PY3
if _PY3:
from inspect import signature, Parameter as Parameter
else:
from funcsigs import signature, Parameter as Parameter
NoneType = type(None)
NOTSET = object()
PY35 = sys.version_info[:2] >= (3, 5)
PY36 = sys.version_info[:2] >= (3, 6)
MODULE_NOT_FOUND_ERROR = "ModuleNotFoundError" if PY36 else "ImportError"
if _PY3:
from collections.abc import MutableMapping as MappingMixin
from collections.abc import Mapping, Sequence
else:
# those raise DeprecationWarnings in Python >=3.7
from collections import MutableMapping as MappingMixin # noqa
from collections import Mapping, Sequence # noqa
if sys.version_info >= (3, 4):
from importlib.util import spec_from_file_location
else:
def spec_from_file_location(*_, **__):
return None
def _format_args(func):
return str(signature(func))
isfunction = inspect.isfunction
isclass = inspect.isclass
# used to work around a python2 exception info leak
exc_clear = getattr(sys, "exc_clear", lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(""))
def is_generator(func):
genfunc = inspect.isgeneratorfunction(func)
return genfunc and not iscoroutinefunction(func)
def iscoroutinefunction(func):
"""Return True if func is a decorated coroutine function.
    Note: copied and modified from Python 3.5's builtin coroutines.py to avoid importing asyncio directly,
    which in turn also initializes the "logging" module as a side-effect (see issue #8).
"""
return getattr(func, "_is_coroutine", False) or (
hasattr(inspect, "iscoroutinefunction") and inspect.iscoroutinefunction(func)
)
def getlocation(function, curdir):
function = get_real_func(function)
fn = py.path.local(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" % (fn, lineno + 1)
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock_modules = [sys.modules.get("mock"), sys.modules.get("unittest.mock")]
if any(mock_modules):
sentinels = [m.DEFAULT for m in mock_modules if m is not None]
return len(
[p for p in patchings if not p.attribute_name and p.new in sentinels]
)
return len(patchings)
def getfuncargnames(function, is_method=False, cls=None):
"""Returns the names of a function's mandatory arguments.
This should return the names of all function arguments that:
* Aren't bound to an instance or type as in instance or class methods.
* Don't have default values.
* Aren't bound with functools.partial.
* Aren't replaced with mocks.
    The is_method and cls arguments indicate that the function should
    be treated as a bound method even though it's not one, unless, in
    the case of cls, the function is a static method.
@RonnyPfannschmidt: This function should be refactored when we
revisit fixtures. The fixture mechanism should ask the node for
the fixture names, and not try to obtain directly from the
function object well after collection has occurred.
"""
# The parameters attribute of a Signature object contains an
# ordered mapping of parameter names to Parameter instances. This
# creates a tuple of the names of the parameters that don't have
# defaults.
try:
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
fail(
"Could not determine arguments of {!r}: {}".format(function, e),
pytrace=False,
)
arg_names = tuple(
p.name
for p in parameters.values()
if (
p.kind is Parameter.POSITIONAL_OR_KEYWORD
or p.kind is Parameter.KEYWORD_ONLY
)
and p.default is Parameter.empty
)
# If this function should be treated as a bound method even though
# it's passed as an unbound method or function, remove the first
# parameter name.
if is_method or (
cls and not isinstance(cls.__dict__.get(function.__name__, None), staticmethod)
):
arg_names = arg_names[1:]
# Remove any names that will be replaced with mocks.
if hasattr(function, "__wrapped__"):
arg_names = arg_names[num_mock_patch_args(function) :]
return arg_names
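# Illustrative behaviour (a sketch, not from the original source): given
#   def f(a, b=1, *, c, d=2): ...
# getfuncargnames(f) returns ("a", "c") -- parameters with defaults are excluded.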
@contextmanager
def dummy_context_manager():
"""Context manager that does nothing, useful in situations where you might need an actual context manager or not
    depending on some condition. Using this allows keeping the same code."""
yield
def get_default_arg_names(function):
# Note: this code intentionally mirrors the code at the beginning of getfuncargnames,
# to get the arguments which were excluded from its result because they had default values
return tuple(
p.name
for p in signature(function).parameters.values()
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
and p.default is not Parameter.empty
)
if _PY3:
STRING_TYPES = bytes, str
UNICODE_TYPES = six.text_type
if PY35:
def _bytes_to_ascii(val):
return val.decode("ascii", "backslashreplace")
else:
def _bytes_to_ascii(val):
if val:
# source: http://goo.gl/bGsnwC
encoded_bytes, _ = codecs.escape_encode(val)
return encoded_bytes.decode("ascii")
else:
# empty bytes crashes codecs.escape_encode (#1087)
return ""
def ascii_escaped(val):
"""If val is pure ascii, returns it as a str(). Otherwise, escapes
bytes objects into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
'4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string.
"""
if isinstance(val, bytes):
return _bytes_to_ascii(val)
else:
return val.encode("unicode_escape").decode("ascii")
else:
STRING_TYPES = six.string_types
UNICODE_TYPES = six.text_type
def ascii_escaped(val):
"""In py2 bytes and str are the same type, so return if it's a bytes
object, return it unchanged if it is a full ascii string,
otherwise escape it into its binary form.
If it's a unicode string, change the unicode characters into
unicode escapes.
"""
if isinstance(val, bytes):
try:
return val.encode("ascii")
except UnicodeDecodeError:
return val.encode("string-escape")
else:
return val.encode("unicode-escape")
class _PytestWrapper(object):
"""Dummy wrapper around a function object for internal use only.
Used to correctly unwrap the underlying function object
when we are creating fixtures, because we wrap the function object ourselves with a decorator
to issue warnings when the fixture function is called directly.
"""
def __init__(self, obj):
self.obj = obj
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
start_obj = obj
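    # Bound the unwrapping loop so a cyclic __wrapped__ chain cannot hang us;
    # the else clause below raises if no real function is found within 100 steps.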
for i in range(100):
# __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
# to trigger a warning if it gets called directly instead of by pytest: we don't
# want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
new_obj = getattr(obj, "__pytest_wrapped__", None)
if isinstance(new_obj, _PytestWrapper):
obj = new_obj.obj
break
new_obj = getattr(obj, "__wrapped__", None)
if new_obj is None:
break
obj = new_obj
else:
raise ValueError(
("could not find real function of {start}\nstopped at {current}").format(
start=py.io.saferepr(start_obj), current=py.io.saferepr(obj)
)
)
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def get_real_method(obj, holder):
"""
Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time
returning a bound method to ``holder`` if the original object was a bound method.
"""
try:
is_method = hasattr(obj, "__func__")
obj = get_real_func(obj)
except Exception:
return obj
if is_method and hasattr(obj, "__get__") and callable(obj.__get__):
obj = obj.__get__(holder)
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, "place_as"):
obj = obj.place_as
fslineno = _pytest._code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception or any OutcomeException.
Attribute access can potentially fail for 'evil' Python objects.
See issue #214.
    It catches OutcomeException because of #2490 (issue #580): new outcomes
    are derived from BaseException instead of Exception (for more details check #2707).
"""
try:
return getattr(object, name, default)
except TEST_OUTCOME:
return default
def safe_isclass(obj):
"""Ignore any exception via isinstance on Python 3."""
try:
return isclass(obj)
except Exception:
return False
def _is_unittest_unexpected_success_a_failure():
"""Return if the test suite should fail if an @expectedFailure unittest test PASSES.
From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
Changed in version 3.4: Returns False if there were any
unexpectedSuccesses from tests marked with the expectedFailure() decorator.
"""
return sys.version_info >= (3, 4)
if _PY3:
def safe_str(v):
"""returns v as string"""
return str(v)
else:
def safe_str(v):
"""returns v as string, converting to ascii if necessary"""
try:
return str(v)
except UnicodeError:
if not isinstance(v, text_type):
v = text_type(v)
errors = "replace"
return v.encode("utf-8", errors)
COLLECT_FAKEMODULE_ATTRIBUTES = (
"Collector",
"Module",
"Generator",
"Function",
"Instance",
"Session",
"Item",
"Class",
"File",
"_fillfuncargs",
)
def _setup_collect_fakemodule():
from types import ModuleType
import pytest
pytest.collect = ModuleType("pytest.collect")
pytest.collect.__all__ = [] # used for setns
for attr in COLLECT_FAKEMODULE_ATTRIBUTES:
setattr(pytest.collect, attr, getattr(pytest, attr))
if _PY2:
# Without this the test_dupfile_on_textio will fail, otherwise CaptureIO could directly inherit from StringIO.
from py.io import TextIO
class CaptureIO(TextIO):
@property
def encoding(self):
return getattr(self, "_encoding", "UTF-8")
else:
import io
class CaptureIO(io.TextIOWrapper):
def __init__(self):
super(CaptureIO, self).__init__(
io.BytesIO(), encoding="UTF-8", newline="", write_through=True
)
def getvalue(self):
return self.buffer.getvalue().decode("UTF-8")
class FuncargnamesCompatAttr(object):
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
return self.fixturenames
if six.PY2:
def lru_cache(*_, **__):
def dec(fn):
return fn
return dec
else:
from functools import lru_cache # noqa: F401
| txomon/pytest | src/_pytest/compat.py | Python | mit | 13,160 |
import uuid
from django.core.management import call_command
from oscar.core.loading import get_model
from testfixtures import LogCapture
from ecommerce.coupons.tests.mixins import CouponMixin
from ecommerce.tests.testcases import TestCase
Product = get_model('catalogue', 'Product')
LOGGER_NAME = 'ecommerce.extensions.catalogue.management.commands.populate_enterprise_id_product_attribute'
class PopulateEnterpriseIDProductAttributeTests(TestCase, CouponMixin):
"""Tests for populate_enterprise_id_product_attribute management command."""
def test_no_coupons_found(self):
"""Test that command logs no offer needs to be changed."""
with LogCapture(LOGGER_NAME) as log:
call_command('populate_enterprise_id_product_attribute')
log.check(
(
LOGGER_NAME,
'INFO',
'Found 0 coupon products to update.'
)
)
def test_populate_enterprise_id_product_attribute(self):
"""Test that command populates the enterprise id product attribute."""
enterprise_id = str(uuid.uuid4())
coupon = self.create_coupon(enterprise_customer=enterprise_id)
expected = [
(
LOGGER_NAME,
'INFO',
'Found 1 coupon products to update.'
),
(
LOGGER_NAME,
'INFO',
'Processing batch from index 0 to 100'
),
(
LOGGER_NAME,
'INFO',
'Setting enterprise id product attribute for Product {} to value {}'.format(coupon.id, enterprise_id)
),
]
with LogCapture(LOGGER_NAME) as log:
call_command('populate_enterprise_id_product_attribute')
log.check(*expected)
coupon = Product.objects.get(id=coupon.id)
assert coupon.attr.enterprise_customer_uuid == enterprise_id
def test_populate_enterprise_id_product_attribute_in_batches(self):
"""Test that command populates enterprise id product attribute in batches."""
coupon_count = 10
coupon_ids = []
enterprise_ids = []
log_messages = []
for idx in range(coupon_count):
enterprise_id = str(uuid.uuid4())
coupon = self.create_coupon(title='Test Coupon {}'.format(idx), enterprise_customer=enterprise_id)
coupon_ids.append(coupon.id)
enterprise_ids.append(enterprise_id)
log_messages.append(
(
LOGGER_NAME,
'INFO',
'Setting enterprise id product attribute for Product {} to value {}'.format(
coupon.id, enterprise_id)
)
)
expected = [
(
LOGGER_NAME,
'INFO',
'Found {} coupon products to update.'.format(coupon_count)
),
(
LOGGER_NAME,
'INFO',
'Processing batch from index 0 to 5'
),
]
expected.extend(log_messages[:5])
expected.append(
(
LOGGER_NAME,
'INFO',
'Processing batch from index 5 to 10'
),
)
expected.extend(log_messages[5:])
with LogCapture(LOGGER_NAME) as log:
call_command('populate_enterprise_id_product_attribute', limit=5)
log.check(*expected)
for idx in range(coupon_count):
coupon = Product.objects.get(id=coupon_ids[idx])
assert coupon.attr.enterprise_customer_uuid == enterprise_ids[idx]
def test_populate_enterprise_id_product_attribute_with_exception(self):
"""Test that command with exception."""
self.create_coupon(enterprise_customer=str(uuid.uuid4()))
expected = [
(
LOGGER_NAME,
'INFO',
'Found 1 coupon products to update.'
),
(
LOGGER_NAME,
'ERROR',
'Command execution failed while executing batch -1,10\nNegative indexing is not supported.'
)
]
with LogCapture(LOGGER_NAME) as log:
call_command('populate_enterprise_id_product_attribute', offset=-1, limit=10)
log.check(*expected)
| edx/ecommerce | ecommerce/extensions/catalogue/management/commands/tests/test_populate_enterprise_id_product_attribute.py | Python | agpl-3.0 | 4,461 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import os.path
import inspect
import hashlib
import spack.error
import spack.fetch_strategy as fs
import spack.stage
import spack.dependency
from spack.util.crypto import checksum, Checker
from llnl.util.filesystem import working_dir
from spack.util.executable import which
from spack.util.compression import allowed_archive
def absolute_path_for_package(pkg):
"""Returns the absolute path to the ``package.py`` file implementing
the recipe for the package passed as argument.
Args:
pkg: a valid package object, or a Dependency object.
"""
if isinstance(pkg, spack.dependency.Dependency):
pkg = pkg.pkg
m = inspect.getmodule(pkg)
return os.path.abspath(m.__file__)
class Patch(object):
"""Base class to describe a patch that needs to be applied to some
expanded source code.
"""
@staticmethod
def create(pkg, path_or_url, level=1, working_dir=".", **kwargs):
"""
Factory method that creates an instance of some class derived from
Patch
Args:
pkg: package that needs to be patched
path_or_url: path or url where the patch is found
level: patch level (default 1)
working_dir (str): dir to change to before applying (default '.')
Returns:
instance of some Patch class
"""
# Check if we are dealing with a URL
if '://' in path_or_url:
return UrlPatch(path_or_url, level, working_dir, **kwargs)
# Assume patches are stored in the repository
return FilePatch(pkg, path_or_url, level, working_dir)
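        # Dispatch sketch (hypothetical arguments):
        #   Patch.create(pkg, 'fix-build.patch')                              -> FilePatch
        #   Patch.create(pkg, 'https://example.com/fix.patch', sha256='...')  -> UrlPatch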
def __init__(self, path_or_url, level, working_dir):
        # Check on level (must be a non-negative integer)
if not isinstance(level, int) or not level >= 0:
raise ValueError("Patch level needs to be a non-negative integer.")
# Attributes shared by all patch subclasses
self.path_or_url = path_or_url
self.level = level
self.working_dir = working_dir
# self.path needs to be computed by derived classes
# before a call to apply
self.path = None
def apply(self, stage):
"""Apply the patch at self.path to the source code in the
supplied stage
Args:
stage: stage for the package that needs to be patched
"""
patch = which("patch", required=True)
with working_dir(stage.source_path):
            # -s: work silently; -p: strip level; -i: patch file; -d: directory to apply in.
patch('-s', '-p', str(self.level), '-i', self.path,
"-d", self.working_dir)
class FilePatch(Patch):
"""Describes a patch that is retrieved from a file in the repository"""
def __init__(self, pkg, path_or_url, level, working_dir):
super(FilePatch, self).__init__(path_or_url, level, working_dir)
pkg_dir = os.path.dirname(absolute_path_for_package(pkg))
self.path = os.path.join(pkg_dir, path_or_url)
if not os.path.isfile(self.path):
raise NoSuchPatchError(
"No such patch for package %s: %s" % (pkg.name, self.path))
self._sha256 = None
@property
def sha256(self):
if self._sha256 is None:
self._sha256 = checksum(hashlib.sha256, self.path)
return self._sha256
class UrlPatch(Patch):
"""Describes a patch that is retrieved from a URL"""
def __init__(self, path_or_url, level, working_dir, **kwargs):
super(UrlPatch, self).__init__(path_or_url, level, working_dir)
self.url = path_or_url
self.archive_sha256 = None
if allowed_archive(self.url):
if 'archive_sha256' not in kwargs:
raise PatchDirectiveError(
"Compressed patches require 'archive_sha256' "
"and patch 'sha256' attributes: %s" % self.url)
self.archive_sha256 = kwargs.get('archive_sha256')
if 'sha256' not in kwargs:
raise PatchDirectiveError("URL patches require a sha256 checksum")
self.sha256 = kwargs.get('sha256')
def apply(self, stage):
"""Retrieve the patch in a temporary stage, computes
self.path and calls `super().apply(stage)`
Args:
stage: stage for the package that needs to be patched
"""
# use archive digest for compressed archives
fetch_digest = self.sha256
if self.archive_sha256:
fetch_digest = self.archive_sha256
fetcher = fs.URLFetchStrategy(self.url, fetch_digest)
mirror = os.path.join(
os.path.dirname(stage.mirror_path),
os.path.basename(self.url))
with spack.stage.Stage(fetcher, mirror_path=mirror) as patch_stage:
patch_stage.fetch()
patch_stage.check()
patch_stage.cache_local()
root = patch_stage.path
if self.archive_sha256:
patch_stage.expand_archive()
root = patch_stage.source_path
files = os.listdir(root)
if not files:
if self.archive_sha256:
raise NoSuchPatchError(
"Archive was empty: %s" % self.url)
else:
raise NoSuchPatchError(
"Patch failed to download: %s" % self.url)
self.path = os.path.join(root, files.pop())
if not os.path.isfile(self.path):
raise NoSuchPatchError(
"Archive %s contains no patch file!" % self.url)
            # For a compressed archive, the patch sha256 must be checked again
            # after expansion, since the patch file lives inside the expanded
            # directory rather than at the fetched location.
if self.archive_sha256:
checker = Checker(self.sha256)
if not checker.check(self.path):
raise fs.ChecksumError(
"sha256 checksum failed for %s" % self.path,
"Expected %s but got %s" % (self.sha256, checker.sum))
super(UrlPatch, self).apply(stage)
class NoSuchPatchError(spack.error.SpackError):
"""Raised when a patch file doesn't exist."""
class PatchDirectiveError(spack.error.SpackError):
"""Raised when the wrong arguments are suppled to the patch directive."""
| mfherbst/spack | lib/spack/spack/patch.py | Python | lgpl-2.1 | 7,695 |
# -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import ConstructorStats
from pybind11_tests import multiple_inheritance as m
def test_multiple_inheritance_cpp():
mt = m.MIType(3, 4)
assert mt.foo() == 3
assert mt.bar() == 4
@pytest.mark.skipif("env.PYPY and env.PY2")
@pytest.mark.xfail("env.PYPY and not env.PY2")
def test_multiple_inheritance_mix1():
class Base1:
def __init__(self, i):
self.i = i
def foo(self):
return self.i
class MITypePy(Base1, m.Base2):
def __init__(self, i, j):
Base1.__init__(self, i)
m.Base2.__init__(self, j)
mt = MITypePy(3, 4)
assert mt.foo() == 3
assert mt.bar() == 4
def test_multiple_inheritance_mix2():
class Base2:
def __init__(self, i):
self.i = i
def bar(self):
return self.i
class MITypePy(m.Base1, Base2):
def __init__(self, i, j):
m.Base1.__init__(self, i)
Base2.__init__(self, j)
mt = MITypePy(3, 4)
assert mt.foo() == 3
assert mt.bar() == 4
@pytest.mark.skipif("env.PYPY and env.PY2")
@pytest.mark.xfail("env.PYPY and not env.PY2")
def test_multiple_inheritance_python():
class MI1(m.Base1, m.Base2):
def __init__(self, i, j):
m.Base1.__init__(self, i)
m.Base2.__init__(self, j)
class B1(object):
def v(self):
return 1
class MI2(B1, m.Base1, m.Base2):
def __init__(self, i, j):
B1.__init__(self)
m.Base1.__init__(self, i)
m.Base2.__init__(self, j)
class MI3(MI2):
def __init__(self, i, j):
MI2.__init__(self, i, j)
class MI4(MI3, m.Base2):
def __init__(self, i, j):
MI3.__init__(self, i, j)
# This should be ignored (Base2 is already initialized via MI2):
m.Base2.__init__(self, i + 100)
class MI5(m.Base2, B1, m.Base1):
def __init__(self, i, j):
B1.__init__(self)
m.Base1.__init__(self, i)
m.Base2.__init__(self, j)
class MI6(m.Base2, B1):
def __init__(self, i):
m.Base2.__init__(self, i)
B1.__init__(self)
class B2(B1):
def v(self):
return 2
class B3(object):
def v(self):
return 3
class B4(B3, B2):
def v(self):
return 4
class MI7(B4, MI6):
def __init__(self, i):
B4.__init__(self)
MI6.__init__(self, i)
class MI8(MI6, B3):
def __init__(self, i):
MI6.__init__(self, i)
B3.__init__(self)
class MI8b(B3, MI6):
def __init__(self, i):
B3.__init__(self)
MI6.__init__(self, i)
mi1 = MI1(1, 2)
assert mi1.foo() == 1
assert mi1.bar() == 2
mi2 = MI2(3, 4)
assert mi2.v() == 1
assert mi2.foo() == 3
assert mi2.bar() == 4
mi3 = MI3(5, 6)
assert mi3.v() == 1
assert mi3.foo() == 5
assert mi3.bar() == 6
mi4 = MI4(7, 8)
assert mi4.v() == 1
assert mi4.foo() == 7
assert mi4.bar() == 8
mi5 = MI5(10, 11)
assert mi5.v() == 1
assert mi5.foo() == 10
assert mi5.bar() == 11
mi6 = MI6(12)
assert mi6.v() == 1
assert mi6.bar() == 12
mi7 = MI7(13)
assert mi7.v() == 4
assert mi7.bar() == 13
mi8 = MI8(14)
assert mi8.v() == 1
assert mi8.bar() == 14
mi8b = MI8b(15)
assert mi8b.v() == 3
assert mi8b.bar() == 15
def test_multiple_inheritance_python_many_bases():
class MIMany14(m.BaseN1, m.BaseN2, m.BaseN3, m.BaseN4):
def __init__(self):
m.BaseN1.__init__(self, 1)
m.BaseN2.__init__(self, 2)
m.BaseN3.__init__(self, 3)
m.BaseN4.__init__(self, 4)
class MIMany58(m.BaseN5, m.BaseN6, m.BaseN7, m.BaseN8):
def __init__(self):
m.BaseN5.__init__(self, 5)
m.BaseN6.__init__(self, 6)
m.BaseN7.__init__(self, 7)
m.BaseN8.__init__(self, 8)
class MIMany916(
m.BaseN9,
m.BaseN10,
m.BaseN11,
m.BaseN12,
m.BaseN13,
m.BaseN14,
m.BaseN15,
m.BaseN16,
):
def __init__(self):
m.BaseN9.__init__(self, 9)
m.BaseN10.__init__(self, 10)
m.BaseN11.__init__(self, 11)
m.BaseN12.__init__(self, 12)
m.BaseN13.__init__(self, 13)
m.BaseN14.__init__(self, 14)
m.BaseN15.__init__(self, 15)
m.BaseN16.__init__(self, 16)
class MIMany19(MIMany14, MIMany58, m.BaseN9):
def __init__(self):
MIMany14.__init__(self)
MIMany58.__init__(self)
m.BaseN9.__init__(self, 9)
class MIMany117(MIMany14, MIMany58, MIMany916, m.BaseN17):
def __init__(self):
MIMany14.__init__(self)
MIMany58.__init__(self)
MIMany916.__init__(self)
m.BaseN17.__init__(self, 17)
# Inherits from 4 registered C++ classes: can fit in one pointer on any modern arch:
a = MIMany14()
    for i in range(1, 5):
assert getattr(a, "f" + str(i))() == 2 * i
# Inherits from 8: requires 1/2 pointers worth of holder flags on 32/64-bit arch:
b = MIMany916()
    for i in range(9, 17):
assert getattr(b, "f" + str(i))() == 2 * i
# Inherits from 9: requires >= 2 pointers worth of holder flags
c = MIMany19()
    for i in range(1, 10):
assert getattr(c, "f" + str(i))() == 2 * i
# Inherits from 17: requires >= 3 pointers worth of holder flags
d = MIMany117()
    for i in range(1, 18):
assert getattr(d, "f" + str(i))() == 2 * i
def test_multiple_inheritance_virtbase():
class MITypePy(m.Base12a):
def __init__(self, i, j):
m.Base12a.__init__(self, i, j)
mt = MITypePy(3, 4)
assert mt.bar() == 4
assert m.bar_base2a(mt) == 4
assert m.bar_base2a_sharedptr(mt) == 4
def test_mi_static_properties():
"""Mixing bases with and without static properties should be possible
and the result should be independent of base definition order"""
for d in (m.VanillaStaticMix1(), m.VanillaStaticMix2()):
assert d.vanilla() == "Vanilla"
assert d.static_func1() == "WithStatic1"
assert d.static_func2() == "WithStatic2"
assert d.static_func() == d.__class__.__name__
m.WithStatic1.static_value1 = 1
m.WithStatic2.static_value2 = 2
assert d.static_value1 == 1
assert d.static_value2 == 2
assert d.static_value == 12
d.static_value1 = 0
assert d.static_value1 == 0
d.static_value2 = 0
assert d.static_value2 == 0
d.static_value = 0
assert d.static_value == 0
# Requires PyPy 6+
def test_mi_dynamic_attributes():
"""Mixing bases with and without dynamic attribute support"""
for d in (m.VanillaDictMix1(), m.VanillaDictMix2()):
d.dynamic = 1
assert d.dynamic == 1
def test_mi_unaligned_base():
"""Returning an offset (non-first MI) base class pointer should recognize the instance"""
n_inst = ConstructorStats.detail_reg_inst()
c = m.I801C()
d = m.I801D()
# + 4 below because we have the two instances, and each instance has offset base I801B2
assert ConstructorStats.detail_reg_inst() == n_inst + 4
b1c = m.i801b1_c(c)
assert b1c is c
b2c = m.i801b2_c(c)
assert b2c is c
b1d = m.i801b1_d(d)
assert b1d is d
b2d = m.i801b2_d(d)
assert b2d is d
assert ConstructorStats.detail_reg_inst() == n_inst + 4 # no extra instances
del c, b1c, b2c
assert ConstructorStats.detail_reg_inst() == n_inst + 2
del d, b1d, b2d
assert ConstructorStats.detail_reg_inst() == n_inst
def test_mi_base_return():
"""Tests returning an offset (non-first MI) base class pointer to a derived instance"""
n_inst = ConstructorStats.detail_reg_inst()
c1 = m.i801c_b1()
assert type(c1) is m.I801C
assert c1.a == 1
assert c1.b == 2
d1 = m.i801d_b1()
assert type(d1) is m.I801D
assert d1.a == 1
assert d1.b == 2
assert ConstructorStats.detail_reg_inst() == n_inst + 4
c2 = m.i801c_b2()
assert type(c2) is m.I801C
assert c2.a == 1
assert c2.b == 2
d2 = m.i801d_b2()
assert type(d2) is m.I801D
assert d2.a == 1
assert d2.b == 2
assert ConstructorStats.detail_reg_inst() == n_inst + 8
del c2
assert ConstructorStats.detail_reg_inst() == n_inst + 6
del c1, d1, d2
assert ConstructorStats.detail_reg_inst() == n_inst
# Returning an unregistered derived type with a registered base; we won't
# pick up the derived type, obviously, but should still work (as an object
# of whatever type was returned).
e1 = m.i801e_c()
assert type(e1) is m.I801C
assert e1.a == 1
assert e1.b == 2
e2 = m.i801e_b2()
assert type(e2) is m.I801B2
assert e2.b == 2
def test_diamond_inheritance():
"""Tests that diamond inheritance works as expected (issue #959)"""
# Issue #959: this shouldn't segfault:
d = m.D()
# Make sure all the various distinct pointers are all recognized as registered instances:
assert d is d.c0()
assert d is d.c1()
assert d is d.b()
assert d is d.c0().b()
assert d is d.c1().b()
assert d is d.c0().c1().b().c0().b()
|
YannickJadoul/Parselmouth
|
pybind11/tests/test_multiple_inheritance.py
|
Python
|
gpl-3.0
| 9,495
|
import datetime
import itertools
import logging
import re
import time
from hashlib import sha1
from typing import List
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning, module='newrelic')
import newrelic.agent
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.validators import MinLengthValidator
from django.db import models, transaction
from django.db.models import Count, Max, Min, Q, Subquery
from django.db.utils import ProgrammingError
from django.forms import model_to_dict
from django.utils import timezone
from treeherder.webapp.api.utils import REPO_GROUPS, to_timestamp
logger = logging.getLogger(__name__)
class FailuresQuerySet(models.QuerySet):
def by_bug(self, bug_id):
return self.filter(bug_id=int(bug_id))
def by_date(self, startday, endday):
return self.select_related('push', 'job').filter(job__push__time__range=(startday, endday))
def by_repo(self, name, bugjobmap=True):
if name in REPO_GROUPS:
repo = REPO_GROUPS[name]
return (
self.filter(job__repository_id__in=repo)
if bugjobmap
else self.filter(repository_id__in=repo)
)
elif name == 'all':
return self
else:
return (
self.filter(job__repository__name=name)
if bugjobmap
else self.filter(repository__name=name)
)
class NamedModel(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100, unique=True)
class Meta:
abstract = True
def __str__(self):
return self.name
class Product(NamedModel):
class Meta:
db_table = 'product'
class BuildPlatform(models.Model):
id = models.AutoField(primary_key=True)
os_name = models.CharField(max_length=25)
platform = models.CharField(max_length=100, db_index=True)
architecture = models.CharField(max_length=25, blank=True, db_index=True)
class Meta:
db_table = 'build_platform'
unique_together = ("os_name", "platform", "architecture")
def __str__(self):
return "{0} {1} {2}".format(self.os_name, self.platform, self.architecture)
class Option(NamedModel):
class Meta:
db_table = 'option'
class RepositoryGroup(NamedModel):
# Fields are pre-defined in fixtures/repository_group.json
description = models.TextField(blank=True)
class Meta:
db_table = 'repository_group'
class Repository(models.Model):
id = models.AutoField(primary_key=True)
repository_group = models.ForeignKey('RepositoryGroup', on_delete=models.CASCADE)
name = models.CharField(max_length=50, unique=True, db_index=True)
dvcs_type = models.CharField(max_length=25, db_index=True)
url = models.CharField(max_length=255)
branch = models.CharField(max_length=255, null=True, db_index=True)
codebase = models.CharField(max_length=50, blank=True, db_index=True)
description = models.TextField(blank=True)
active_status = models.CharField(max_length=7, blank=True, default='active', db_index=True)
life_cycle_order = models.PositiveIntegerField(null=True, default=None)
performance_alerts_enabled = models.BooleanField(default=False)
expire_performance_data = models.BooleanField(default=True)
is_try_repo = models.BooleanField(default=False)
tc_root_url = models.CharField(max_length=255, null=False, db_index=True)
class Meta:
db_table = 'repository'
verbose_name_plural = 'repositories'
@classmethod
def fetch_all_names(cls) -> List[str]:
        return list(cls.objects.values_list('name', flat=True))
def __str__(self):
return "{0} {1}".format(self.name, self.repository_group)
class Push(models.Model):
"""
    A push to a repository.
    A push should contain one or more commit objects, representing
    the changesets that were part of the push.
"""
repository = models.ForeignKey(Repository, on_delete=models.CASCADE)
revision = models.CharField(max_length=40, db_index=True)
author = models.CharField(max_length=150)
time = models.DateTimeField(db_index=True)
failures = FailuresQuerySet.as_manager()
objects = models.Manager()
class Meta:
db_table = 'push'
unique_together = ('repository', 'revision')
def __str__(self):
return "{0} {1}".format(self.repository.name, self.revision)
def total_jobs(self, job_type, result):
return self.jobs.filter(job_type=job_type, result=result).count()
def get_status(self):
"""
Gets a summary of what passed/failed for the push
"""
jobs = (
Job.objects.filter(push=self)
.filter(
Q(failure_classification__isnull=True)
| Q(failure_classification__name='not classified')
)
.exclude(tier=3)
)
status_dict = {'completed': 0, 'pending': 0, 'running': 0}
for (state, result, total) in jobs.values_list('state', 'result').annotate(
total=Count('result')
):
if state == 'completed':
status_dict[result] = total
status_dict[state] += total
else:
status_dict[state] = total
if 'superseded' in status_dict:
            # backward compatibility for API consumers
status_dict['coalesced'] = status_dict['superseded']
return status_dict
class Commit(models.Model):
"""
A single commit in a push
"""
push = models.ForeignKey(Push, on_delete=models.CASCADE, related_name='commits')
revision = models.CharField(max_length=40, db_index=True)
author = models.CharField(max_length=150)
comments = models.TextField()
class Meta:
db_table = 'commit'
unique_together = ('push', 'revision')
def __str__(self):
return "{0} {1}".format(self.push.repository.name, self.revision)
class MachinePlatform(models.Model):
id = models.AutoField(primary_key=True)
os_name = models.CharField(max_length=25)
platform = models.CharField(max_length=100, db_index=True)
architecture = models.CharField(max_length=25, blank=True, db_index=True)
class Meta:
db_table = 'machine_platform'
unique_together = ("os_name", "platform", "architecture")
def __str__(self):
return "{0} {1} {2}".format(self.os_name, self.platform, self.architecture)
class Bugscache(models.Model):
id = models.PositiveIntegerField(primary_key=True)
status = models.CharField(max_length=64, db_index=True)
resolution = models.CharField(max_length=64, blank=True, db_index=True)
# Is covered by a FULLTEXT index created via a migrations RunSQL operation.
summary = models.CharField(max_length=255)
dupe_of = models.PositiveIntegerField(null=True)
crash_signature = models.TextField(blank=True)
keywords = models.TextField(blank=True)
modified = models.DateTimeField()
whiteboard = models.CharField(max_length=100, blank=True, default='')
processed_update = models.BooleanField(default=True)
class Meta:
db_table = 'bugscache'
verbose_name_plural = 'bugscache'
def __str__(self):
return "{0}".format(self.id)
@classmethod
    def sanitized_search_term(cls, search_term):
# MySQL Full Text Search operators, based on:
# https://dev.mysql.com/doc/refman/5.7/en/fulltext-boolean.html
# and other characters we want to remove
mysql_fts_operators_re = re.compile(r'[-+@<>()~*"\\]')
# Replace MySQL's Full Text Search Operators with spaces so searching
# for errors that have been pasted in still works.
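        # E.g. (hypothetical input): '-assertion (fatal)' comes back as
        # ' assertion  fatal ', with each operator replaced by a space.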
return re.sub(mysql_fts_operators_re, " ", search_term)
@classmethod
    def search(cls, search_term):
max_size = 50
# 365 days ago as limit for recent bugs which get suggested by default
# if they are not resolved. Other bugs, both older or resolved, are
# hidden by default with a "Show / Hide More" link.
time_limit = datetime.datetime.now() - datetime.timedelta(days=365)
# Do not wrap a string in quotes to search as a phrase;
# see https://bugzilla.mozilla.org/show_bug.cgi?id=1704311
        search_term_fulltext = cls.sanitized_search_term(search_term)
# Substitute escape and wildcard characters, so the search term is used
# literally in the LIKE statement.
search_term_like = (
search_term.replace('=', '==').replace('%', '=%').replace('_', '=_').replace('\\"', '')
)
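        # E.g. (hypothetical input): '50% error_x' becomes '50=% error=_x',
        # so '%' and '_' are matched literally under ESCAPE '='.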
        recent_qs = cls.objects.raw(
"""
SELECT id, summary, crash_signature, keywords, resolution, status, dupe_of,
MATCH (`summary`) AGAINST (%s IN BOOLEAN MODE) AS relevance
FROM bugscache
WHERE 1
AND resolution = ''
AND `summary` LIKE CONCAT ('%%%%', %s, '%%%%') ESCAPE '='
AND modified >= %s
ORDER BY relevance DESC
LIMIT 0,%s
""",
[search_term_fulltext, search_term_like, time_limit, max_size],
)
exclude_fields = ["modified", "processed_update"]
try:
open_recent_match_string = [
model_to_dict(item, exclude=exclude_fields) for item in recent_qs
]
open_recent = [
match
for match in open_recent_match_string
if match["summary"].startswith(search_term)
or "/" + search_term in match["summary"]
or " " + search_term in match["summary"]
or "\\" + search_term in match["summary"]
or "," + search_term in match["summary"]
]
except ProgrammingError as e:
newrelic.agent.record_exception()
logger.error(
'Failed to execute FULLTEXT search on Bugscache, error={}, SQL={}'.format(
e, recent_qs.query.__str__()
)
)
open_recent = []
        all_others_qs = cls.objects.raw(
"""
SELECT id, summary, crash_signature, keywords, resolution, status, dupe_of,
MATCH (`summary`) AGAINST (%s IN BOOLEAN MODE) AS relevance
FROM bugscache
WHERE 1
AND `summary` LIKE CONCAT ('%%%%', %s, '%%%%') ESCAPE '='
AND (modified < %s OR resolution <> '')
ORDER BY relevance DESC
LIMIT 0,%s
""",
[search_term_fulltext, search_term_like, time_limit, max_size],
)
try:
all_others_match_string = [
model_to_dict(item, exclude=exclude_fields) for item in all_others_qs
]
all_others = [
match
for match in all_others_match_string
if match["summary"].startswith(search_term)
or "/" + search_term in match["summary"]
or " " + search_term in match["summary"]
or "\\" + search_term in match["summary"]
or "," + search_term in match["summary"]
]
except ProgrammingError as e:
newrelic.agent.record_exception()
logger.error(
'Failed to execute FULLTEXT search on Bugscache, error={}, SQL={}'.format(
                    e, all_others_qs.query.__str__()
)
)
all_others = []
return {"open_recent": open_recent, "all_others": all_others}
class BugzillaComponent(models.Model):
product = models.CharField(max_length=60)
component = models.CharField(max_length=60)
class Meta:
db_table = 'bugzilla_component'
verbose_name_plural = 'bugzilla_components'
unique_together = ("product", "component")
def __str__(self):
return "{0} :: {1}".format(self.product, self.component)
class FilesBugzillaMap(models.Model):
path = models.CharField(max_length=255, unique=True, db_index=True)
file_name = models.CharField(max_length=255, db_index=True)
bugzilla_component = models.ForeignKey('BugzillaComponent', on_delete=models.CASCADE)
class Meta:
db_table = 'file_bugzilla_component'
verbose_name_plural = 'files_bugzilla_components'
def __str__(self):
return "{0}".format(self.path)
class BugzillaSecurityGroup(models.Model):
product = models.CharField(max_length=60, unique=True, db_index=True)
security_group = models.CharField(max_length=60)
class Meta:
db_table = 'bugzilla_security_group'
verbose_name_plural = 'bugzilla_security_groups'
class Machine(NamedModel):
class Meta:
db_table = 'machine'
class JobGroup(models.Model):
id = models.AutoField(primary_key=True)
symbol = models.CharField(max_length=25, default='?', db_index=True)
name = models.CharField(max_length=100)
description = models.TextField(blank=True)
class Meta:
db_table = 'job_group'
unique_together = ('name', 'symbol')
def __str__(self):
return "{0} ({1})".format(self.name, self.symbol)
class OptionCollectionManager(models.Manager):
cache_key = 'option_collection_map'
    def get_option_collection_map(self):
        """Convenience function to determine the option collection map."""
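        # The map relates each option_collection_hash to a space-joined
        # option string, e.g. (hypothetical values):
        #   {'102210fe59...': 'opt', '32faaecac7...': 'asan debug'}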
option_collection_map = cache.get(self.cache_key)
if option_collection_map:
return option_collection_map
option_collection_map = {}
for (hash, option_name) in OptionCollection.objects.values_list(
'option_collection_hash', 'option__name'
):
if not option_collection_map.get(hash):
option_collection_map[hash] = option_name
else:
option_collection_map[hash] += ' ' + option_name
# Caches for the default of 5 minutes.
cache.set(self.cache_key, option_collection_map)
return option_collection_map
class OptionCollection(models.Model):
id = models.AutoField(primary_key=True)
option_collection_hash = models.CharField(max_length=40)
option = models.ForeignKey(Option, on_delete=models.CASCADE, db_index=True)
objects = OptionCollectionManager()
@staticmethod
def calculate_hash(options):
"""returns an option_collection_hash given a list of options"""
options = sorted(list(options))
sha_hash = sha1()
        # equivalent to looping over the options and calling sha_hash.update()
sha_hash.update(''.join(options).encode('utf-8'))
return sha_hash.hexdigest()
class Meta:
db_table = 'option_collection'
unique_together = ('option_collection_hash', 'option')
def __str__(self):
return "{0}".format(self.option)
class JobType(models.Model):
id = models.AutoField(primary_key=True)
symbol = models.CharField(max_length=25, default='?', db_index=True)
name = models.CharField(max_length=140)
description = models.TextField(blank=True)
class Meta:
db_table = 'job_type'
unique_together = (('name', 'symbol'),)
def __str__(self):
return "{0} ({1})".format(self.name, self.symbol)
class FailureClassification(NamedModel):
class Meta:
db_table = 'failure_classification'
class ReferenceDataSignatures(models.Model):
"""
A collection of all the possible combinations of reference data,
    populated on data ingestion. signature is a hash of the data it refers to;
    build_system_type is "buildbot" by default.
TODO: Rename to 'ReferenceDataSignature'.
"""
name = models.CharField(max_length=255)
signature = models.CharField(max_length=50, db_index=True)
build_os_name = models.CharField(max_length=25, db_index=True)
build_platform = models.CharField(max_length=100, db_index=True)
build_architecture = models.CharField(max_length=25, db_index=True)
machine_os_name = models.CharField(max_length=25, db_index=True)
machine_platform = models.CharField(max_length=100, db_index=True)
machine_architecture = models.CharField(max_length=25, db_index=True)
job_group_name = models.CharField(max_length=100, blank=True, db_index=True)
job_group_symbol = models.CharField(max_length=25, blank=True, db_index=True)
job_type_name = models.CharField(max_length=140, db_index=True)
job_type_symbol = models.CharField(max_length=25, blank=True, db_index=True)
option_collection_hash = models.CharField(max_length=64, blank=True, db_index=True)
build_system_type = models.CharField(max_length=25, blank=True, db_index=True)
repository = models.CharField(max_length=50, db_index=True)
first_submission_timestamp = models.IntegerField(db_index=True)
class Meta:
db_table = 'reference_data_signatures'
# Remove if/when the model is renamed to 'ReferenceDataSignature'.
verbose_name_plural = 'reference data signatures'
unique_together = ('name', 'signature', 'build_system_type', 'repository')
class JobManager(models.Manager):
"""
Convenience functions for operations on groups of jobs
"""
def cycle_data(self, cycle_interval, chunk_size, sleep_time):
"""
Delete data older than cycle_interval, splitting the target data into
chunks of chunk_size size. Returns the number of result sets deleted
"""
jobs_max_timestamp = datetime.datetime.now() - cycle_interval
jobs_cycled = 0
while True:
min_id = Job.objects.aggregate(Min("id"))["id__min"]
if min_id is None:
return jobs_cycled
max_id = min_id + chunk_size
max_chunk = Job.objects.filter(id__lt=max_id).aggregate(
submit_time=Max("submit_time"), id=Max("id"), count=Count("id")
)
if max_chunk["count"] == 0 or max_chunk["submit_time"] > jobs_max_timestamp:
# this next chunk is too young, we are done
return jobs_cycled
logger.warning(
"Pruning jobs: chunk of {} older than {}".format(
max_chunk["count"], jobs_max_timestamp.strftime("%b %d %Y")
)
)
# Remove ORM entries for these jobs that don't currently have a
# foreign key relation
logger.warning("deleting FailureLines")
delete_guid = list(
Job.objects.filter(id__lt=max_id).only("guid").values_list("guid", flat=True)
)
FailureLine.objects.filter(job_guid__in=delete_guid).only("id").delete()
# cycle jobs *after* related data has been deleted, to be sure
# we don't have any orphan data
logger.warning("delete jobs")
self.filter(id__lt=max_id).only("id").delete()
jobs_cycled += max_chunk["count"]
if sleep_time:
# Allow some time for other queries to get through
time.sleep(sleep_time)
class Job(models.Model):
"""
This class represents a build or test job in Treeherder
"""
failures = FailuresQuerySet.as_manager()
objects = JobManager()
id = models.BigAutoField(primary_key=True)
PENDING = 0
CROSSREFERENCED = 1
AUTOCLASSIFIED = 2
SKIPPED = 3
FAILED = 255
AUTOCLASSIFY_STATUSES = (
(PENDING, 'pending'),
(CROSSREFERENCED, 'crossreferenced'),
(AUTOCLASSIFIED, 'autoclassified'),
(SKIPPED, 'skipped'),
(FAILED, 'failed'),
)
repository = models.ForeignKey(Repository, on_delete=models.CASCADE)
guid = models.CharField(max_length=50, unique=True)
project_specific_id = models.PositiveIntegerField(null=True) # unused, see bug 1328985
# TODO: Remove autoclassify_status next time jobs table is modified (bug 1594822)
autoclassify_status = models.IntegerField(choices=AUTOCLASSIFY_STATUSES, default=PENDING)
# TODO: Remove coalesced_to_guid next time the jobs table is modified (bug 1402992)
coalesced_to_guid = models.CharField(max_length=50, null=True, default=None)
signature = models.ForeignKey(ReferenceDataSignatures, on_delete=models.CASCADE)
build_platform = models.ForeignKey(BuildPlatform, on_delete=models.CASCADE, related_name='jobs')
machine_platform = models.ForeignKey(MachinePlatform, on_delete=models.CASCADE)
machine = models.ForeignKey(Machine, on_delete=models.CASCADE)
option_collection_hash = models.CharField(max_length=64)
job_type = models.ForeignKey(JobType, on_delete=models.CASCADE, related_name='jobs')
job_group = models.ForeignKey(JobGroup, on_delete=models.CASCADE, related_name='jobs')
product = models.ForeignKey(Product, on_delete=models.CASCADE)
failure_classification = models.ForeignKey(
FailureClassification, on_delete=models.CASCADE, related_name='jobs'
)
who = models.CharField(max_length=50)
reason = models.CharField(max_length=125)
result = models.CharField(max_length=25)
state = models.CharField(max_length=25)
submit_time = models.DateTimeField()
start_time = models.DateTimeField()
end_time = models.DateTimeField()
last_modified = models.DateTimeField(auto_now=True, db_index=True)
# TODO: Remove next time we add/drop another field.
running_eta = models.PositiveIntegerField(null=True, default=None)
tier = models.PositiveIntegerField()
push = models.ForeignKey(Push, on_delete=models.CASCADE, related_name='jobs')
class Meta:
db_table = 'job'
index_together = [
# these speed up the various permutations of the "similar jobs"
# queries
('repository', 'job_type', 'start_time'),
('repository', 'build_platform', 'job_type', 'start_time'),
('repository', 'option_collection_hash', 'job_type', 'start_time'),
('repository', 'build_platform', 'option_collection_hash', 'job_type', 'start_time'),
# this is intended to speed up queries for specific platform /
# option collections on a push
('machine_platform', 'option_collection_hash', 'push'),
# speed up cycle data
('repository', 'submit_time'),
]
@property
def tier_is_sheriffable(self) -> bool:
"""
Tier 3 jobs are not considered stable enough to be sheriffed.
"""
return self.tier < 3
def __str__(self):
return "{0} {1} {2}".format(self.id, self.repository, self.guid)
def get_platform_option(self, option_collection_map=None):
if not hasattr(self, 'platform_option'):
self.platform_option = ''
option_hash = self.option_collection_hash
if option_hash:
if not option_collection_map:
option_collection_map = OptionCollection.objects.get_option_collection_map()
self.platform_option = option_collection_map.get(option_hash)
return self.platform_option
def save(self, *args, **kwargs):
self.last_modified = datetime.datetime.now()
super().save(*args, **kwargs)
def get_manual_classification_line(self):
"""
If this Job has a single TextLogError line, return that TextLogError.
Some Jobs only have one related TextLogError. This
method checks if this Job is one of those (returning None if not) by:
* checking the number of related TextLogErrors
* counting the number of search results for the single TextLogError
* checking there is a related FailureLine
* checking the related FailureLine is in a given state
        If all these checks pass, the TextLogError is returned; any failure returns None.
"""
try:
text_log_error = TextLogError.objects.get(job=self)
except (TextLogError.DoesNotExist, TextLogError.MultipleObjectsReturned):
return None
# Can this TextLogError be converted into a single "useful search"?
# FIXME: what is the significance of only one search result here?
from treeherder.model.error_summary import get_useful_search_results
search_results = get_useful_search_results(self)
if len(search_results) != 1:
return None
# Check that we have a related FailureLine
failure_line = text_log_error.get_failure_line()
if failure_line is None:
return None
# Check our FailureLine is in a state we expect for
# auto-classification.
if not (
failure_line.action == "test_result"
and failure_line.test
and failure_line.status
and failure_line.expected
):
return None
return text_log_error
def fetch_associated_decision_job(self):
decision_type = JobType.objects.filter(name="Gecko Decision Task", symbol="D")
return Job.objects.get(
repository_id=self.repository_id,
job_type_id=Subquery(decision_type.values('id')[:1]),
push_id=self.push_id,
)
@staticmethod
def get_duration(submit_time, start_time, end_time):
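        # Falls back to "now" / submit_time when the end / start timestamps
        # look unset, clamps the elapsed time to at least 60s, and reports
        # whole minutes; e.g. (hypothetical) a 90s job yields
        # max(round(90 / 60), 1) == 2.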
endtime = end_time if to_timestamp(end_time) else datetime.datetime.now()
starttime = start_time if to_timestamp(start_time) else submit_time
seconds = max((endtime - starttime).total_seconds(), 60)
return max(round(seconds / 60), 1)
class TaskclusterMetadata(models.Model):
"""
Taskcluster-specific metadata associated with a taskcluster job
"""
job = models.OneToOneField(
Job, on_delete=models.CASCADE, primary_key=True, related_name='taskcluster_metadata'
)
task_id = models.CharField(max_length=22, validators=[MinLengthValidator(22)], db_index=True)
retry_id = models.PositiveIntegerField()
class Meta:
db_table = "taskcluster_metadata"
class JobLog(models.Model):
"""
Represents a log associated with a job
There can be more than one of these associated with each job
"""
PENDING = 0
PARSED = 1
FAILED = 2
SKIPPED_SIZE = 3
STATUSES = (
(PENDING, 'pending'),
(PARSED, 'parsed'),
(FAILED, 'failed'),
(SKIPPED_SIZE, 'skipped-size'),
)
job = models.ForeignKey(Job, on_delete=models.CASCADE, related_name="job_log")
name = models.CharField(max_length=50)
url = models.URLField(max_length=255)
status = models.IntegerField(choices=STATUSES, default=PENDING)
class Meta:
db_table = "job_log"
unique_together = ('job', 'name', 'url')
def __str__(self):
return "{0} {1} {2} {3}".format(self.id, self.job.guid, self.name, self.status)
def update_status(self, status):
self.status = status
self.save(update_fields=['status'])
class BugJobMap(models.Model):
"""
    Maps job_ids to related bug_ids.
    Mappings can be made manually through a UI or from doing lookups in the
    Bugscache.
"""
id = models.BigAutoField(primary_key=True)
job = models.ForeignKey(Job, on_delete=models.CASCADE)
bug_id = models.PositiveIntegerField(db_index=True)
created = models.DateTimeField(default=timezone.now)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True) # null if autoclassified
failures = FailuresQuerySet.as_manager()
objects = models.Manager()
class Meta:
db_table = "bug_job_map"
unique_together = ('job', 'bug_id')
@property
def who(self):
if self.user:
return self.user.email
else:
return "autoclassifier"
@classmethod
def create(cls, job_id, bug_id, user=None):
bug_map = BugJobMap.objects.create(
job_id=job_id,
bug_id=bug_id,
user=user,
)
if not user:
return bug_map
# We have a user so this wasn't triggered by auto-classification.
# However we need to update the ClassifiedFailure with the bug number
# we just used to create the BugJobMap.
text_log_error = bug_map.job.get_manual_classification_line()
if text_log_error is None:
return bug_map
classification = (
text_log_error.metadata.best_classification if text_log_error.metadata else None
)
if classification is None:
return bug_map # no classification to update
if classification.bug_number:
return bug_map # classification already has a bug number
classification.set_bug(bug_id)
return bug_map
def __str__(self):
return "{0} {1} {2} {3}".format(self.id, self.job.guid, self.bug_id, self.user)
class JobNote(models.Model):
"""
Associates a Failure type with a Job and optionally a text comment from a User.
Generally these are generated manually in the UI.
"""
id = models.BigAutoField(primary_key=True)
job = models.ForeignKey(Job, on_delete=models.CASCADE)
failure_classification = models.ForeignKey(FailureClassification, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True) # null if autoclassified
text = models.TextField()
created = models.DateTimeField(default=timezone.now)
class Meta:
db_table = "job_note"
@property
def who(self):
if self.user:
return self.user.email
return "autoclassifier"
def _update_failure_type(self):
"""
Updates the failure type of this Note's Job.
Set the linked Job's failure type to that of the most recent JobNote or
set to Not Classified if there are no JobNotes.
This is called when JobNotes are created (via .save()) and deleted (via
        .delete()) and is used to resolve the FailureClassification which has
been denormalised onto Job.
"""
# update the job classification
note = JobNote.objects.filter(job=self.job).order_by('-created').first()
if note:
self.job.failure_classification_id = note.failure_classification.id
else:
self.job.failure_classification_id = FailureClassification.objects.get(
name='not classified'
).id
self.job.save()
def _ensure_classification(self):
"""
Ensures a single TextLogError's related bugs have Classifications.
If the linked Job has a single meaningful TextLogError:
- find the bugs currently related to it via a Classification
- find the bugs mapped to the job related to this note
- find the bugs that are mapped but not classified
- link this subset of bugs to Classifications
- if there's only one new bug and no existing ones, verify it
"""
# if this note was automatically filed, don't update the auto-classification information
if not self.user:
return
# if the failure type isn't intermittent, ignore
if self.failure_classification.name not in ["intermittent", "intermittent needs filing"]:
return
# if the linked Job has more than one TextLogError, ignore
text_log_error = self.job.get_manual_classification_line()
if not text_log_error:
return
# evaluate the QuerySet here so it can be used when creating new_bugs below
existing_bugs = list(
ClassifiedFailure.objects.filter(
error_matches__text_log_error=text_log_error
).values_list('bug_number', flat=True)
)
new_bugs = self.job.bugjobmap_set.exclude(bug_id__in=existing_bugs).values_list(
'bug_id', flat=True
)
if not new_bugs:
return
# Create Match instances for each new bug
for bug_number in new_bugs:
classification, _ = ClassifiedFailure.objects.get_or_create(bug_number=bug_number)
text_log_error.create_match("ManualDetector", classification)
# if there's only one new bug and no existing ones, verify it
if len(new_bugs) == 1 and not existing_bugs:
text_log_error.verify_classification(classification)
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
self._update_failure_type()
self._ensure_classification()
def delete(self, *args, **kwargs):
super().delete(*args, **kwargs)
self._update_failure_type()
self._ensure_classification()
def __str__(self):
return "{0} {1} {2} {3}".format(
self.id, self.job.guid, self.failure_classification, self.who
)
class FailureLine(models.Model):
    # We make use of prefix indices for several columns in this table which
# can't be expressed in django syntax so are created with raw sql in migrations.
STATUS_LIST = ('PASS', 'FAIL', 'OK', 'ERROR', 'TIMEOUT', 'CRASH', 'ASSERT', 'SKIP', 'NOTRUN')
# Truncated is a special action that we use to indicate that the list of failure lines
# was truncated according to settings.FAILURE_LINES_CUTOFF.
ACTION_LIST = ("test_result", "log", "crash", "truncated", "group_result")
LEVEL_LIST = ("critical", "error", "warning", "info", "debug")
# Python 3's zip produces an iterable rather than a list, which Django's `choices` can't handle.
ACTION_CHOICES = list(zip(ACTION_LIST, ACTION_LIST))
STATUS_CHOICES = list(zip(STATUS_LIST, STATUS_LIST))
LEVEL_CHOICES = list(zip(LEVEL_LIST, LEVEL_LIST))
id = models.BigAutoField(primary_key=True)
job_guid = models.CharField(max_length=50)
repository = models.ForeignKey(Repository, on_delete=models.CASCADE)
job_log = models.ForeignKey(
JobLog, on_delete=models.CASCADE, null=True, related_name="failure_line"
)
action = models.CharField(max_length=15, choices=ACTION_CHOICES)
line = models.PositiveIntegerField()
test = models.TextField(blank=True, null=True)
subtest = models.TextField(blank=True, null=True)
status = models.CharField(max_length=7, choices=STATUS_CHOICES)
expected = models.CharField(max_length=7, choices=STATUS_CHOICES, blank=True, null=True)
message = models.TextField(blank=True, null=True)
signature = models.TextField(blank=True, null=True)
    level = models.CharField(max_length=8, choices=LEVEL_CHOICES, blank=True, null=True)
stack = models.TextField(blank=True, null=True)
stackwalk_stdout = models.TextField(blank=True, null=True)
stackwalk_stderr = models.TextField(blank=True, null=True)
# Note that the case of best_classification = None and best_is_verified = True
# has the special semantic that the line is ignored and should not be considered
# for future autoclassifications.
best_classification = models.ForeignKey(
"ClassifiedFailure",
related_name="best_for_lines",
null=True,
db_index=True,
on_delete=models.SET_NULL,
)
best_is_verified = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
db_table = 'failure_line'
index_together = (
('job_guid', 'repository'),
# Prefix index: test(50), subtest(25), status, expected, created
('test', 'subtest', 'status', 'expected', 'created'),
# Prefix index: signature(25), test(50), created
('signature', 'test', 'created'),
)
unique_together = ('job_log', 'line')
def __str__(self):
return "{0} {1}".format(self.id, Job.objects.get(guid=self.job_guid).id)
@property
def error(self):
# Return the related text-log-error or None if there is no related field.
try:
return self.text_log_error_metadata.text_log_error
except TextLogErrorMetadata.DoesNotExist:
return None
def _serialized_components(self):
if self.action == "test_result":
return ["TEST-UNEXPECTED-%s" % self.status.upper(), self.test]
if self.action == "log":
return [self.level.upper(), self.message.split("\n")[0]]
def unstructured_bugs(self):
"""
Get bugs that match this line in the Bug Suggestions artifact for this job.
"""
components = self._serialized_components()
if not components:
return []
from treeherder.model.error_summary import get_useful_search_results
job = Job.objects.get(guid=self.job_guid)
rv = []
ids_seen = set()
for item in get_useful_search_results(job):
if all(component in item["search"] for component in components):
for suggestion in itertools.chain(
item["bugs"]["open_recent"], item["bugs"]["all_others"]
):
if suggestion["id"] not in ids_seen:
ids_seen.add(suggestion["id"])
rv.append(suggestion)
return rv
def to_dict(self):
try:
metadata = self.text_log_error_metadata
except ObjectDoesNotExist:
metadata = None
return {
'id': self.id,
'job_guid': self.job_guid,
'repository': self.repository_id,
'job_log': self.job_log_id,
'action': self.action,
'line': self.line,
'test': self.test,
'subtest': self.subtest,
'status': self.status,
'expected': self.expected,
'message': self.message,
'signature': self.signature,
'level': self.level,
'stack': self.stack,
'stackwalk_stdout': self.stackwalk_stdout,
'stackwalk_stderr': self.stackwalk_stderr,
'best_classification': metadata.best_classification_id if metadata else None,
'best_is_verified': metadata.best_is_verified if metadata else False,
'created': self.created,
'modified': self.modified,
}
def to_mozlog_format(self):
"""Convert a FailureLine into a mozlog formatted dictionary."""
data = {
"action": self.action,
"line_number": self.line,
"test": self.test,
"subtest": self.subtest,
"status": self.status,
"expected": self.expected,
"message": self.message,
"signature": self.signature,
"level": self.level,
"stack": self.stack,
"stackwalk_stdout": self.stackwalk_stdout,
"stackwalk_stderr": self.stackwalk_stderr,
}
# Remove empty values
data = {k: v for k, v in data.items() if v is not None}
return data
class Group(models.Model):
"""
The test harness group.
This is most often a manifest file. But in some instances where a suite
doesn't have manifests, or a test suite isn't logging its data properly,
    this can simply be "default".
Note: This is not to be confused with JobGroup which is Treeherder specific.
"""
id = models.BigAutoField(primary_key=True)
name = models.CharField(max_length=255, unique=True)
job_logs = models.ManyToManyField("JobLog", through='GroupStatus', related_name='groups')
def __str__(self):
return self.name
class Meta:
db_table = 'group'
class GroupStatus(models.Model):
OK = 1
ERROR = 2
SKIP = 3
UNSUPPORTED = 10
STATUS_MAP = {"OK": OK, "ERROR": ERROR, "SKIP": SKIP}
STATUS_LOOKUP = {OK: "OK", ERROR: "ERROR", SKIP: "SKIP"}
status = models.SmallIntegerField()
job_log = models.ForeignKey(JobLog, on_delete=models.CASCADE, related_name="group_result")
group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name="group_result")
@staticmethod
def get_status(status_str):
return (
GroupStatus.STATUS_MAP[status_str]
if status_str in GroupStatus.STATUS_MAP
else GroupStatus.UNSUPPORTED
)
class Meta:
db_table = 'group_status'
class ClassifiedFailure(models.Model):
"""
Classifies zero or more TextLogErrors as a failure.
Optionally linked to a bug.
"""
id = models.BigAutoField(primary_key=True)
text_log_errors = models.ManyToManyField(
"TextLogError", through='TextLogErrorMatch', related_name='classified_failures'
)
# Note that we use a bug number of 0 as a sentinel value to indicate lines that
# are not actually symptomatic of a real bug
bug_number = models.PositiveIntegerField(blank=True, null=True, unique=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __str__(self):
return "{0} {1}".format(self.id, self.bug_number)
def bug(self):
# Putting this here forces one query per object; there should be a way
# to make things more efficient
return Bugscache.objects.filter(id=self.bug_number).first()
def set_bug(self, bug_number):
"""
        Set the bug number of this ClassifiedFailure.
        If an existing ClassifiedFailure exists with the same bug number,
        replace this instance with the existing one.
"""
if bug_number == self.bug_number:
return self
other = ClassifiedFailure.objects.filter(bug_number=bug_number).first()
if not other:
self.bug_number = bug_number
self.save(update_fields=['bug_number'])
return self
self.replace_with(other)
return other
@transaction.atomic
def replace_with(self, other):
"""
Replace this instance with the given other.
Deletes stale Match objects and updates related TextLogErrorMetadatas'
best_classifications to point to the given other.
"""
match_ids_to_delete = list(self.update_matches(other))
TextLogErrorMatch.objects.filter(id__in=match_ids_to_delete).delete()
# Update best classifications
self.best_for_errors.update(best_classification=other)
self.delete()
def update_matches(self, other):
"""
Update this instance's Matches to point to the given other's Matches.
        Find Matches with the same TextLogError as our Matches, update their
        score if it is less than ours, and mark our Matches for deletion.
If there are no other matches, update ours to point to the other
ClassifiedFailure.
"""
for match in self.error_matches.all():
other_matches = TextLogErrorMatch.objects.filter(
classified_failure=other,
text_log_error=match.text_log_error,
)
if not other_matches:
match.classified_failure = other
match.save(update_fields=['classified_failure'])
continue
# if any of our matches have higher scores than other's matches,
# overwrite with our score.
other_matches.filter(score__lt=match.score).update(score=match.score)
yield match.id # for deletion
class Meta:
db_table = 'classified_failure'
# TODO delete table once backfill of jobs in TextLogError table has been completed
class TextLogStep(models.Model):
"""
An individual step in the textual (unstructured) log
"""
id = models.BigAutoField(primary_key=True)
job = models.ForeignKey(Job, on_delete=models.CASCADE, related_name="text_log_step")
# these are presently based off of buildbot results
# (and duplicated in treeherder/etl/buildbot.py)
SUCCESS = 0
TEST_FAILED = 1
BUSTED = 2
SKIPPED = 3
EXCEPTION = 4
RETRY = 5
USERCANCEL = 6
UNKNOWN = 7
SUPERSEDED = 8
RESULTS = (
(SUCCESS, 'success'),
(TEST_FAILED, 'testfailed'),
(BUSTED, 'busted'),
(SKIPPED, 'skipped'),
(EXCEPTION, 'exception'),
(RETRY, 'retry'),
(USERCANCEL, 'usercancel'),
(UNKNOWN, 'unknown'),
(SUPERSEDED, 'superseded'),
)
name = models.CharField(max_length=200)
started = models.DateTimeField(null=True)
finished = models.DateTimeField(null=True)
started_line_number = models.PositiveIntegerField()
finished_line_number = models.PositiveIntegerField()
result = models.IntegerField(choices=RESULTS)
class Meta:
db_table = "text_log_step"
unique_together = ('job', 'started_line_number', 'finished_line_number')
class TextLogError(models.Model):
"""
A detected error line in the textual (unstructured) log
"""
id = models.BigAutoField(primary_key=True)
job = models.ForeignKey(Job, on_delete=models.CASCADE, related_name='text_log_error', null=True)
line = models.TextField()
line_number = models.PositiveIntegerField()
# TODO delete this field and unique_together once backfill of jobs in TextLogError table has been completed
step = models.ForeignKey(
TextLogStep, on_delete=models.CASCADE, related_name='errors', null=True
)
class Meta:
db_table = "text_log_error"
unique_together = ('step', 'line_number')
def __str__(self):
return "{0} {1}".format(self.id, self.job.id)
@property
def metadata(self):
try:
return self._metadata
except TextLogErrorMetadata.DoesNotExist:
return None
def bug_suggestions(self):
from treeherder.model import error_summary
return error_summary.bug_suggestions_line(self)
def create_match(self, matcher_name, classification):
"""
Create a TextLogErrorMatch instance
Typically used for manual "matches" or tests.
"""
if classification is None:
classification = ClassifiedFailure.objects.create()
TextLogErrorMatch.objects.create(
text_log_error=self,
classified_failure=classification,
matcher_name=matcher_name,
score=1,
)
def verify_classification(self, classification):
"""
Mark the given ClassifiedFailure as verified.
        Handles the case where the classification is not yet related to this
        TextLogError, and where no TextLogErrorMetadata exists yet.
"""
if classification not in self.classified_failures.all():
self.create_match("ManualDetector", classification)
# create a TextLogErrorMetadata instance for this TextLogError if it
# doesn't exist. We can't use update_or_create here since OneToOne
# relations don't use an object manager so a missing relation is simply
# None as opposed to RelatedManager.
if self.metadata is None:
TextLogErrorMetadata.objects.create(
text_log_error=self, best_classification=classification, best_is_verified=True
)
else:
self.metadata.best_classification = classification
self.metadata.best_is_verified = True
self.metadata.save(update_fields=['best_classification', 'best_is_verified'])
        # Send an event to New Relic when verifying an autoclassified failure.
match = self.matches.filter(classified_failure=classification).first()
if not match:
return
newrelic.agent.record_custom_event(
'user_verified_classification',
{
'matcher': match.matcher_name,
'job_id': self.id,
},
)
def get_failure_line(self):
"""Get a related FailureLine instance if one exists."""
try:
return self.metadata.failure_line
except AttributeError:
return None
class TextLogErrorMetadata(models.Model):
"""
Link matching TextLogError and FailureLine instances.
    Tracks best classification and verification of a classification.
TODO: Merge into TextLogError.
"""
text_log_error = models.OneToOneField(
TextLogError, primary_key=True, related_name="_metadata", on_delete=models.CASCADE
)
failure_line = models.OneToOneField(
FailureLine, on_delete=models.CASCADE, related_name="text_log_error_metadata", null=True
)
# Note that the case of best_classification = None and best_is_verified = True
# has the special semantic that the line is ignored and should not be considered
# for future autoclassifications.
best_classification = models.ForeignKey(
ClassifiedFailure, related_name="best_for_errors", null=True, on_delete=models.SET_NULL
)
best_is_verified = models.BooleanField(default=False)
class Meta:
db_table = "text_log_error_metadata"
def __str__(self):
args = (self.text_log_error_id, self.failure_line_id)
return 'TextLogError={} FailureLine={}'.format(*args)
class TextLogErrorMatch(models.Model):
"""Association table between TextLogError and ClassifiedFailure, containing
additional data about the association including the matcher that was used
to create it and a score in the range 0-1 for the goodness of match."""
id = models.BigAutoField(primary_key=True)
text_log_error = models.ForeignKey(
TextLogError, related_name="matches", on_delete=models.CASCADE
)
classified_failure = models.ForeignKey(
ClassifiedFailure, related_name="error_matches", on_delete=models.CASCADE
)
matcher_name = models.CharField(max_length=255)
score = models.DecimalField(max_digits=3, decimal_places=2, blank=True, null=True)
class Meta:
db_table = 'text_log_error_match'
verbose_name_plural = 'text log error matches'
unique_together = ('text_log_error', 'classified_failure', 'matcher_name')
def __str__(self):
return "{0} {1}".format(self.text_log_error.id, self.classified_failure.id)
class InvestigatedTests(models.Model):
"""Tests that have been marked as investigated in the Push health UI. These act like
completed To Do List items."""
id = models.BigAutoField(primary_key=True)
job_type = models.ForeignKey(JobType, related_name="investigated", on_delete=models.CASCADE)
test = models.CharField(max_length=350)
push = models.ForeignKey(Push, on_delete=models.CASCADE)
class Meta:
unique_together = ["job_type", "test", "push"]
db_table = 'investigated_tests'
class MozciClassification(models.Model):
"""
Automated classification of a Push provided by mozci
"""
BAD = 'BAD'
GOOD = 'GOOD'
UNKNOWN = 'UNKNOWN'
CLASSIFICATION_RESULT = (
(BAD, 'bad'),
(GOOD, 'good'),
(UNKNOWN, 'unknown'),
)
id = models.BigAutoField(primary_key=True)
push = models.ForeignKey(Push, on_delete=models.CASCADE)
result = models.CharField(max_length=7, choices=CLASSIFICATION_RESULT)
created = models.DateTimeField(default=timezone.now)
task_id = models.CharField(max_length=22, validators=[MinLengthValidator(22)])
class Meta:
db_table = 'mozci_classification'
|
jmaher/treeherder
|
treeherder/model/models.py
|
Python
|
mpl-2.0
| 51,534
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.execute(
"update physical_script set metric_collector = 'metric_collector_configuration.sh';COMMIT;")
def backwards(self, orm):
pass
models = {
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'ssl_configured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.databaseinfraparameter': {
'Meta': {'unique_together': "((u'databaseinfra', u'parameter'),)", 'object_name': 'DatabaseInfraParameter'},
'applied_on_database': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.DatabaseInfra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Parameter']"}),
'reset_default_value': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'physical.offering': {
'Meta': {'object_name': 'Offering'},
'cpus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'offerings'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_setup_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'metric_collector': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.topologyparametercustomvalue': {
'Meta': {'unique_together': "((u'topology', u'parameter'),)", 'object_name': 'TopologyParameterCustomValue'},
'attr_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'topology_custom_values'", 'to': u"orm['physical.Parameter']"}),
'topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'param_custom_values'", 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.volume': {
'Meta': {'object_name': 'Volume'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'volumes'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'total_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['physical']
|
globocom/database-as-a-service
|
dbaas/physical/migrations/0068_update_physical_script.py
|
Python
|
bsd-3-clause
| 21,797
|
import re
from pyramid.view import view_config
from contentbase import (
Collection,
TYPES,
collection_view_listing_db,
)
from contentbase.elasticsearch import ELASTIC_SEARCH
from pyramid.security import effective_principals
from urllib.parse import urlencode
from collections import OrderedDict
def includeme(config):
config.add_route('search', '/search{slash:/?}')
config.scan(__name__)
sanitize_search_string_re = re.compile(r'[\\\+\-\&\|\!\(\)\{\}\[\]\^\~\:\/\\\*\?]')
hgConnect = ''.join([
'http://genome.ucsc.edu/cgi-bin/hgHubConnect',
'?hgHub_do_redirect=on',
'&hgHubConnect.remakeTrackHub=on',
'&hgHub_do_firstDb=1',
'&hubUrl=',
])
audit_facets = [
('audit.ERROR.category', {'title': 'Audit category: ERROR'}),
('audit.NOT_COMPLIANT.category', {'title': 'Audit category: NOT COMPLIANT'}),
('audit.WARNING.category', {'title': 'Audit category: WARNING'}),
('audit.DCC_ACTION.category', {'title': 'Audit category: DCC ACTION'})
]
def get_filtered_query(term, search_fields, result_fields, principals):
return {
'query': {
'query_string': {
'query': term,
'fields': search_fields,
'default_operator': 'AND'
}
},
'filter': {
'and': {
'filters': [
{
'terms': {
'principals_allowed.view': principals
}
}
]
}
},
'aggs': {},
'_source': list(result_fields),
}
def sanitize_search_string(text):
return sanitize_search_string_re.sub(r'\\\g<0>', text)
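# Illustrative example (not part of the original module): query_string
# operator characters are backslash-escaped so they match literally, e.g.
#   sanitize_search_string('BRCA1 (p.R1699W)')  # -> 'BRCA1 \\(p.R1699W\\)'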
def get_sort_order():
"""
    Specifies the sort order for Elasticsearch results.
"""
return {
'embedded.date_created': {
'order': 'desc',
'ignore_unmapped': True,
}
}
def get_search_fields(request, doc_types):
"""
    Returns the set of fields to search plus their highlight settings
"""
fields = set()
highlights = {}
for doc_type in (doc_types or request.root.by_item_type.keys()):
collection = request.root[doc_type]
for value in collection.type_info.schema.get('boost_values', ()):
fields.add('embedded.' + value)
highlights['embedded.' + value] = {}
return fields, highlights
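# Sketch of the behaviour (assumes a hypothetical type whose schema declares
# boost_values ['title', 'symbol']): the call would return
#   ({'embedded.title', 'embedded.symbol'},
#    {'embedded.title': {}, 'embedded.symbol': {}})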
def load_columns(request, doc_types, result):
"""
    Returns the fields requested by the user, or the default columns
"""
frame = request.params.get('frame')
fields_requested = request.params.getall('field')
if fields_requested:
fields = {'embedded.@id', 'embedded.@type'}
fields.update('embedded.' + field for field in fields_requested)
elif frame in ['embedded', 'object']:
fields = [frame + '.*']
else:
frame = 'columns'
fields = set()
if request.has_permission('search_audit'):
fields.add('audit.*')
for doc_type in (doc_types or request.root.by_item_type.keys()):
collection = request.root[doc_type]
if 'columns' not in (collection.type_info.schema or ()):
fields.add('object.*')
else:
columns = collection.type_info.schema['columns']
fields.update(
('embedded.@id', 'embedded.@type'),
('embedded.' + column for column in columns),
)
result['columns'].update(columns)
return fields
def set_filters(request, query, result):
"""
Sets filters in the query
"""
query_filters = query['filter']['and']['filters']
used_filters = {}
for field, term in request.params.items():
if field in ['type', 'limit', 'mode', 'searchTerm',
'format', 'frame', 'datastore', 'field']:
continue
# Add filter to result
qs = urlencode([
(k.encode('utf-8'), v.encode('utf-8'))
for k, v in request.params.items() if v != term
])
result['filters'].append({
'field': field,
'term': term,
'remove': '{}?{}'.format(request.path, qs)
})
# Add filter to query
if field.startswith('audit'):
query_field = field
else:
query_field = 'embedded.' + field + '.raw'
if field.endswith('!'):
            if field not in used_filters:
                # Setting a not filter instead of a terms filter; the term
                # list is shared by reference, so later terms for the same
                # field extend the filter in place.
                query_terms = used_filters[field] = [term]
                query_filters.append({
                    'not': {
                        'terms': {
                            'embedded.' + field[:-1] + '.raw': query_terms,
                        }
                    }
                })
else:
query_filters.remove({
'not': {
'terms': {
'embedded.' + field[:-1] + '.raw': used_filters[field]
}
}
})
used_filters[field].append(term)
query_filters.append({
'not': {
'terms': {
'embedded.' + field[:-1] + '.raw': used_filters[field]
}
}
})
else:
if field not in used_filters:
query_terms = used_filters[field] = []
query_filters.append({
'terms': {
query_field: query_terms,
}
})
else:
query_filters.remove({
'terms': {
query_field: used_filters[field]
}
})
query_filters.append({
'terms': {
query_field: used_filters[field]
}
})
used_filters[field].append(term)
return used_filters
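# Sketch of the effect for a hypothetical request ?species=human&species=mouse:
# used_filters ends up as {'species': ['human', 'mouse']} and the query carries
# a single {'terms': {'embedded.species.raw': ['human', 'mouse']}} clause,
# because the term list is shared by reference and extended in place.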
def set_facets(facets, used_filters, query, principals):
"""
Sets facets in the query using filters
"""
for field, _ in facets:
if field == 'type':
query_field = '_type'
elif field.startswith('audit'):
query_field = field
else:
query_field = 'embedded.' + field + '.raw'
agg_name = field.replace('.', '-')
terms = []
# Adding facets based on filters
for q_field, q_terms in used_filters.items():
if q_field != field and q_field.startswith('audit'):
terms.append({'terms': {q_field: q_terms}})
elif q_field != field and not q_field.endswith('!'):
terms.append({'terms': {'embedded.' + q_field + '.raw': q_terms}})
elif q_field != field and q_field.endswith('!'):
terms.append({'not': {'terms': {'embedded.' + q_field[:-1] + '.raw': q_terms}}})
terms.append(
{'terms': {'principals_allowed.view': principals}}
)
query['aggs'][agg_name] = {
'aggs': {
agg_name: {
'terms': {
'field': query_field,
'min_doc_count': 0,
'size': 100
}
}
},
'filter': {
'bool': {
'must': terms,
},
},
}
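# Sketch of one generated aggregation for a hypothetical 'status' facet:
#   query['aggs']['status'] == {
#       'aggs': {'status': {'terms': {'field': 'embedded.status.raw',
#                                     'min_doc_count': 0, 'size': 100}}},
#       'filter': {'bool': {'must': [..., {'terms':
#                   {'principals_allowed.view': principals}}]}},
#   }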
def load_results(request, es_results, result):
"""
    Loads results to pass on to the UI
"""
hits = es_results['hits']['hits']
frame = request.params.get('frame')
fields_requested = request.params.getall('field')
if frame in ['embedded', 'object'] and not len(fields_requested):
result['@graph'] = [hit['_source'][frame] for hit in hits]
elif fields_requested:
result['@graph'] = [hit['_source']['embedded'] for hit in hits]
else: # columns
for hit in hits:
item_type = hit['_type']
if 'columns' in request.registry[TYPES][item_type].schema:
item = hit['_source']['embedded']
else:
item = hit['_source']['object']
if 'audit' in hit['_source']:
item['audit'] = hit['_source']['audit']
if 'highlight' in hit:
item['highlight'] = {}
for key in hit['highlight']:
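                    # key[9:] strips the leading 'embedded.' prefix (9 chars)
                    # from the highlighted field name before exposing it.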
item['highlight'][key[9:]] = list(set(hit['highlight'][key]))
result['@graph'].append(item)
@view_config(route_name='search', request_method='GET', permission='search')
def search(context, request, search_type=None):
"""
Search view connects to ElasticSearch and returns the results
"""
root = request.root
types = request.registry[TYPES]
result = {
'@id': '/search/' + ('?' + request.query_string if request.query_string else ''),
'@type': ['search'],
'title': 'Search',
'facets': [],
'@graph': [],
'columns': OrderedDict(),
'filters': [],
'notification': '',
}
principals = effective_principals(request)
es = request.registry[ELASTIC_SEARCH]
es_index = request.registry.settings['contentbase.elasticsearch.index']
search_audit = request.has_permission('search_audit')
# handling limit
size = request.params.get('limit', 25)
if size in ('all', ''):
size = 99999
else:
try:
size = int(size)
except ValueError:
size = 25
search_term = request.params.get('searchTerm', '*')
if search_term != '*':
search_term = sanitize_search_string(search_term.strip())
        search_term_array = search_term.split()
        if search_term_array and search_term_array[-1] in ('AND', 'NOT', 'OR'):
            del search_term_array[-1]
        search_term = ' '.join(search_term_array)
    # Handle search terms that contained only whitespace or operators
if not search_term:
result['notification'] = 'Please enter search term'
return result
if search_type is None:
doc_types = request.params.getall('type')
if '*' in doc_types:
doc_types = []
# handling invalid item types
bad_types = [t for t in doc_types if t not in root.by_item_type]
if bad_types:
            result['notification'] = "Invalid type: %s" % ', '.join(bad_types)
return result
else:
doc_types = [search_type]
# Building query for filters
if not doc_types:
if request.params.get('mode') == 'picker':
doc_types = []
else:
doc_types = ['gene', 'orphaPhenotype', 'article', 'variant', 'gdm', 'annotation',
'group', 'family', 'individual', 'experimental', 'assessment',
'interpretation']
else:
for item_type in doc_types:
qs = urlencode([
(k.encode('utf-8'), v.encode('utf-8'))
for k, v in request.params.items() if k != 'type' and v != item_type
])
result['filters'].append({
'field': 'type',
'term': item_type,
'remove': '{}?{}'.format(request.path, qs)
})
search_fields, highlights = get_search_fields(request, doc_types)
# Builds filtered query which supports multiple facet selection
query = get_filtered_query(search_term,
search_fields,
sorted(load_columns(request, doc_types, result)),
principals)
if not result['columns']:
del result['columns']
    # Sort the results by date when no search term is specified
if search_term == '*':
query['sort'] = get_sort_order()
query['query']['match_all'] = {}
del query['query']['query_string']
elif len(doc_types) != 1:
del query['query']['query_string']['fields']
# elif size <= 25:
# # highlight only when search type, search term and size are specified
# query['highlight'] = {
# 'order': 'score',
# 'fields': highlights
# }
# Setting filters
used_filters = set_filters(request, query, result)
# Adding facets to the query
facets = [
('type', {'title': 'Data Type'}),
]
if len(doc_types) == 1 and 'facets' in types[doc_types[0]].schema:
facets.extend(types[doc_types[0]].schema['facets'].items())
if search_audit:
for audit_facet in audit_facets:
facets.append(audit_facet)
set_facets(facets, used_filters, query, principals)
if doc_types == ['gdm'] or doc_types == ['interpretation']:
size = 99999
# Execute the query
es_results = es.search(body=query, index=es_index,
doc_type=doc_types or None, size=size)
# Loading facets in to the results
if 'aggregations' in es_results:
facet_results = es_results['aggregations']
for field, facet in facets:
agg_name = field.replace('.', '-')
if agg_name not in facet_results:
continue
terms = facet_results[agg_name][agg_name]['buckets']
if len(terms) < 2:
continue
result['facets'].append({
'field': field,
'title': facet['title'],
'terms': terms,
'total': facet_results[agg_name]['doc_count']
})
# generate batch hub URL for experiments
if doc_types == ['experiment'] and any(
facet['doc_count'] > 0
for facet in es_results['aggregations']['assembly']['assembly']['buckets']):
search_params = request.query_string.replace('&', ',,')
hub = request.route_url('batch_hub',
search_params=search_params,
txt='hub.txt')
result['batch_hub'] = hgConnect + hub
# generate batch download URL for experiments
if doc_types == ['experiment']:
result['batch_download'] = request.route_url(
'batch_download',
search_params=request.query_string
)
    # Moved to a separate method to keep the code readable
load_results(request, es_results, result)
# Adding total
result['total'] = es_results['hits']['total']
result['notification'] = 'Success' if result['total'] else 'No results found'
return result
@view_config(context=Collection, permission='list', request_method='GET',
name='listing')
def collection_view_listing_es(context, request):
# Switch to change summary page loading options
if request.datastore != 'elasticsearch':
return collection_view_listing_db(context, request)
result = search(context, request, context.item_type)
if len(result['@graph']) < result['total']:
params = [(k, v) for k, v in request.params.items() if k != 'limit']
params.append(('limit', 'all'))
result['all'] = '%s?%s' % (request.resource_path(context), urlencode(params))
return result
|
ClinGen/clincoded
|
src/clincoded/search.py
|
Python
|
mit
| 15,299
|
"""
This config file runs the simplest dev environment"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .common import *
from logsettings import get_logger_config
DEBUG = True
TEMPLATE_DEBUG = DEBUG
LOGGING = get_logger_config(ENV_ROOT / "log",
logging_env="dev",
tracking_filename="tracking.log",
dev_env=True,
debug=True)
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': GITHUB_REPO_ROOT,
'render_template': 'edxmako.shortcuts.render_to_string',
}
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
'direct': {
'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
'split': {
'ENGINE': 'xmodule.modulestore.split_mongo.SplitMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
}
}
# cdodge: This is the specifier for the MongoDB (using GridFS) backed static content store
# This is for static content for courseware, not system static content (e.g. javascript, css, edX branding, etc)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'xcontent',
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ENV_ROOT / "db" / "edx.db",
}
}
LMS_BASE = "mooc.xiaodun.cn"
FEATURES['PREVIEW_LMS_BASE'] = "mooc.xiaodun.cn"
REPOS = {
'edx4edx': {
'branch': 'master',
'origin': 'git@github.com:MITx/edx4edx.git',
},
'content-mit-6002x': {
'branch': 'master',
# 'origin': 'git@github.com:MITx/6002x-fall-2012.git',
'origin': 'git@github.com:MITx/content-mit-6002x.git',
},
'6.00x': {
'branch': 'master',
'origin': 'git@github.com:MITx/6.00x.git',
},
'7.00x': {
'branch': 'master',
'origin': 'git@github.com:MITx/7.00x.git',
},
'3.091x': {
'branch': 'master',
'origin': 'git@github.com:MITx/3.091x.git',
},
}
CACHES = {
# This is the cache used for most things. Askbot will not work without a
# functioning cache -- it relies on caching to load its settings in places.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/mongo_metadata_inheritance',
'TIMEOUT': 300,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'loc_cache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
},
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
################################ PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
################################# CELERY ######################################
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ DEBUG TOOLBAR #################################
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
# Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and
# Django=1.3.1/1.4 where requests to views get duplicated (your method gets
# hit twice). So you can uncomment when you need to diagnose performance
# problems, but you shouldn't leave it on.
# 'debug_toolbar.panels.profiling.ProfilingDebugPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
# To see stacktraces for MongoDB queries, set this to True.
# Stacktraces slow down page loads drastically (for pages with lots of queries).
DEBUG_TOOLBAR_MONGO_STACKTRACES = False
# disable NPS survey in dev mode
FEATURES['STUDIO_NPS_SURVEY'] = False
# Enable URL that shows information about the status of various services
FEATURES['ENABLE_SERVICE_STATUS'] = True
############################# SEGMENT-IO ##################################
# If there's an environment variable set, grab it and turn on Segment.io
# Note that this is the Studio key. There is a separate key for the LMS.
import os
SEGMENT_IO_KEY = os.environ.get('SEGMENT_IO_KEY')
if SEGMENT_IO_KEY:
FEATURES['SEGMENT_IO'] = True
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=F0401
except ImportError:
pass
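# A developer override file is optional; a minimal sketch of what
# cms/envs/private.py might contain (the names below are only examples):
#
#     DEBUG_TOOLBAR_MONGO_STACKTRACES = True
#     CELERY_ALWAYS_EAGER = False
#
# Anything defined there simply shadows the settings above.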
|
XiaodunServerGroup/xiaodun-platform
|
cms/envs/dev.py
|
Python
|
agpl-3.0
| 6,564
|
"""
Helper functions for loading environment settings.
"""
from __future__ import print_function
import os
import sys
import json
from lazy import lazy
from path import path
import memcache
class Env(object):
"""
Load information about the execution environment.
"""
# Root of the git repository (edx-platform)
REPO_ROOT = path(__file__).abspath().parent.parent.parent
# Reports Directory
REPORT_DIR = REPO_ROOT / 'reports'
METRICS_DIR = REPORT_DIR / 'metrics'
# Python unittest dirs
PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc"
# Bok_choy dirs
BOK_CHOY_DIR = REPO_ROOT / "common" / "test" / "acceptance"
BOK_CHOY_LOG_DIR = REPO_ROOT / "test_root" / "log"
BOK_CHOY_REPORT_DIR = REPORT_DIR / "bok_choy"
BOK_CHOY_COVERAGERC = BOK_CHOY_DIR / ".coveragerc"
# If set, put reports for run in "unique" directories.
# The main purpose of this is to ensure that the reports can be 'slurped'
# in the main jenkins flow job without overwriting the reports from other
# build steps. For local development/testing, this shouldn't be needed.
if os.environ.get("SHARD", None):
shard_str = "shard_{}".format(os.environ.get("SHARD"))
BOK_CHOY_REPORT_DIR = BOK_CHOY_REPORT_DIR / shard_str
BOK_CHOY_LOG_DIR = BOK_CHOY_LOG_DIR / shard_str
# For the time being, stubs are used by both the bok-choy and lettuce acceptance tests
# For this reason, the stubs package is currently located in the Django app called "terrain"
# where other lettuce configuration is stored.
BOK_CHOY_STUB_DIR = REPO_ROOT / "common" / "djangoapps" / "terrain"
# Directory that videos are served from
VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video"
BOK_CHOY_SERVERS = {
'lms': {
'port': 8003,
'log': BOK_CHOY_LOG_DIR / "bok_choy_lms.log"
},
'cms': {
'port': 8031,
'log': BOK_CHOY_LOG_DIR / "bok_choy_studio.log"
}
}
BOK_CHOY_STUBS = {
'xqueue': {
'port': 8040,
'log': BOK_CHOY_LOG_DIR / "bok_choy_xqueue.log",
'config': 'register_submission_url=http://0.0.0.0:8041/test/register_submission',
},
'ora': {
'port': 8041,
'log': BOK_CHOY_LOG_DIR / "bok_choy_ora.log",
'config': '',
},
'comments': {
'port': 4567,
'log': BOK_CHOY_LOG_DIR / "bok_choy_comments.log",
},
'video': {
'port': 8777,
'log': BOK_CHOY_LOG_DIR / "bok_choy_video_sources.log",
'config': "root_dir={}".format(VIDEO_SOURCE_DIR),
},
'youtube': {
'port': 9080,
'log': BOK_CHOY_LOG_DIR / "bok_choy_youtube.log",
},
'edxnotes': {
'port': 8042,
'log': BOK_CHOY_LOG_DIR / "bok_choy_edxnotes.log",
}
}
# Mongo databases that will be dropped before/after the tests run
BOK_CHOY_MONGO_DATABASE = "test"
BOK_CHOY_CACHE = memcache.Client(['0.0.0.0:11211'], debug=0)
# Test Ids Directory
TEST_DIR = REPO_ROOT / ".testids"
# Files used to run each of the js test suites
# TODO: Store this as a dict. Order seems to matter for some
# reason. See issue TE-415.
JS_TEST_ID_FILES = [
REPO_ROOT / 'lms/static/js_test.yml',
REPO_ROOT / 'lms/static/js_test_coffee.yml',
REPO_ROOT / 'cms/static/js_test.yml',
REPO_ROOT / 'cms/static/js_test_squire.yml',
REPO_ROOT / 'common/lib/xmodule/xmodule/js/js_test.yml',
REPO_ROOT / 'common/static/js_test.yml',
REPO_ROOT / 'common/static/js_test_requirejs.yml',
]
JS_TEST_ID_KEYS = [
'lms',
'lms-coffee',
'cms',
'cms-squire',
'xmodule',
'common',
'common-requirejs'
]
JS_REPORT_DIR = REPORT_DIR / 'javascript'
# Directories used for common/lib/ tests
LIB_TEST_DIRS = []
for item in (REPO_ROOT / "common/lib").listdir():
if (REPO_ROOT / 'common/lib' / item).isdir():
LIB_TEST_DIRS.append(path("common/lib") / item.basename())
LIB_TEST_DIRS.append(path("pavelib/paver_tests"))
# Directory for i18n test reports
I18N_REPORT_DIR = REPORT_DIR / 'i18n'
# Service variant (lms, cms, etc.) configured with an environment variable
# We use this to determine which envs.json file to load.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# If service variant not configured in env, then pass the correct
# environment for lms / cms
    if not SERVICE_VARIANT:  # this will intentionally catch ""
if any(i in sys.argv[1:] for i in ('cms', 'studio')):
SERVICE_VARIANT = 'cms'
else:
SERVICE_VARIANT = 'lms'
@lazy
def env_tokens(self):
"""
Return a dict of environment settings.
If we couldn't find the JSON file, issue a warning and return an empty dict.
"""
# Find the env JSON file
if self.SERVICE_VARIANT:
env_path = self.REPO_ROOT.parent / "{service}.env.json".format(service=self.SERVICE_VARIANT)
else:
env_path = path("env.json").abspath()
# If the file does not exist, here or one level up,
# issue a warning and return an empty dict
if not env_path.isfile():
env_path = env_path.parent.parent / env_path.basename()
if not env_path.isfile():
print(
"Warning: could not find environment JSON file "
"at '{path}'".format(path=env_path),
file=sys.stderr,
)
return dict()
# Otherwise, load the file as JSON and return the resulting dict
try:
with open(env_path) as env_file:
return json.load(env_file)
except ValueError:
print(
"Error: Could not parse JSON "
"in {path}".format(path=env_path),
file=sys.stderr,
)
sys.exit(1)
@lazy
def feature_flags(self):
"""
Return a dictionary of feature flags configured by the environment.
"""
return self.env_tokens.get('FEATURES', dict())
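    # Usage sketch (assumes a hypothetical lms.env.json one level above the
    # repo root containing {"FEATURES": {"ENABLE_SERVICE_STATUS": true}}):
    #
    #     Env().feature_flags.get('ENABLE_SERVICE_STATUS')  # -> True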
|
htzy/bigfour
|
pavelib/utils/envs.py
|
Python
|
agpl-3.0
| 6,325
|
# coding: utf-8
try:
from django.conf.urls import patterns, url, include
except ImportError:
from django.conf.urls.defaults import patterns, url, include
from django.http import HttpResponse
def dummy(request):
return HttpResponse()
urlpatterns = patterns('',
url('^api/.+/$', dummy, name='dummy'),
url('', include('django.contrib.auth.urls', app_name='auth', namespace='auth'))
)
|
anmekin/django-httplog
|
test_app/urls.py
|
Python
|
bsd-3-clause
| 405
|
from __future__ import absolute_import, unicode_literals
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app # noqa
# NOTE: Version must be updated in docs/source/conf.py as well.
VERSION = (1, 4, 2, "final")
def get_version(version):
assert len(version) == 4, "Version must be formatted as (major, minor, micro, state)"
major, minor, micro, state = version
assert isinstance(major, int), "Major version must be an integer."
assert isinstance(minor, int), "Minor version must be an integer."
assert isinstance(micro, int), "Micro version must be an integer."
assert state in ('final', 'dev'), "State must be either final or dev."
if state == 'final':
return "{}.{}.{}".format(major, minor, micro)
else:
return "{}.{}.{}.{}".format(major, minor, micro, state)
__version__ = get_version(VERSION)
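# For example (derived directly from the function above):
#   get_version((1, 4, 2, 'final'))  # -> '1.4.2'
#   get_version((1, 5, 0, 'dev'))    # -> '1.5.0.dev'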
|
xkmato/tracpro
|
tracpro/__init__.py
|
Python
|
bsd-3-clause
| 948
|
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import TestScaleBase
class TestScaleCompute(TestScaleBase):
def test_compute_scale_in_compute(self):
expectations = self.deploy_app('scale4')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['existing']['install'] = 2
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
self.deployment_assertions(expectations)
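    # Reading of the expectations dict used throughout these tests: the
    # 'new'/'existing'/'removed' buckets appear to count node instances per
    # lifecycle category after the workflow runs, e.g. scaling 3 computes in
    # by 1 leaves 2 'existing' installs and 1 'removed' install/uninstall pair.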
def test_compute_scale_in_compute_ignore_failure_true(self):
expectations = self.deploy_app('scale_ignore_failure')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'ignore_failure': True,
'delta': -1})
expectations['compute']['existing']['install'] = 2
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
self.deployment_assertions(expectations)
def test_compute_scale_in_compute_ignore_failure_false(self):
expectations = self.deploy_app('scale_ignore_failure')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
try:
self.scale(parameters={
'scalable_entity_name': 'compute',
'ignore_failure': False,
'delta': -1})
except RuntimeError as e:
self.assertIn(
"RuntimeError: Workflow failed: Task failed "
"'testmockoperations.tasks.mock_stop_failure'",
str(e))
else:
self.fail()
def test_compute_scale_out_and_in_compute_from_0(self):
expectations = self.deploy_app('scale10')
expectations['compute']['new']['install'] = 0
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute'})
expectations['compute']['new']['install'] = 1
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['new']['install'] = 0
expectations['compute']['existing']['install'] = 0
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
self.deployment_assertions(expectations)
def test_compute_scale_in_2_compute(self):
expectations = self.deploy_app('scale4')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -2})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 2
expectations['compute']['removed']['uninstall'] = 2
self.deployment_assertions(expectations)
def test_db_contained_in_compute_scale_in_compute(self):
expectations = self.deploy_app('scale5')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 4
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 2
expectations['db']['removed']['uninstall'] = 2
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_in_db(self):
expectations = self.deploy_app('scale6')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 2
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'db',
'delta': -1})
expectations['compute']['existing']['install'] = 2
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 1
expectations['db']['removed']['uninstall'] = 1
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_in_compute(self):
expectations = self.deploy_app('scale6')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 2
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 8
expectations['db']['existing']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_in_and_out_compute_from_0(self):
expectations = self.deploy_app('scale11')
expectations['compute']['new']['install'] = 0
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 0
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': 1})
expectations['compute']['new']['install'] = 1
expectations['compute']['existing']['install'] = 0
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 0
expectations['db']['existing']['scale_rel_install'] = 2
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['new']['install'] = 0
expectations['compute']['existing']['install'] = 0
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['scale_rel_install'] = 2
expectations['db']['existing']['rel_uninstall'] = 2
self.deployment_assertions(expectations)
def test_db_contained_in_compute_scale_in_db_scale_db(self):
expectations = self.deploy_app('scale5')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 4
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'db',
'delta': -1,
'scale_compute': False})
expectations['compute']['existing']['install'] = 2
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 2
expectations['db']['removed']['uninstall'] = 2
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_contained_in_compute_scale_in_db(self):
expectations = self.deploy_app('scale5')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 4
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'db',
'delta': -1,
'scale_compute': True})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 2
expectations['db']['removed']['uninstall'] = 2
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
|
isaac-s/cloudify-manager
|
tests/integration_tests/tests/agentless_tests/scale/test_scale_in.py
|
Python
|
apache-2.0
| 9,915
|
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Packet handling
"""
import errno
import os
import socket
import struct
import threading
import time
from hmac import HMAC
from paramiko import util
from paramiko.common import linefeed_byte, cr_byte_value, asbytes, MSG_NAMES, \
DEBUG, xffffffff, zero_byte
from paramiko.py3compat import u, byte_ord
from paramiko.ssh_exception import SSHException, ProxyCommandFailure
from paramiko.message import Message
def compute_hmac(key, message, digest_class):
return HMAC(key, message, digest_class).digest()
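# Illustrative use (sha1 here is just an example digest class):
#   from hashlib import sha1
#   tag = compute_hmac(b'key', b'data', sha1)   # 20-byte digest
# Callers below truncate the result to the negotiated MAC size.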
class NeedRekeyException (Exception):
pass
class Packetizer (object):
"""
Implementation of the base SSH packet protocol.
"""
    # Read the secsh RFCs before raising these values; if anything,
    # they should probably be lower.
REKEY_PACKETS = pow(2, 29)
REKEY_BYTES = pow(2, 29)
REKEY_PACKETS_OVERFLOW_MAX = pow(2, 29) # Allow receiving this many packets after a re-key request before terminating
REKEY_BYTES_OVERFLOW_MAX = pow(2, 29) # Allow receiving this many bytes after a re-key request before terminating
def __init__(self, socket):
self.__socket = socket
self.__logger = None
self.__closed = False
self.__dump_packets = False
self.__need_rekey = False
self.__init_count = 0
self.__remainder = bytes()
# used for noticing when to re-key:
self.__sent_bytes = 0
self.__sent_packets = 0
self.__received_bytes = 0
self.__received_packets = 0
self.__received_bytes_overflow = 0
self.__received_packets_overflow = 0
# current inbound/outbound ciphering:
self.__block_size_out = 8
self.__block_size_in = 8
self.__mac_size_out = 0
self.__mac_size_in = 0
self.__block_engine_out = None
self.__block_engine_in = None
self.__sdctr_out = False
self.__mac_engine_out = None
self.__mac_engine_in = None
self.__mac_key_out = bytes()
self.__mac_key_in = bytes()
self.__compress_engine_out = None
self.__compress_engine_in = None
self.__sequence_number_out = 0
self.__sequence_number_in = 0
# lock around outbound writes (packet computation)
self.__write_lock = threading.RLock()
# keepalives:
self.__keepalive_interval = 0
self.__keepalive_last = time.time()
self.__keepalive_callback = None
self.__timer = None
self.__handshake_complete = False
self.__timer_expired = False
@property
def closed(self):
return self.__closed
def set_log(self, log):
"""
Set the Python log object to use for logging.
"""
self.__logger = log
def set_outbound_cipher(self, block_engine, block_size, mac_engine, mac_size, mac_key, sdctr=False):
"""
Switch outbound data cipher.
"""
self.__block_engine_out = block_engine
self.__sdctr_out = sdctr
self.__block_size_out = block_size
self.__mac_engine_out = mac_engine
self.__mac_size_out = mac_size
self.__mac_key_out = mac_key
self.__sent_bytes = 0
self.__sent_packets = 0
# wait until the reset happens in both directions before clearing rekey flag
self.__init_count |= 1
if self.__init_count == 3:
self.__init_count = 0
self.__need_rekey = False
def set_inbound_cipher(self, block_engine, block_size, mac_engine, mac_size, mac_key):
"""
Switch inbound data cipher.
"""
self.__block_engine_in = block_engine
self.__block_size_in = block_size
self.__mac_engine_in = mac_engine
self.__mac_size_in = mac_size
self.__mac_key_in = mac_key
self.__received_bytes = 0
self.__received_packets = 0
self.__received_bytes_overflow = 0
self.__received_packets_overflow = 0
# wait until the reset happens in both directions before clearing rekey flag
self.__init_count |= 2
if self.__init_count == 3:
self.__init_count = 0
self.__need_rekey = False
def set_outbound_compressor(self, compressor):
self.__compress_engine_out = compressor
def set_inbound_compressor(self, compressor):
self.__compress_engine_in = compressor
def close(self):
self.__closed = True
self.__socket.close()
def set_hexdump(self, hexdump):
self.__dump_packets = hexdump
def get_hexdump(self):
return self.__dump_packets
def get_mac_size_in(self):
return self.__mac_size_in
def get_mac_size_out(self):
return self.__mac_size_out
def need_rekey(self):
"""
Returns ``True`` if a new set of keys needs to be negotiated. This
will be triggered during a packet read or write, so it should be
checked after every read or write, or at least after every few.
"""
return self.__need_rekey
def set_keepalive(self, interval, callback):
"""
Turn on/off the callback keepalive. If ``interval`` seconds pass with
no data read from or written to the socket, the callback will be
executed and the timer will be reset.
"""
self.__keepalive_interval = interval
self.__keepalive_callback = callback
self.__keepalive_last = time.time()
def read_timer(self):
self.__timer_expired = True
def start_handshake(self, timeout):
"""
Tells `Packetizer` that the handshake process started.
        Starts a bookkeeping timer that can signal a timeout in the
        handshake process.
:param float timeout: amount of seconds to wait before timing out
"""
if not self.__timer:
self.__timer = threading.Timer(float(timeout), self.read_timer)
self.__timer.start()
def handshake_timed_out(self):
"""
Checks if the handshake has timed out.
If `start_handshake` wasn't called before the call to this function,
the return value will always be `False`. If the handshake completed
        before a timeout was reached, the return value will be `False`.
:return: handshake time out status, as a `bool`
"""
if not self.__timer:
return False
if self.__handshake_complete:
return False
return self.__timer_expired
def complete_handshake(self):
"""
Tells `Packetizer` that the handshake has completed.
"""
if self.__timer:
self.__timer.cancel()
self.__timer_expired = False
self.__handshake_complete = True
def read_all(self, n, check_rekey=False):
"""
Read as close to N bytes as possible, blocking as long as necessary.
:param int n: number of bytes to read
:return: the data read, as a `str`
:raises EOFError:
if the socket was closed before all the bytes could be read
"""
out = bytes()
# handle over-reading from reading the banner line
if len(self.__remainder) > 0:
out = self.__remainder[:n]
self.__remainder = self.__remainder[n:]
n -= len(out)
while n > 0:
got_timeout = False
if self.handshake_timed_out():
raise EOFError()
try:
x = self.__socket.recv(n)
if len(x) == 0:
raise EOFError()
out += x
n -= len(x)
except socket.timeout:
got_timeout = True
except socket.error as e:
# on Linux, sometimes instead of socket.timeout, we get
# EAGAIN. this is a bug in recent (> 2.6.9) kernels but
# we need to work around it.
if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN):
got_timeout = True
elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR):
# syscall interrupted; try again
pass
elif self.__closed:
raise EOFError()
else:
raise
if got_timeout:
if self.__closed:
raise EOFError()
if check_rekey and (len(out) == 0) and self.__need_rekey:
raise NeedRekeyException()
self._check_keepalive()
return out
def write_all(self, out):
self.__keepalive_last = time.time()
iteration_with_zero_as_return_value = 0
while len(out) > 0:
retry_write = False
try:
n = self.__socket.send(out)
except socket.timeout:
retry_write = True
except socket.error as e:
if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN):
retry_write = True
elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR):
# syscall interrupted; try again
retry_write = True
else:
n = -1
except ProxyCommandFailure:
raise # so it doesn't get swallowed by the below catchall
except Exception:
# could be: (32, 'Broken pipe')
n = -1
if retry_write:
n = 0
if self.__closed:
n = -1
else:
if n == 0 and iteration_with_zero_as_return_value > 10:
# We shouldn't retry the write, but we didn't
# manage to send anything over the socket. This might be an
# indication that we have lost contact with the remote side,
# but are yet to receive an EOFError or other socket errors.
                    # Let's give it a few iterations to try and catch up.
n = -1
iteration_with_zero_as_return_value += 1
if n < 0:
raise EOFError()
if n == len(out):
break
out = out[n:]
return
def readline(self, timeout):
"""
Read a line from the socket. We assume no data is pending after the
line, so it's okay to attempt large reads.
"""
buf = self.__remainder
        while linefeed_byte not in buf:
buf += self._read_timeout(timeout)
n = buf.index(linefeed_byte)
self.__remainder = buf[n + 1:]
buf = buf[:n]
if (len(buf) > 0) and (buf[-1] == cr_byte_value):
buf = buf[:-1]
return u(buf)
def send_message(self, data):
"""
Write a block of data using the current cipher, as an SSH block.
"""
# encrypt this sucka
data = asbytes(data)
cmd = byte_ord(data[0])
if cmd in MSG_NAMES:
cmd_name = MSG_NAMES[cmd]
else:
cmd_name = '$%x' % cmd
orig_len = len(data)
self.__write_lock.acquire()
try:
if self.__compress_engine_out is not None:
data = self.__compress_engine_out(data)
packet = self._build_packet(data)
if self.__dump_packets:
self._log(DEBUG, 'Write packet <%s>, length %d' % (cmd_name, orig_len))
self._log(DEBUG, util.format_binary(packet, 'OUT: '))
if self.__block_engine_out is not None:
out = self.__block_engine_out.update(packet)
else:
out = packet
# + mac
if self.__block_engine_out is not None:
payload = struct.pack('>I', self.__sequence_number_out) + packet
out += compute_hmac(self.__mac_key_out, payload, self.__mac_engine_out)[:self.__mac_size_out]
self.__sequence_number_out = (self.__sequence_number_out + 1) & xffffffff
self.write_all(out)
self.__sent_bytes += len(out)
self.__sent_packets += 1
if (self.__sent_packets >= self.REKEY_PACKETS or self.__sent_bytes >= self.REKEY_BYTES)\
and not self.__need_rekey:
# only ask once for rekeying
self._log(DEBUG, 'Rekeying (hit %d packets, %d bytes sent)' %
(self.__sent_packets, self.__sent_bytes))
self.__received_bytes_overflow = 0
self.__received_packets_overflow = 0
self._trigger_rekey()
finally:
self.__write_lock.release()
def read_message(self):
"""
Only one thread should ever be in this function (no other locking is
done).
:raises SSHException: if the packet is mangled
:raises NeedRekeyException: if the transport should rekey
"""
header = self.read_all(self.__block_size_in, check_rekey=True)
if self.__block_engine_in is not None:
header = self.__block_engine_in.update(header)
if self.__dump_packets:
self._log(DEBUG, util.format_binary(header, 'IN: '))
packet_size = struct.unpack('>I', header[:4])[0]
# leftover contains decrypted bytes from the first block (after the
# length field)
leftover = header[4:]
if (packet_size - len(leftover)) % self.__block_size_in != 0:
raise SSHException('Invalid packet blocking')
buf = self.read_all(packet_size + self.__mac_size_in - len(leftover))
packet = buf[:packet_size - len(leftover)]
post_packet = buf[packet_size - len(leftover):]
if self.__block_engine_in is not None:
packet = self.__block_engine_in.update(packet)
if self.__dump_packets:
self._log(DEBUG, util.format_binary(packet, 'IN: '))
packet = leftover + packet
if self.__mac_size_in > 0:
mac = post_packet[:self.__mac_size_in]
mac_payload = struct.pack('>II', self.__sequence_number_in, packet_size) + packet
my_mac = compute_hmac(self.__mac_key_in, mac_payload, self.__mac_engine_in)[:self.__mac_size_in]
if not util.constant_time_bytes_eq(my_mac, mac):
raise SSHException('Mismatched MAC')
padding = byte_ord(packet[0])
payload = packet[1:packet_size - padding]
if self.__dump_packets:
self._log(DEBUG, 'Got payload (%d bytes, %d padding)' % (packet_size, padding))
if self.__compress_engine_in is not None:
payload = self.__compress_engine_in(payload)
msg = Message(payload[1:])
msg.seqno = self.__sequence_number_in
self.__sequence_number_in = (self.__sequence_number_in + 1) & xffffffff
# check for rekey
raw_packet_size = packet_size + self.__mac_size_in + 4
self.__received_bytes += raw_packet_size
self.__received_packets += 1
if self.__need_rekey:
# we've asked to rekey -- give them some packets to comply before
# dropping the connection
self.__received_bytes_overflow += raw_packet_size
self.__received_packets_overflow += 1
if (self.__received_packets_overflow >= self.REKEY_PACKETS_OVERFLOW_MAX) or \
(self.__received_bytes_overflow >= self.REKEY_BYTES_OVERFLOW_MAX):
raise SSHException('Remote transport is ignoring rekey requests')
elif (self.__received_packets >= self.REKEY_PACKETS) or \
(self.__received_bytes >= self.REKEY_BYTES):
# only ask once for rekeying
self._log(DEBUG, 'Rekeying (hit %d packets, %d bytes received)' %
(self.__received_packets, self.__received_bytes))
self.__received_bytes_overflow = 0
self.__received_packets_overflow = 0
self._trigger_rekey()
cmd = byte_ord(payload[0])
if cmd in MSG_NAMES:
cmd_name = MSG_NAMES[cmd]
else:
cmd_name = '$%x' % cmd
if self.__dump_packets:
self._log(DEBUG, 'Read packet <%s>, length %d' % (cmd_name, len(payload)))
return cmd, msg
########## protected
def _log(self, level, msg):
if self.__logger is None:
return
if issubclass(type(msg), list):
for m in msg:
self.__logger.log(level, m)
else:
self.__logger.log(level, msg)
def _check_keepalive(self):
if (not self.__keepalive_interval) or (not self.__block_engine_out) or \
self.__need_rekey:
# wait till we're encrypting, and not in the middle of rekeying
return
now = time.time()
if now > self.__keepalive_last + self.__keepalive_interval:
self.__keepalive_callback()
self.__keepalive_last = now
def _read_timeout(self, timeout):
start = time.time()
while True:
try:
x = self.__socket.recv(128)
if len(x) == 0:
raise EOFError()
break
except socket.timeout:
pass
except EnvironmentError as e:
if (type(e.args) is tuple and len(e.args) > 0 and
e.args[0] == errno.EINTR):
pass
else:
raise
if self.__closed:
raise EOFError()
now = time.time()
if now - start >= timeout:
raise socket.timeout()
return x
def _build_packet(self, payload):
# pad up at least 4 bytes, to nearest block-size (usually 8)
bsize = self.__block_size_out
padding = 3 + bsize - ((len(payload) + 8) % bsize)
packet = struct.pack('>IB', len(payload) + padding + 1, padding)
packet += payload
if self.__sdctr_out or self.__block_engine_out is None:
            # cute trick caught from OpenSSH: if we're not encrypting, or are
            # in SDCTR mode (RFC 4344), don't waste random bytes on the padding
packet += (zero_byte * padding)
else:
packet += os.urandom(padding)
return packet
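    # Worked example: with block_size_out == 8 and a 12-byte payload,
    # padding = 3 + 8 - ((12 + 8) % 8) = 7, so the packet totals
    # 4 (length) + 1 (padding byte) + 12 + 7 = 24 bytes -- a multiple of 8
    # with at least 4 bytes of padding, as RFC 4253 requires.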
def _trigger_rekey(self):
# outside code should check for this flag
self.__need_rekey = True
|
hipnusleo/laserjet
|
resource/pypi/paramiko-2.1.1/paramiko/packet.py
|
Python
|
apache-2.0
| 19,925
|
# This source file is part of Soundcloud-syncer.
#
# Soundcloud-syncer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Soundcloud-syncer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Soundcloud-syncer. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
import sys
import unittest
from mock import Mock
from mock import MagicMock
sys.path.insert(0, "../../")
from ssyncer.suser import suser
from ssyncer.strack import strack
from mock_data import json_bytes
def mock_tracks_response(uri):
def json_res():
return json_bytes
response = MagicMock()
response.read = json_res
return response
class TestSuser(unittest.TestCase):
def test_object_has_good_name(self):
""" Test object has name `foo`. """
client = Mock()
object = suser("Foo", client=client)
self.assertEqual("Foo", object.name)
def test_parse_response(self):
""" Test parse tracks response on success. """
client = MagicMock()
object = suser("Foo", client=client)
tracks = object._parse_response(mock_tracks_response("bar"), strack)
self.assertEqual(3, len(tracks))
for track in tracks:
self.assertIsInstance(track, strack)
def test_get_likes_with_default_offset_and_limit(self):
""" Test get user's likes with default offset and limit. """
client = MagicMock()
client.USER_LIKES = "/u/%s/f.json?o=%d&l=%d&c="
object = suser("Foo", client=client)
object._parse_response = Mock()
object.get_likes()
client.get.assert_called_once_with(
"/u/Foo/f.json?o=0&l=50&c=")
def test_get_likes_with_custom_offset_and_limit(self):
""" Test get user's likes with custom offset and limit. """
client = MagicMock()
client.USER_LIKES = "/u/%s/f.json?o=%d&l=%d&c="
object = suser("Foo", client=client)
object._parse_response = Mock()
object.get_likes(10, 20)
client.get.assert_called_once_with(
"/u/Foo/f.json?o=10&l=20&c=")
def test_get_tracks_with_default_offset_and_limit(self):
""" Test get user's tracks with default offset and limit. """
client = MagicMock()
client.USER_TRACKS = "/u/%s/t.json?o=%d&l=%d&c="
object = suser("Foo", client=client)
object._parse_response = Mock()
object.get_tracks()
client.get.assert_called_once_with(
"/u/Foo/t.json?o=0&l=50&c=")
def test_get_tracks_with_custom_offset_and_limit(self):
""" Test get user's tracks with custom offset and limit. """
client = MagicMock()
client.USER_TRACKS = "/u/%s/t.json?o=%d&l=%d&c="
object = suser("Foo", client=client)
object._parse_response = Mock()
object.get_tracks(10, 20)
client.get.assert_called_once_with(
"/u/Foo/t.json?o=10&l=20&c=")
def test_get_playlists_with_custom_offset_and_limit(self):
""" Test get user's playlists with custom offset and limit. """
client = MagicMock()
client.USER_PLAYLISTS = "/u/%s/p.json?o=%d&l=%d&c="
object = suser("Foo", client=client)
object._parse_response = Mock()
object.get_playlists(10, 20)
client.get.assert_called_once_with(
"/u/Foo/p.json?o=10&l=20&c=")
|
Sliim/soundcloud-syncer
|
tests/ssyncer/test_suser.py
|
Python
|
gpl-3.0
| 3,772
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest.api import utils
from tempest.common import fixed_network
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
class ListServerFiltersTestJSON(base.BaseV2ComputeTest):
@classmethod
def setup_credentials(cls):
cls.set_network_resources(network=True, subnet=True, dhcp=True)
super(ListServerFiltersTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(ListServerFiltersTestJSON, cls).setup_clients()
cls.client = cls.servers_client
@classmethod
def resource_setup(cls):
super(ListServerFiltersTestJSON, cls).resource_setup()
# Check to see if the alternate image ref actually exists...
images_client = cls.images_client
images = images_client.list_images()
if cls.image_ref != cls.image_ref_alt and \
any([image for image in images
if image['id'] == cls.image_ref_alt]):
cls.multiple_images = True
else:
cls.image_ref_alt = cls.image_ref
# Do some sanity checks here. If one of the images does
# not exist, fail early since the tests won't work...
try:
cls.images_client.show_image(cls.image_ref)
except lib_exc.NotFound:
raise RuntimeError("Image %s (image_ref) was not found!" %
cls.image_ref)
try:
cls.images_client.show_image(cls.image_ref_alt)
except lib_exc.NotFound:
raise RuntimeError("Image %s (image_ref_alt) was not found!" %
cls.image_ref_alt)
network = cls.get_tenant_network()
if network:
cls.fixed_network_name = network.get('name')
else:
cls.fixed_network_name = None
network_kwargs = fixed_network.set_networks_kwarg(network)
cls.s1_name = data_utils.rand_name(cls.__name__ + '-instance')
cls.s1 = cls.create_test_server(name=cls.s1_name,
wait_until='ACTIVE',
**network_kwargs)
cls.s2_name = data_utils.rand_name(cls.__name__ + '-instance')
cls.s2 = cls.create_test_server(name=cls.s2_name,
image_id=cls.image_ref_alt,
wait_until='ACTIVE')
cls.s3_name = data_utils.rand_name(cls.__name__ + '-instance')
cls.s3 = cls.create_test_server(name=cls.s3_name,
flavor=cls.flavor_ref_alt,
wait_until='ACTIVE')
@test.idempotent_id('05e8a8e7-9659-459a-989d-92c2f501f4ba')
@utils.skip_unless_attr('multiple_images', 'Only one image found')
def test_list_servers_filter_by_image(self):
# Filter the list of servers by image
params = {'image': self.image_ref}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.idempotent_id('573637f5-7325-47bb-9144-3476d0416908')
def test_list_servers_filter_by_flavor(self):
# Filter the list of servers by flavor
params = {'flavor': self.flavor_ref_alt}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.idempotent_id('9b067a7b-7fee-4f6a-b29c-be43fe18fc5a')
def test_list_servers_filter_by_server_name(self):
# Filter the list of servers by server name
params = {'name': self.s1_name}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.idempotent_id('ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e')
def test_list_servers_filter_by_server_status(self):
# Filter the list of servers by server status
params = {'status': 'active'}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.idempotent_id('451dbbb2-f330-4a9f-b0e1-5f5d2cb0f34c')
def test_list_servers_filter_by_shutoff_status(self):
# Filter the list of servers by server shutoff status
params = {'status': 'shutoff'}
self.client.stop(self.s1['id'])
waiters.wait_for_server_status(self.client, self.s1['id'],
'SHUTOFF')
body = self.client.list_servers(**params)
self.client.start(self.s1['id'])
waiters.wait_for_server_status(self.client, self.s1['id'],
'ACTIVE')
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.idempotent_id('614cdfc1-d557-4bac-915b-3e67b48eee76')
def test_list_servers_filter_by_limit(self):
# Verify only the expected number of servers are returned
params = {'limit': 1}
servers = self.client.list_servers(**params)
self.assertEqual(1, len([x for x in servers['servers'] if 'id' in x]))
@test.idempotent_id('b1495414-2d93-414c-8019-849afe8d319e')
def test_list_servers_filter_by_zero_limit(self):
# Verify only the expected number of servers are returned
params = {'limit': 0}
servers = self.client.list_servers(**params)
self.assertEqual(0, len(servers['servers']))
@test.idempotent_id('37791bbd-90c0-4de0-831e-5f38cba9c6b3')
def test_list_servers_filter_by_exceed_limit(self):
# Verify only the expected number of servers are returned
params = {'limit': 100000}
servers = self.client.list_servers(**params)
all_servers = self.client.list_servers()
self.assertEqual(len([x for x in all_servers['servers'] if 'id' in x]),
len([x for x in servers['servers'] if 'id' in x]))
@test.idempotent_id('b3304c3b-97df-46d2-8cd3-e2b6659724e7')
@utils.skip_unless_attr('multiple_images', 'Only one image found')
def test_list_servers_detailed_filter_by_image(self):
# Filter the detailed list of servers by image
params = {'image': self.image_ref}
body = self.client.list_servers(detail=True, **params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.idempotent_id('80c574cc-0925-44ba-8602-299028357dd9')
def test_list_servers_detailed_filter_by_flavor(self):
# Filter the detailed list of servers by flavor
params = {'flavor': self.flavor_ref_alt}
body = self.client.list_servers(detail=True, **params)
servers = body['servers']
self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.idempotent_id('f9eb2b70-735f-416c-b260-9914ac6181e4')
def test_list_servers_detailed_filter_by_server_name(self):
# Filter the detailed list of servers by server name
params = {'name': self.s1_name}
body = self.client.list_servers(detail=True, **params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.idempotent_id('de2612ab-b7dd-4044-b0b1-d2539601911f')
def test_list_servers_detailed_filter_by_server_status(self):
# Filter the detailed list of servers by server status
params = {'status': 'active'}
body = self.client.list_servers(detail=True, **params)
servers = body['servers']
test_ids = [s['id'] for s in (self.s1, self.s2, self.s3)]
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers
if x['id'] in test_ids])
@test.idempotent_id('e9f624ee-92af-4562-8bec-437945a18dcb')
def test_list_servers_filtered_by_name_wildcard(self):
# List all servers that contains '-instance' in name
params = {'name': '-instance'}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
# Let's take random part of name and try to search it
part_name = self.s1_name[6:-1]
params = {'name': part_name}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.idempotent_id('24a89b0c-0d55-4a28-847f-45075f19b27b')
def test_list_servers_filtered_by_name_regex(self):
# list of regex that should match s1, s2 and s3
regexes = ['^.*\-instance\-[0-9]+$', '^.*\-instance\-.*$']
for regex in regexes:
params = {'name': regex}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
# Let's take random part of name and try to search it
part_name = self.s1_name[-10:]
params = {'name': part_name}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.idempotent_id('43a1242e-7b31-48d1-88f2-3f72aa9f2077')
def test_list_servers_filtered_by_ip(self):
# Filter servers by ip
# Here should be listed 1 server
if not self.fixed_network_name:
msg = 'fixed_network_name needs to be configured to run this test'
raise self.skipException(msg)
self.s1 = self.client.show_server(self.s1['id'])
for addr_spec in self.s1['addresses'][self.fixed_network_name]:
ip = addr_spec['addr']
if addr_spec['version'] == 4:
params = {'ip': ip}
break
else:
msg = "Skipped until bug 1450859 is resolved"
raise self.skipException(msg)
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.idempotent_id('a905e287-c35e-42f2-b132-d02b09f3654a')
def test_list_servers_filtered_by_ip_regex(self):
# Filter servers by regex ip
# List all servers filtered by part of ip address.
# Here should be listed all servers
if not self.fixed_network_name:
msg = 'fixed_network_name needs to be configured to run this test'
raise self.skipException(msg)
self.s1 = self.client.show_server(self.s1['id'])
addr_spec = self.s1['addresses'][self.fixed_network_name][0]
ip = addr_spec['addr'][0:-3]
if addr_spec['version'] == 4:
params = {'ip': ip}
else:
params = {'ip6': ip}
# capture all servers in case something goes wrong
all_servers = self.client.list_servers(detail=True)
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers),
"%s not found in %s, all servers %s" %
(self.s1_name, servers, all_servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers),
"%s not found in %s, all servers %s" %
(self.s2_name, servers, all_servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers),
"%s not found in %s, all servers %s" %
(self.s3_name, servers, all_servers))
@test.idempotent_id('67aec2d0-35fe-4503-9f92-f13272b867ed')
def test_list_servers_detailed_limit_results(self):
# Verify only the expected number of detailed results are returned
params = {'limit': 1}
servers = self.client.list_servers(detail=True, **params)
self.assertEqual(1, len(servers['servers']))
|
tudorvio/tempest
|
tempest/api/compute/servers/test_list_server_filters.py
|
Python
|
apache-2.0
| 14,862
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
from file import File
from folder import Folder
from music_file import MusicFile
class FolderList(list):
def __init__(self, path, recurse=False, hidden=False, show_folders=False):
# Ensure unicode
self.__path = unicode(path)
# Save some flags
self.__flag_recurse = recurse
self.__flag_hidden = hidden
self.__flag_folders = show_folders
self.refresh()
def refresh(self):
''' Resets the folder, grabbing the latest names and such '''
# Clears the list, use self.clear() for python 3
self[:] = []
# Find all the files
os.path.walk(self.__path, self, self.__path)
def _to_file(self, path):
file = File(path)
if file.type() == File.TYPE_MUSIC:
return MusicFile(path)
return file
def _to_folder(self, path):
folder = Folder(path)
return folder
def __call__(self, arg, dirname, fnames):
remove = []
# decide which directories to keep going into
for index, name in enumerate(fnames):
path = os.path.abspath(os.path.join(dirname, name))
# Decide whether to Hide any hidden files
if not self.__flag_hidden and os.path.basename(name).startswith('.'):
remove.append(name)
# use the absolute path here; `name` alone is relative to the walk root
elif os.path.isdir(path):
# Check for any recursion
if not self.__flag_recurse:
remove.append(name)
# Also add the folders to the list if needed
if self.__flag_folders:
self.append(self._to_folder(path))
# Add the files we find
elif os.path.isfile(path):
self.append(self._to_file(path))
# Now remove the filtered folders
# Modifying this list makes os.path.walk not recurse into the given directories
for name in remove:
fnames.remove(name)
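# Hedged usage sketch (Python 2; the "music" directory name is illustrative,
# not part of the original module):
#
# tracks = FolderList("music", recurse=True, show_folders=True)
# for entry in tracks:
#     print entry
#
# Because FolderList subclasses list, len(), indexing and iteration operate
# directly on the collected File/MusicFile/Folder objects.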
|
Saevon/Recipes
|
folder_list/folder_list.py
|
Python
|
mit
| 2,012
|
"""Provide the RedditorList class."""
from .base import BaseList
class RedditorList(BaseList):
"""A list of Redditors. Works just like a regular list."""
CHILD_ATTRIBUTE = "children"
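# Hedged usage sketch: like other praw BaseList subclasses, an instance wraps
# an API listing and behaves as a plain sequence (the argument values below
# are illustrative assumptions, not part of this module):
#
# redditors = RedditorList(reddit, _data={"children": [...]})
# for redditor in redditors:
#     print(redditor)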
|
gschizas/praw
|
praw/models/list/redditor.py
|
Python
|
bsd-2-clause
| 194
|
# coding: utf-8
from django import forms
from django.contrib.auth import authenticate
from forum.models import ForumUser
from django.conf import settings
error_messages = {
'username':{
'required': u'Username is required',
'min_length': u'Username is too short (3-12 characters)',
'max_length': u'Username is too long (3-12 characters)',
'invalid': u'Invalid username format (must start with a letter; only letters, digits and underscores allowed)'
},
'email':{
'required': u'E-mail is required',
'min_length': u'Email length is invalid',
'max_length': u'Email length is invalid',
'invalid': u'Invalid email address'
},
'password':{
'required': u'Password is required',
'min_length': u'Password is too short (6-64 characters)',
'max_length': u'Password is too long (6-64 characters)'
},
}
class LoginForm(forms.Form):
email = forms.EmailField(min_length=4, max_length=64,
error_messages=error_messages.get('email'))
password = forms.CharField(min_length=6, max_length=64,
error_messages=error_messages.get('password'))
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
def clean(self):
email = self.cleaned_data.get('email')
password = self.cleaned_data.get('password')
if email and password:
self.user_cache = authenticate(email=email, password=password)
if self.user_cache is None:
raise forms.ValidationError(u'Incorrect email or password')
elif not self.user_cache.is_active:
raise forms.ValidationError(u"用户已被锁定, 请联系管理员解锁")
return self.cleaned_data
def get_user(self):
return self.user_cache
class RegisterForm(forms.Form):
username = forms.RegexField(min_length=3, max_length=12,
regex=r'^[a-zA-Z][a-zA-Z0-9_]*$',
error_messages=error_messages.get('username'))
email = forms.EmailField(min_length=4, max_length=64,
error_messages=error_messages.get('email'))
password = forms.CharField(min_length=6, max_length=64,
error_messages=error_messages.get('password'))
password_confirm = forms.CharField(required=False)
#class Meta:
# model = ForumUser
# field = ('username', )
def __init__(self, *args, **kwargs):
super(RegisterForm, self).__init__(*args, **kwargs)
def clean_username(self):
username = self.cleaned_data.get('username')
try:
ForumUser.objects.get(username=username)
raise forms.ValidationError(u'This username is already registered')
except ForumUser.DoesNotExist:
if username in settings.RESERVED:
raise forms.ValidationError(u'This username is reserved and unavailable')
return username
def clean_email(self):
email = self.cleaned_data.get('email')
try:
ForumUser.objects.get(email=email)
raise forms.ValidationError(u'This email address is already registered')
except ForumUser.DoesNotExist:
return email
def clean_password_confirm(self):
password1 = self.cleaned_data.get('password')
password2 = self.cleaned_data.get('password_confirm')
if password1 and password2 and password2 != password1:
raise forms.ValidationError(u'The two passwords do not match')
return password2
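# Hedged usage sketch (all field values are illustrative):
#
# form = RegisterForm({
#     'username': 'alice',
#     'email': 'alice@example.com',
#     'password': 'secret123',
#     'password_confirm': 'secret123',
# })
# if form.is_valid():
#     pass  # create the ForumUser from form.cleaned_data here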
|
Liubusy/V2GO
|
forum/forms/user.py
|
Python
|
mit
| 3,044
|
import re
import sys
import copy
from . import RefinerUtils
from pandajedi.jedicore import Interaction
from .TaskRefinerBase import TaskRefinerBase
from pandajedi.jedicore.JediTaskSpec import JediTaskSpec
from pandajedi.jedicore.JediDatasetSpec import JediDatasetSpec
# refiner for lost file recovery
class FileRecovery (TaskRefinerBase):
# constructor
def __init__(self,taskBufferIF,ddmIF):
TaskRefinerBase.__init__(self,taskBufferIF,ddmIF)
# extract common parameters
def extractCommon(self,jediTaskID,taskParamMap,workQueueMapper):
return self.SC_SUCCEEDED
# check matching of dataset names
def checkDatasetNameMatching(self,datasetName,patternList):
# name list is not given
if patternList is None:
return False
# loop over all names
for namePattern in patternList:
if re.search('^'+namePattern+'$',datasetName) is not None:
return True
return False
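# Illustrative example (not from the original source): with
# patternList == ['user\..*\.log'], the name 'user.x.recov.log' matches,
# because every pattern is anchored as ^pattern$ before re.search runs.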
# main
def doRefine(self,jediTaskID,taskParamMap):
try:
# make logger
tmpLog = self.tmpLog
tmpLog.debug('start jediTaskID={0}'.format(jediTaskID))
# old dataset name
oldDatasetName = taskParamMap['oldDatasetName']
# accompany datasets
if 'oldAccompanyDatasetNames' in taskParamMap:
oldAccDatasetNames = taskParamMap['oldAccompanyDatasetNames']
else:
oldAccDatasetNames = None
# use first file to get task and dataset info
lostFileName = taskParamMap['lostFiles'][0]
# get old jediTaskID and datasetIDs
tmpStat,oldIDs = self.taskBufferIF.getIDsWithFileDataset_JEDI(oldDatasetName,lostFileName,'output')
if tmpStat is not True or oldIDs is None:
tmpLog.error('failed to get jediTaskID and DatasetID for {0}:{1}'.format(oldDatasetName,
lostFileName))
return self.SC_FAILED
# get task
oldJediTaskID = oldIDs['jediTaskID']
oldDatasetID = oldIDs['datasetID']
tmpStat,oldTaskSpec = self.taskBufferIF.getTaskWithID_JEDI(oldJediTaskID,True)
if tmpStat is not True:
tmpLog.error('failed to get TaskSpec for old jediTaskId={0}'.format(oldJediTaskID))
return self.SC_FAILED
# make task spec
taskSpec = JediTaskSpec()
taskSpec.copyAttributes(oldTaskSpec)
# reset attributes
taskSpec.jediTaskID = jediTaskID
taskSpec.taskType = taskParamMap['taskType']
taskSpec.taskPriority = taskParamMap['taskPriority']
self.taskSpec = taskSpec
# get datasets
tmpStat,datasetSpecList = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(oldJediTaskID)
if tmpStat is not True:
tmpLog.error('failed to get datasetSpecs')
return self.SC_FAILED
# loop over all datasets
provenanceID = None
dummyStreams = []
outDatasetSpec = None
datasetNameSpecMap = {}
for datasetSpec in datasetSpecList:
# for output datasets
if datasetSpec.type not in JediDatasetSpec.getInputTypes():
# collect output with the same provenanceID
if provenanceID is not None and datasetSpec.provenanceID != provenanceID:
continue
# set provenanceID if undefined
if provenanceID is None and datasetSpec.provenanceID is not None:
provenanceID = datasetSpec.provenanceID
# collect dummy streams
if datasetSpec.type != 'log' and (datasetSpec.datasetID != oldDatasetID and \
not self.checkDatasetNameMatching(datasetSpec.datasetName,oldAccDatasetNames)):
if datasetSpec.streamName not in dummyStreams:
dummyStreams.append(datasetSpec.streamName)
continue
# reset attributes
datasetSpec.status = 'defined'
datasetSpec.datasetID = None
datasetSpec.jediTaskID = jediTaskID
datasetSpec.nFiles = 0
datasetSpec.nFilesUsed = 0
datasetSpec.nFilesToBeUsed = 0
datasetSpec.nFilesFinished = 0
datasetSpec.nFilesFailed = 0
datasetSpec.nFilesOnHold = 0
# remove nosplit and repeat since even the same file is made for each boundaryID
datasetSpec.remNoSplit()
datasetSpec.remRepeat()
# append to map
datasetNameSpecMap[datasetSpec.datasetName] = datasetSpec
# set master and secondary for input
if datasetSpec.type in JediDatasetSpec.getInputTypes():
if datasetSpec.isMaster():
# master
self.inMasterDatasetSpec = datasetSpec
else:
# secondary
self.inSecDatasetSpecList.append(datasetSpec)
elif datasetSpec.type == 'log':
# set new attributes
tmpItem = taskParamMap['log']
datasetSpec.datasetName = tmpItem['dataset']
if 'container' in tmpItem:
datasetSpec.containerName = tmpItem['container']
if 'token' in tmpItem:
datasetSpec.storageToken = tmpItem['token']
if 'destination' in tmpItem:
datasetSpec.destination = tmpItem['destination']
# extract output filename template and change the value field
outFileTemplate,tmpItem['value'] = RefinerUtils.extractReplaceOutFileTemplate(tmpItem['value'],
datasetSpec.streamName)
# make output template
if outFileTemplate is not None:
if 'offset' in tmpItem:
offsetVal = 1 + tmpItem['offset']
else:
offsetVal = 1
outTemplateMap = {'jediTaskID' : self.taskSpec.jediTaskID,
'serialNr' : offsetVal,
'streamName' : datasetSpec.streamName,
'filenameTemplate' : outFileTemplate,
'outtype' : datasetSpec.type,
}
self.outputTemplateMap[datasetSpec.outputMapKey()] = [outTemplateMap]
# append
self.outDatasetSpecList.append(datasetSpec)
else:
# output dataset to make copies later
outDatasetSpec = datasetSpec
# replace redundant output streams with dummy files
for dummyStream in dummyStreams:
self.taskSpec.jobParamsTemplate = self.taskSpec.jobParamsTemplate.replace('${'+dummyStream+'}',
dummyStream.lower()+'.tmp')
self.setJobParamsTemplate(self.taskSpec.jobParamsTemplate)
# loop over all lost files
datasetIDSpecMap = {}
for lostFileName in taskParamMap['lostFiles']:
# get FileID
tmpStat,tmpIDs = self.taskBufferIF.getIDsWithFileDataset_JEDI(oldDatasetName,lostFileName,'output')
if tmpStat is not True or tmpIDs is None:
tmpLog.error('failed to get FileID for {0}:{1}'.format(oldDatasetName,
lostFileName))
return self.SC_FAILED
# get PandaID
tmpStat,pandaID = self.taskBufferIF.getPandaIDWithFileID_JEDI(tmpIDs['jediTaskID'],
tmpIDs['datasetID'],
tmpIDs['fileID'])
if tmpStat is not True or pandaID is None:
tmpLog.error('failed to get PandaID for {0}'.format(str(tmpIDs)))
return self.SC_FAILED
# get files
tmpStat,fileSpecList = self.taskBufferIF.getFilesWithPandaID_JEDI(pandaID)
if tmpStat is not True or fileSpecList == []:
tmpLog.error('failed to get files for PandaID={0}'.format(pandaID))
return self.SC_FAILED
# append
for fileSpec in fileSpecList:
# only input types
if fileSpec.type not in JediDatasetSpec.getInputTypes():
continue
# get original datasetSpec
if fileSpec.datasetID not in datasetIDSpecMap:
tmpStat,tmpDatasetSpec = self.taskBufferIF.getDatasetWithID_JEDI(fileSpec.jediTaskID,fileSpec.datasetID)
if tmpStat is not True or tmpDatasetSpec is None:
tmpLog.error('failed to get dataset for jediTaskID={0} datasetID={1}'.format(fileSpec.jediTaskID,
fileSpec.datasetID))
return self.SC_FAILED
datasetIDSpecMap[fileSpec.datasetID] = tmpDatasetSpec
origDatasetSpec = datasetIDSpecMap[fileSpec.datasetID]
if origDatasetSpec.datasetName not in datasetNameSpecMap:
tmpLog.error('datasetName={0} is missing in new datasets'.format(origDatasetSpec.datasetName))
return self.SC_FAILED
# not target or accompany datasets
if origDatasetSpec.datasetID != oldDatasetID and \
not self.checkDatasetNameMatching(origDatasetSpec.datasetName,oldAccDatasetNames):
continue
newDatasetSpec = datasetNameSpecMap[origDatasetSpec.datasetName]
# set new attributes
fileSpec.fileID = None
fileSpec.datasetID = None
fileSpec.jediTaskID = None
fileSpec.boundaryID = pandaID
fileSpec.keepTrack = 1
fileSpec.attemptNr = 1
fileSpec.status = 'ready'
# append
newDatasetSpec.addFile(fileSpec)
# make one output dataset per file
datasetSpec = copy.copy(outDatasetSpec)
# set new attributes
tmpItem = taskParamMap['output']
datasetSpec.datasetName = tmpItem['dataset']
if 'container' in tmpItem:
datasetSpec.containerName = tmpItem['container']
if 'token' in tmpItem:
datasetSpec.storageToken = tmpItem['token']
if 'destination' in tmpItem:
datasetSpec.destination = tmpItem['destination']
# use PandaID of original job as provenanceID
datasetSpec.provenanceID = pandaID
# append
self.outDatasetSpecList.append(datasetSpec)
# extract attempt number from original filename
tmpMatch = re.search('\.(\d+)$',lostFileName)
if tmpMatch is None:
offsetVal = 1
else:
offsetVal = 1 + int(tmpMatch.group(1))
# filename without attempt number
baseFileName = re.sub('\.(\d+)$','',lostFileName)
# make output template
outTemplateMap = {'jediTaskID' : self.taskSpec.jediTaskID,
'serialNr' : offsetVal,
'streamName' : datasetSpec.streamName,
'filenameTemplate' : baseFileName + '.${SN:d}',
'outtype' : datasetSpec.type,
}
self.outputTemplateMap[datasetSpec.outputMapKey()] = [outTemplateMap]
# append datasets to task parameters
for datasetSpec in datasetNameSpecMap.values():
if datasetSpec.Files == []:
continue
fileList = []
for fileSpec in datasetSpec.Files:
fileList.append({'lfn':fileSpec.lfn,
'firstEvent':fileSpec.firstEvent,
'startEvent':fileSpec.startEvent,
'endEvent':fileSpec.endEvent,
'keepTrack':fileSpec.keepTrack,
'boundaryID':fileSpec.boundaryID,
})
taskParamMap = RefinerUtils.appendDataset(taskParamMap,datasetSpec,fileList)
self.updatedTaskParams = taskParamMap
# grouping with boundaryID
self.setSplitRule(None,4,JediTaskSpec.splitRuleToken['groupBoundaryID'])
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error('doRefine failed with {0}:{1}'.format(errtype.__name__,errvalue))
return self.SC_FAILED
tmpLog.debug('done')
return self.SC_SUCCEEDED
|
PanDAWMS/panda-jedi
|
pandajedi/jedirefine/FileRecovery.py
|
Python
|
apache-2.0
| 14,043
|
import os
import hashlib
import yaml
import sys
import json
import click
from prettytable import PrettyTable
from bitshares.account import Account
from .decorators import onlineChain, unlockWallet, configfile
from .main import main, config
from .ui import print_message
@main.group()
@onlineChain
@click.pass_context
@click.option(
"--configfile", default="config.yaml", help="YAML file with configuration"
)
def api(ctx, configfile):
""" Open an local API for trading bots
"""
ctx.obj["configfile"] = configfile
@api.command()
@click.pass_context
def create(ctx):
""" Create default config file
"""
import shutil
this_dir, this_filename = os.path.split(__file__)
default_config_file = os.path.join(this_dir, "apis/example-config.yaml")
config_file = ctx.obj["configfile"]
shutil.copyfile(default_config_file, config_file)
print_message("Config file created: {}".format(config_file))
@api.command()
@click.pass_context
@configfile
def start(ctx):
""" Start the API according to the config file
"""
module = ctx.config.get("api", "poloniex")
# unlockWallet
if module == "poloniex":
from .apis import poloniex
poloniex.run(ctx, port=5000)
else:
print_message("Unkown 'api'!", "error")
@api.command()
@click.option(
"--password",
prompt="Plain Text Password",
hide_input=True,
confirmation_prompt=False,
help="Plain Text Password",
)
def apipassword(password):
""" Generate a SHA256 hash of the password for the YAML
configuration
"""
print_message(hashlib.sha256(bytes(password, "utf-8")).hexdigest(), "info")
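# Hedged CLI sketch (assuming the console entry point is installed as
# "uptick"; command names come from the decorators above):
#
# uptick api create        # copy apis/example-config.yaml to config.yaml
# uptick api start         # run the api named in config.yaml ("poloniex" by default)
# uptick api apipassword   # print a SHA256 hash to paste into the YAML config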
|
xeroc/uptick
|
uptick/api.py
|
Python
|
mit
| 1,654
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.widgets.reportview import get_match_cond
from frappe.model.db_query import DatabaseQuery
def get_filters_cond(doctype, filters, conditions):
if filters:
if isinstance(filters, dict):
filters = filters.items()
flt = []
for f in filters:
if isinstance(f[1], basestring) and f[1][0] == '!':
flt.append([doctype, f[0], '!=', f[1][1:]])
else:
flt.append([doctype, f[0], '=', f[1]])
query = DatabaseQuery(doctype)
query.filters = flt
query.conditions = conditions
query.build_filter_conditions(flt, conditions)
cond = ' and ' + ' and '.join(query.conditions)
else:
cond = ''
return cond
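# Illustrative sketch: get_filters_cond("Item", {"item_group": "Raw Material"}, [])
# builds the filter list [["Item", "item_group", "=", "Raw Material"]] and
# returns " and <condition>" SQL text, while a value prefixed with "!"
# (e.g. "!Yes") becomes a "!=" comparison instead; the exact SQL depends on
# DatabaseQuery.build_filter_conditions.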
# searches for active employees
def employee_query(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name, employee_name from `tabEmployee`
where status = 'Active'
and docstatus < 2
and ({key} like %(txt)s
or employee_name like %(txt)s)
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, employee_name), locate(%(_txt)s, employee_name), 99999),
name, employee_name
limit %(start)s, %(page_len)s""".format(**{
'key': searchfield,
'mcond': get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for leads which are not converted
def lead_query(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name, lead_name, company_name from `tabLead`
where docstatus < 2
and ifnull(status, '') != 'Converted'
and ({key} like %(txt)s
or lead_name like %(txt)s
or company_name like %(txt)s)
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, lead_name), locate(%(_txt)s, lead_name), 99999),
if(locate(%(_txt)s, company_name), locate(%(_txt)s, company_name), 99999),
name, lead_name
limit %(start)s, %(page_len)s""".format(**{
'key': searchfield,
'mcond':get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for customer
def customer_query(doctype, txt, searchfield, start, page_len, filters):
cust_master_name = frappe.defaults.get_user_default("cust_master_name")
if cust_master_name == "Customer Name":
fields = ["name", "customer_group", "territory"]
else:
fields = ["name", "customer_name", "customer_group", "territory"]
fields = ", ".join(fields)
return frappe.db.sql("""select {fields} from `tabCustomer`
where docstatus < 2
and ({key} like %(txt)s
or customer_name like %(txt)s)
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999),
name, customer_name
limit %(start)s, %(page_len)s""".format(**{
"fields": fields,
"key": searchfield,
"mcond": get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for supplier
def supplier_query(doctype, txt, searchfield, start, page_len, filters):
supp_master_name = frappe.defaults.get_user_default("supp_master_name")
if supp_master_name == "Supplier Name":
fields = ["name", "supplier_type"]
else:
fields = ["name", "supplier_name", "supplier_type"]
fields = ", ".join(fields)
return frappe.db.sql("""select {field} from `tabSupplier`
where docstatus < 2
and ({key} like %(txt)s
or supplier_name like %(txt)s)
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, supplier_name), locate(%(_txt)s, supplier_name), 99999),
name, supplier_name
limit %(start)s, %(page_len)s """.format(**{
'field': fields,
'key': searchfield,
'mcond':get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
def tax_account_query(doctype, txt, searchfield, start, page_len, filters):
tax_accounts = frappe.db.sql("""select name, parent_account from tabAccount
where tabAccount.docstatus!=2
and account_type in (%s)
and group_or_ledger = 'Ledger'
and company = %s
and `%s` LIKE %s
limit %s, %s""" %
(", ".join(['%s']*len(filters.get("account_type"))), "%s", searchfield, "%s", "%s", "%s"),
tuple(filters.get("account_type") + [filters.get("company"), "%%%s%%" % txt,
start, page_len]))
if not tax_accounts:
tax_accounts = frappe.db.sql("""select name, parent_account from tabAccount
where tabAccount.docstatus!=2 and group_or_ledger = 'Ledger'
and company = %s and `%s` LIKE %s limit %s, %s"""
% ("%s", searchfield, "%s", "%s", "%s"),
(filters.get("company"), "%%%s%%" % txt, start, page_len))
return tax_accounts
def item_query(doctype, txt, searchfield, start, page_len, filters):
from frappe.utils import nowdate
conditions = []
return frappe.db.sql("""select tabItem.name,
if(length(tabItem.item_name) > 40,
concat(substr(tabItem.item_name, 1, 40), "..."), item_name) as item_name,
if(length(tabItem.description) > 40, \
concat(substr(tabItem.description, 1, 40), "..."), description) as description
from tabItem
where tabItem.docstatus < 2
and (tabItem.end_of_life > %(today)s or ifnull(tabItem.end_of_life, '0000-00-00')='0000-00-00')
and (tabItem.`{key}` LIKE %(txt)s
or tabItem.item_name LIKE %(txt)s
or tabItem.description LIKE %(txt)s)
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, item_name), locate(%(_txt)s, item_name), 99999),
name, item_name
limit %(start)s, %(page_len)s """.format(key=searchfield,
fcond=get_filters_cond(doctype, filters, conditions),
mcond=get_match_cond(doctype)),
{
"today": nowdate(),
"txt": "%%%s%%" % txt,
"_txt": txt.replace("%", ""),
"start": start,
"page_len": page_len
})
def bom(doctype, txt, searchfield, start, page_len, filters):
conditions = []
return frappe.db.sql("""select tabBOM.name, tabBOM.item
from tabBOM
where tabBOM.docstatus=1
and tabBOM.is_active=1
and tabBOM.%(key)s like "%(txt)s"
%(fcond)s %(mcond)s
limit %(start)s, %(page_len)s """ % {'key': searchfield, 'txt': "%%%s%%" % txt,
'fcond': get_filters_cond(doctype, filters, conditions),
'mcond':get_match_cond(doctype), 'start': start, 'page_len': page_len})
def get_project_name(doctype, txt, searchfield, start, page_len, filters):
cond = ''
if filters.get('customer'):
cond = '(`tabProject`.customer = "' + filters['customer'] + '" or ifnull(`tabProject`.customer,"")="") and'
return frappe.db.sql("""select `tabProject`.name from `tabProject`
where `tabProject`.status not in ("Completed", "Cancelled")
and %(cond)s `tabProject`.name like "%(txt)s" %(mcond)s
order by `tabProject`.name asc
limit %(start)s, %(page_len)s """ % {'cond': cond,'txt': "%%%s%%" % txt,
'mcond':get_match_cond(doctype),'start': start, 'page_len': page_len})
def get_delivery_notes_to_be_billed(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select `tabDelivery Note`.name, `tabDelivery Note`.customer_name
from `tabDelivery Note`
where `tabDelivery Note`.`%(key)s` like %(txt)s and
`tabDelivery Note`.docstatus = 1 %(fcond)s and
(ifnull((select sum(qty) from `tabDelivery Note Item` where
`tabDelivery Note Item`.parent=`tabDelivery Note`.name), 0) >
ifnull((select sum(qty) from `tabSales Invoice Item` where
`tabSales Invoice Item`.docstatus = 1 and
`tabSales Invoice Item`.delivery_note=`tabDelivery Note`.name), 0))
%(mcond)s order by `tabDelivery Note`.`%(key)s` asc
limit %(start)s, %(page_len)s""" % {
"key": searchfield,
"fcond": get_filters_cond(doctype, filters, []),
"mcond": get_match_cond(doctype),
"start": "%(start)s", "page_len": "%(page_len)s", "txt": "%(txt)s"
}, { "start": start, "page_len": page_len, "txt": ("%%%s%%" % txt) })
def get_batch_no(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
if filters.has_key('warehouse'):
return frappe.db.sql("""select batch_no from `tabStock Ledger Entry` sle
where item_code = '%(item_code)s'
and warehouse = '%(warehouse)s'
and batch_no like '%(txt)s'
and exists(select * from `tabBatch`
where name = sle.batch_no
and (ifnull(expiry_date, '')='' or expiry_date >= '%(posting_date)s')
and docstatus != 2)
%(mcond)s
group by batch_no having sum(actual_qty) > 0
order by batch_no desc
limit %(start)s, %(page_len)s """ % {'item_code': filters['item_code'],
'warehouse': filters['warehouse'], 'posting_date': filters['posting_date'],
'txt': "%%%s%%" % txt, 'mcond':get_match_cond(doctype),
'start': start, 'page_len': page_len})
else:
return frappe.db.sql("""select name from tabBatch
where docstatus != 2
and item = '%(item_code)s'
and (ifnull(expiry_date, '')='' or expiry_date >= '%(posting_date)s')
and name like '%(txt)s'
%(mcond)s
order by name desc
limit %(start)s, %(page_len)s""" % {'item_code': filters['item_code'],
'posting_date': filters['posting_date'], 'txt': "%%%s%%" % txt,
'mcond':get_match_cond(doctype),'start': start,
'page_len': page_len})
def get_account_list(doctype, txt, searchfield, start, page_len, filters):
filter_list = []
if isinstance(filters, dict):
for key, val in filters.items():
if isinstance(val, (list, tuple)):
filter_list.append([doctype, key, val[0], val[1]])
else:
filter_list.append([doctype, key, "=", val])
elif isinstance(filters, list):
filter_list.extend(filters)
if "group_or_ledger" not in [d[1] for d in filter_list]:
filter_list.append(["Account", "group_or_ledger", "=", "Ledger"])
if searchfield and txt:
filter_list.append([doctype, searchfield, "like", "%%%s%%" % txt])
return frappe.widgets.reportview.execute("Account", filters = filter_list,
fields = ["name", "parent_account"],
limit_start=start, limit_page_length=page_len, as_list=True)
|
BhupeshGupta/erpnext
|
erpnext/controllers/queries.py
|
Python
|
agpl-3.0
| 10,371
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import QDir, QSize, QSizeF, Qt, QUrl, QRectF
from PyQt5.QtGui import QTransform
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
from PyQt5.QtMultimediaWidgets import QGraphicsVideoItem
from PyQt5.QtWidgets import (QApplication, QFileDialog, QGraphicsScene,
QGraphicsView, QHBoxLayout, QPushButton, QSlider, QStyle, QVBoxLayout,
QWidget, QSizePolicy, QPlainTextEdit)
import os
import sys
# add parent folder to python path
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from video import get_video_resolution
WIDTH = 600.0
WIGGLE = 40.0
class VideoPlayer(QWidget):
"""
Arguments
---------
parent: QWidget, the parent widget of VideoPlayer
display_status: bool, default False, will show the status of the media player in the gui
"""
def __init__(self, parent=None, display_status=False):
super(VideoPlayer, self).__init__(parent)
self.display_status = display_status
self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
self.videoItem = QGraphicsVideoItem()
scene = QGraphicsScene(self)
graphicsView = QGraphicsView(scene)
scene.addItem(self.videoItem)
self.playButton = QPushButton()
self.playButton.setEnabled(False)
self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
self.playButton.clicked.connect(self.play)
self.positionSlider = QSlider(Qt.Horizontal)
self.positionSlider.setRange(0, 0)
self.positionSlider.sliderMoved.connect(self.setPosition)
if self.display_status:
self.status_mapping = {
QMediaPlayer.UnknownMediaStatus: "UnknownMediaStatus",
QMediaPlayer.NoMedia: "NoMedia",
QMediaPlayer.LoadingMedia: "LoadingMedia",
QMediaPlayer.LoadedMedia: "LoadedMedia",
QMediaPlayer.StalledMedia: "StalledMedia",
QMediaPlayer.BufferingMedia: "BufferingMedia",
QMediaPlayer.BufferedMedia: "BufferedMedia",
QMediaPlayer.EndOfMedia: "EndOfMedia",
QMediaPlayer.InvalidMedia: "InvalidMedia"
}
self.statusText = QPlainTextEdit()
self.statusText.setReadOnly(True)
self.statusText.setFixedHeight(25)
self.statusText.setFixedWidth(150)
self.mediaPlayer.mediaStatusChanged.connect(self.mediaStatusChanged)
controlLayout = QHBoxLayout()
controlLayout.setContentsMargins(0, 0, 0, 0)
controlLayout.addWidget(self.playButton)
controlLayout.addWidget(self.positionSlider)
if self.display_status:
controlLayout.addWidget(self.statusText)
layout = QVBoxLayout()
layout.addWidget(graphicsView)
layout.addLayout(controlLayout)
self.setFixedWidth(WIDTH + WIGGLE)
self.setLayout(layout)
self.mediaPlayer.setVideoOutput(self.videoItem)
self.mediaPlayer.stateChanged.connect(self.mediaStateChanged)
self.mediaPlayer.positionChanged.connect(self.positionChanged)
self.mediaPlayer.durationChanged.connect(self.durationChanged)
def openFile(self, fileName):
if fileName is not None and fileName != '':
self.mediaPlayer.setMedia(
QMediaContent(QUrl.fromLocalFile(fileName)))
# set resolution
res_orig = get_video_resolution(fileName)
self.aspect_ratio = float(res_orig[0]) / res_orig[1]
self.videoItem.setSize(QSizeF(WIDTH,
WIDTH / self.aspect_ratio))
self.setFixedHeight(WIDTH / self.aspect_ratio + 2*WIGGLE)
self.playButton.setEnabled(True)
# trick to show screenshot of the first frame of video
self.mediaPlayer.play()
self.mediaPlayer.pause()
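# Worked sizing example (illustrative): a 1920x1080 source gives
# aspect_ratio = 16/9, so the video item is sized 600 x 337.5 and the widget
# height is fixed at 337.5 + 2 * 40 = 417.5 (from WIDTH and WIGGLE above).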
def closeFile(self):
self.mediaPlayer.setMedia(QMediaContent())
def play(self):
if self.mediaPlayer.state() == QMediaPlayer.PlayingState:
self.mediaPlayer.pause()
else:
self.mediaPlayer.play()
def mediaStateChanged(self, state):
if self.mediaPlayer.state() == QMediaPlayer.PlayingState:
self.playButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaPause))
else:
self.playButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaPlay))
def mediaStatusChanged(self, status):
self.statusText.setPlaceholderText(self.status_mapping[status])
def positionChanged(self, position):
self.positionSlider.setValue(position)
#print self.positionSlider.value()
# if position slider has reached the end, let's stop the video
if self.positionSlider.value() >= self.positionSlider.maximum() - 1:
self.mediaPlayer.stop()
# play/pause hack to show the first frame of video
self.mediaPlayer.play()
self.mediaPlayer.pause()
def durationChanged(self, duration):
self.positionSlider.setRange(0, duration)
def setPosition(self, position):
self.mediaPlayer.setPosition(position)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
player = VideoPlayer()
player.show()
sys.exit(app.exec_())
|
santosfamilyfoundation/SantosGUI
|
application/custom/videographicsitem.py
|
Python
|
mit
| 7,446
|
__version__ = '0.1.0'
from .subscribe import subscribe
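# Hedged usage sketch: client code typically does
#   from mailchimp_subscribe import subscribe
# and then calls subscribe(...); see subscribe.py for the actual signature.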
|
ebrelsford/mailchimp_subscribe
|
mailchimp_subscribe/__init__.py
|
Python
|
bsd-2-clause
| 56
|
import os
import copy
import scipy.interpolate as spi
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
data_root = 'toneclassifier'
train_data_path = "%s/train" % data_root
val_data_path = "%s/test" % data_root
test_data_path = "%s/test_new" % data_root
def SetPath(root):
global data_root, train_data_path, val_data_path, test_data_path
data_root = root
train_data_path = "%s/train" % data_root
val_data_path = "%s/test" % data_root
test_data_path = "%s/test_new" % data_root
labels = {
'one': 0,
'two': 1,
'three': 2,
'four': 3
}
def LoadData(mode='train'):
data_path = train_data_path
if mode == 'val':
data_path = val_data_path
elif mode == 'test':
data_path = test_data_path
Engy = []
F0 = []
y = []
for labelName, label in labels.iteritems():
data_subset_path = "%s/%s" % (data_path, labelName)
data_names = set()
for filename in os.listdir(data_subset_path):
if filename[0] == ".":
continue
if ".engy" in filename:
data_names.add(filename[0:-5])
elif ".f0" in filename:
data_names.add(filename[0:-3])
for data_name in data_names:
engy = map(float, open("%s/%s.engy" % (data_subset_path, data_name)).readlines())
f0 = map(float, open("%s/%s.f0" % (data_subset_path, data_name)).readlines())
Engy.append(engy)
F0.append(f0)
y.append(label)
return Engy, F0, y
def IgnoreLowEnergyFrequence(Engy, F0):
data_num = len(Engy)
if data_num != len(F0):
raise ValueError("the number of input data mismatched. len(Engy)==%d and len(F0)==%d" % (len(Engy), len(F0)))
resEngy = []
resF0 = []
for i in xrange(data_num):
engy = copy.copy(Engy[i])
f0 = copy.copy(F0[i])
data_len = len(engy)
if data_len != len(f0):
raise ValueError("the length of %d-th data mismatched. len(engy)==%d and len(f0)==%d" % (i, len(engy), len(f0)))
zero_freq_engy_sum = 0.0
zero_freq_count = 0.0
for j in xrange(data_len):
if f0[j] < 1e-4:
zero_freq_count += 1
zero_freq_engy_sum += math.sqrt(engy[j])
mean_engy = zero_freq_engy_sum / zero_freq_count
for j in xrange(data_len):
if math.sqrt(engy[j]) <= max(mean_engy, 1.0):
f0[j] = 0.0
resEngy.append(engy)
resF0.append(f0)
return resEngy, resF0
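# Illustrative behaviour: a frame whose sqrt(energy) is at or below the mean
# sqrt(energy) of the unvoiced (F0 == 0) frames -- or below 1.0 -- has its
# F0 zeroed, i.e. quiet frames are treated as unvoiced.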
def TrimData(Engy, F0):
data_num = len(Engy)
if data_num != len(F0):
raise ValueError("the number of input data mismatched. len(Engy)==%d and len(F0)==%d" % (len(Engy), len(F0)))
resEngy = []
resF0 = []
for i in xrange(data_num):
engy = copy.copy(Engy[i])
f0 = copy.copy(F0[i])
data_len = len(engy)
if data_len != len(f0):
raise ValueError("the length of %d-th data mismatched. len(engy)==%d and len(f0)==%d" % (i, len(engy), len(f0)))
start = None
end = None
for i in xrange(len(f0)):
if f0[i] > 1e-5:
start = i
break
for i in xrange(len(f0) - 1, -1, -1):
if f0[i] > 1e-5:
end = i + 1
break
resEngy.append(copy.copy(engy[start:end]))
resF0.append(copy.copy(f0[start:end]))
return resEngy, resF0
def TransformToMelFrequencyScale(F0):
data_num = len(F0)
resF0 = []
for i in xrange(data_num):
f0 = copy.copy(F0[i])
data_len = len(f0)
for j in xrange(data_len):
f0[j] = 1127 * math.log(1 + f0[j] / 700)
resF0.append(f0)
return resF0
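# Worked example: TransformToMelFrequencyScale maps 700 Hz to
# 1127 * ln(1 + 700/700) = 1127 * ln 2 ~= 781.2 mel, compressing high
# frequencies far more than the 100-300 Hz range typical of speech F0.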
def DivSingleDataStd(F0):
data_num = len(F0)
resF0 = []
for i in xrange(data_num):
f0 = copy.copy(F0[i])
data_len = len(f0)
f0arr = np.asarray(f0)
std = f0arr.std()
f0arr = f0arr / std
for j in xrange(data_len):
f0[j] = f0arr[j]
resF0.append(f0)
return resF0
def DivDataStd(F0):
data_num = len(F0)
resF0 = []
tmp = []
for i in xrange(data_num):
for j in xrange(len(F0[i])):
tmp.append(F0[i][j])
F0arr = np.asarray(tmp)
std = F0arr.std()
for i in xrange(data_num):
f0 = copy.copy(F0[i])
data_len = len(f0)
for j in xrange(data_len):
f0[j] = f0[j] / std
resF0.append(f0)
return resF0
def SmoothRawF0(F0):
C1 = 15
data_num = len(F0)
resF0 = []
for i in xrange(data_num):
f0 = copy.copy(F0[i])
data_len = len(f0)
for k in xrange(data_len - 1, -1, -1):
for j in xrange(k, data_len):
if abs(f0[j] - f0[j - 1]) < C1:
continue
if abs(f0[j] / 2 - f0[j - 1]) < C1:
f0[j] /= 2
elif abs(2 * f0[j] - f0[j - 1]) < C1:
f0[j] *= 2
resF0.append(f0)
return resF0
def SmoothF0(F0):
C1 = 0.16
C2 = 0.4
data_num = len(F0)
resF0 = []
for i in xrange(data_num):
f0 = copy.copy(F0[i])
data_len = len(f0)
for j in xrange(1, data_len):
if abs(f0[j] - f0[j - 1]) < C1:
continue
if abs(f0[j] / 2 - f0[j - 1]) < C1:
f0[j] /= 2
elif abs(2 * f0[j] - f0[j - 1]) < C1:
f0[j] *= 2
ff0 = copy.copy([f0[0]] + f0 + [f0[-1]])
fff0 = copy.copy(ff0)
data_len = len(ff0)
f0_2 = (ff0[0], ff0[0])
for j in xrange(1, data_len - 1):
if abs(ff0[j] - ff0[j - 1]) > C1 and abs(ff0[j + 1] - ff0[j - 1]) > C2:
ff0[j] = 2 * f0_2[1] - f0_2[0]
elif abs(ff0[j] - ff0[j - 1]) > C1 and abs(ff0[j + 1] - ff0[j - 1]) <= C2:
ff0[j] = (ff0[j - 1] + ff0[j + 1]) / 2
f0_2 = (f0_2[1], ff0[j])
res_f0 = None
if abs(ff0[-1] - fff0[-1]) <= C1:
res_f0 = ff0
else:
f0_2 = (fff0[-1], fff0[-1])
for j in xrange(data_len - 2, 0, -1):
if abs(fff0[j] - fff0[j + 1]) > C1 and abs(fff0[j - 1] - fff0[j + 1]) > C2:
fff0[j] = 2 * f0_2[1] - f0_2[0]
elif abs(fff0[j] - fff0[j + 1]) > C1 and abs(fff0[j - 1] - fff0[j + 1]) <= C2:
fff0[j] = (fff0[j - 1] + fff0[j + 1]) / 2
f0_2 = (f0_2[1], fff0[j])
s = 0
for j in xrange(data_len - 2, 0, -1):
if abs(fff0[j] - ff0[j]) < C1:
s = j
break
res_f0 = ff0[: s + 1] + fff0[s + 1: ]
res_f0 = [res_f0[0]] + res_f0 + [res_f0[-1]]
data_len = len(res_f0)
for j in xrange(2, data_len - 2):
res_f0[j] = (res_f0[j - 2] + res_f0[j - 1] + res_f0[j] + res_f0[j + 1] + res_f0[j + 2]) / 5.0
resF0.append(res_f0[2:-2])
return resF0
def NormalizeDataLengthWithInterpolation(Engy, F0, result_len=200):
data_num = len(Engy)
if data_num != len(F0):
raise ValueError("the number of input data mismatched. len(Engy)==%d and len(F0)==%d" % (len(Engy), len(F0)))
resEngy = []
resF0 = []
for i in xrange(data_num):
engy = copy.copy(Engy[i])
f0 = copy.copy(F0[i])
data_len = len(engy)
if data_len != len(f0):
raise ValueError(
"the length of %d-th data mismatched. len(engy)==%d and len(f0)==%d" % (i, len(engy), len(f0)))
k = float(result_len - 1) / float(data_len - 1)
x = [i * k for i in xrange(data_len)]
newX = [i * 1.0 for i in xrange(result_len)]
newX[-1] = x[-1]
new_engy = spi.interp1d(x, engy, kind='cubic')(newX)
new_f0 = spi.interp1d(x, f0, kind='cubic')(newX)
resEngy.append(new_engy)
resF0.append(new_f0)
return resEngy, resF0
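# Illustrative example: a 50-frame contour resampled to result_len=200 uses
# k = 199/49, stretching the old index grid onto [0, 199] before cubic
# re-interpolation, so every contour comes out exactly 200 frames long.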
def SingleDataDivideMax(data):
mean = np.asarray(data).max()
for i in xrange(len(data)):
data[i] /= mean
return data
def DataSetDivideMax(Data):
for i in xrange(len(Data)):
Data[i] = SingleDataDivideMax(Data[i])
return Data
def SingleDataMinusMean(data):
mean = np.asarray(data).mean()
for i in xrange(len(data)):
data[i] -= mean
return data
def DataSetMinusMean(Data):
for i in xrange(len(Data)):
Data[i] = SingleDataMinusMean(Data[i])
return Data
def SaveData(Engy, F0, y, mode='train'):
save_engy_name = 'train_engys'
save_f0_name = 'train_f0s'
save_y_name = 'train_labels'
if mode == 'val':
save_engy_name = 'val_engys'
save_f0_name = 'val_f0s'
save_y_name = 'val_labels'
elif mode == 'test':
save_engy_name = 'test_engys'
save_f0_name = 'test_f0s'
save_y_name = 'test_labels'
engy_file = open(save_engy_name, "w")
f0_file = open(save_f0_name, "w")
y_file = open(save_y_name, "w")
data_num = len(Engy)
if data_num != len(F0) or data_num != len(y):
raise ValueError("the number of data mismatched, Engy:%d, F0:%d, y:%d" % (len(Engy), len(F0), len(y)))
for i in xrange(data_num):
engy_file.write("%s\n" % (' '.join(map(lambda x: "%.5f" % x, Engy[i]))))
f0_file.write("%s\n" % (' '.join(map(lambda x: "%.5f" % x, F0[i]))))
y_file.write("%d\n"% y[i])
engy_file.close()
f0_file.close()
y_file.close()
def PlotAndSaveF0(plot_prefix='train', F0=None, y=None):
max_len = max(map(len, F0))
for label in xrange(4):
for i in xrange(len(F0)):
if (y[i] != label):
continue
coff = float(max_len - 1) / (len(F0[i]) - 1)
x = np.arange(0, len(F0[i]), 1)
x = coff * x
fx = np.asarray(F0[i])
plt.plot(x, fx)
plt.savefig('%s-plt_%d' % (plot_prefix, label))
plt.clf()
def Amplify(Data, times):
for i in xrange(len(Data)):
for j in xrange(len(Data[i])):
Data[i][j] *= times
return Data
def unison_shuffled_copies(a, b):
assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p]
def FitMissPoint(F0):
data_num = len(F0)
resF0 = []
for i in xrange(data_num):
f0 = F0[i]
data_len = len(f0)
f0arr = np.asarray(f0)
mean = f0arr.mean()
x = []
y = []
for j in xrange(data_len):
if f0[j] > 0.1 * mean:
x.append(j)
y.append(f0[j])
z = np.poly1d(np.polyfit(x, y, 2))
for j in xrange(data_len):
if f0[j] <= 0.1 * mean:
f0[j] = z(j)
resF0.append(f0)
return resF0
def AddWhiteNoise(F0):
data_num = len(F0)
for i in xrange(data_num):
data_len = len(F0[i])
for j in xrange(data_len):
F0[i][j] += np.random.normal(0, 1e-4)
return F0
def FitSingleData(f0):
data_len = len(f0)
flag = []
x = []
y = []
for i in xrange(data_len):
if f0[i] > 10:
x.append(i)
y.append(f0[i])
flag.append(True)
else:
flag.append(False)
z = np.polyfit(x, y, 2)
a, b, c = z
z = np.poly1d(z)
for i in xrange(data_len):
if f0[i] <= 1.:
f0[i] = z(i)
# Solve 2a * x + b == 0
g = -b / (2 * a)
g = int(g)
if g > 1 and g < data_len - 1:
part_a = f0[: g]
flag_a = flag[: g]
part_b = f0[g: ]
flag_b = flag[g: ]
x = []
y = []
for i in xrange(len(part_a)):
x.append(i)
y.append(f0[i])
z = np.poly1d(np.polyfit(x, y, 1))
for i in xrange(len(part_a)):
if not flag_a[i]:
part_a[i] = z(i)
x = []
y = []
for i in xrange(len(part_b)):
x.append(i)
y.append(f0[i])
z = np.poly1d(np.polyfit(x, y, 1))
for i in xrange(len(part_b)):
if not flag_b[i]:
part_b[i] = z(i)
f0 = part_a + part_b
else:
x = []
y = []
for i in xrange(data_len):
x.append(i)
y.append(f0[i])
z = np.poly1d(np.polyfit(x, y, 1))
for i in xrange(data_len):
if not flag[i]:
f0[i] = z(i)
return f0
def FitData(F0):
data_num = len(F0)
for i in xrange(data_num):
F0[i] = FitSingleData(F0[i])
return F0
|
BreakVoid/DL_Project
|
data_utils.py
|
Python
|
mit
| 12,608
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that integrates PyUnit (unittest) with Checkers.
This module provides an integration with PyUnit. Note that this requires at
least Python 2.7 to work because that's when load_tests was added to PyUnit.
This allows us to run Checkers tests through the PyUnit framework. Simply build
up the PyUnitTestRun just as you would a normal Checkers TestRun in the
load_tests method of the main module and return the pyunit_suite.
Note that this will just add the Checkers tests to whatever tests that PyUnit
would have run anyway.
Example:
from wherever import foo_tests # Contains the checkers.TestCase functions.
def load_tests(loader, tests, pattern):
test_run = PyUnitTestRun(loader, tests)
test_run.LoadTestCasesFromModule(foo_tests, 'FooTests')
return test_run.pyunit_suite
if __name__ == '__main__':
unittest.main()
"""
import gflags
import sys
import traceback
import unittest
from checkers.python import checkers
FLAGS = gflags.FLAGS
def _ListOfTestsGenerator(s):
tests = []
for test in s:
if unittest.suite._isnotsuite(test):
tests.append(type(test))
else:
tests += _ListOfTestsGenerator(test)
return tests
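# Editor's sketch: _ListOfTestsGenerator flattens a (possibly nested)
# unittest.TestSuite into the flat list of test classes it contains.
def _DemoListOfTests():
  class _NoopTest(unittest.TestCase):
    def testNothing(self):
      pass
  nested = unittest.TestSuite([unittest.TestSuite([_NoopTest('testNothing')])])
  return _ListOfTestsGenerator(nested)  # -> [_NoopTest]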
class PyUnitTestSuite(unittest.TestCase):
def setUp(self):
for setup in self.test_run.test_case_setup:
setup(self.test_run)
def tearDown(self):
for teardown in self.test_run.test_case_teardown:
teardown(self.test_run)
@classmethod
def setUpClass(cls):
# print "SetUp for test run %s" % cls.test_run.name
for setup in cls.test_run.setup:
setup(cls.test_run)
@classmethod
def tearDownClass(cls):
# print "TearDown for test run %s" % cls.test_run.name
for teardown in cls.test_run.teardown:
teardown(cls.test_run)
class PyUnitTestRun(object):
"""PyUnit wrapper for a checkers TestRun.
This class can be used the same way as a checkers.TestRun class. The
difference is that it has been extended to support converting a Checkers test
run into a PyUnit test suite.
"""
def __init__(self, test_runs, pyunit_tests=None,
test_case_type=unittest.TestCase):
self.test_runs = test_runs
self.pyunit_tests = pyunit_tests
self.test_case_type = test_case_type
@property
def pyunit_test_cases(self):
return _ListOfTestsGenerator(self.pyunit_suite)
@property
def pyunit_suite(self):
"""Gets the unittest.TestSuite containing all of the tests.
This will return all of the tests that will be run including both Checkers
tests and normal PyUnit tests.
Returns:
unittest.TestSuite: The suite containing the PyUnit-ified test cases.
"""
pyunit_suite = unittest.TestSuite()
# Add the tests discovered by PyUnit.
if self.pyunit_tests:
pyunit_suite.addTest(self.pyunit_tests)
# Add the tests defined by Checkers.
loader = unittest.defaultTestLoader
loader.testMethodPrefix = 'Test'
for test_run in self.test_runs:
for suite in test_run.suites.values():
pyunit_test_case = self._CreatePyUnitTestCaseSuite(suite, test_run)
pyunit_suite.addTest(loader.loadTestsFromTestCase(pyunit_test_case))
return pyunit_suite
def _CreatePyUnitTestCaseSuite(self, suite, test_run):
"""Creates a TestCase object that represents the Checkers test suite.
Sorry for the naming here, but PyUnit is really confusing in that a TestCase
is actually a class representing a test suite. So this method is converting
a Checkers TestSuite into a PyUnit TestCase with a bunch of Test* methods.
Args:
suite: checkers.TestSuite, The test suite PyUnit needs to execute.
test_run: checkers.TestRun, The test run that is executing.
Returns:
unittest.TestCase: The TestCase class containing the suite's test cases.
"""
test_methods = {}
for test_case_name, test_case in suite.test_cases.iteritems():
test_closure = test_case.CreateTestClosure(test_run.components,
test_suite=suite,
test_run=test_run)
test_methods[test_case_name] = self._CreatePyUnitTestMethod(test_closure)
# Register the closure under both its original 'Test...' name and a
# lowercase-initial 't...' alias.
test_case_name = 't' + test_case_name[1:]
test_methods[test_case_name] = self._CreatePyUnitTestMethod(test_closure)
test_methods['test_run'] = test_run
cls = type(suite.name, (PyUnitTestSuite,), test_methods)
cls.__module__ = test_run.module
return cls
def _CreatePyUnitTestMethod(self, test_closure):
"""Creates a method that can be called by the PyUnit framework.
By default, each test method gets passed the TestCase instance when it gets
called by PyUnit, so this wrapper just takes that test case and throws it
away.
Args:
test_closure: function, The test case with all of the args assigned.
Returns:
function: The test method that ignores the test_case parameter.
"""
def PyUnitTestMethod(test_case): # pylint: disable=unused-argument
"""Wrapper function that discards the test_case and calls the test method.
Args:
test_case: unittest.TestCase, The executing test case (ignored).
"""
test_closure()
test_method = PyUnitTestMethod
test_method.func_name = test_closure.func_name
if not test_method.func_name.startswith('Test'):
test_method.func_name = 'Test' + test_method.func_name
test_method.func_doc = test_closure.func_doc
return test_method
def LoadTests(test_runs, include_pyunit_tests, module=None):
"""Function that takes in the tests that PyUnit has already discovered."""
if not module:
module = sys.modules['__main__']
def LoadWrapper(loader, tests, pattern): # pylint: disable=unused-argument,g-line-too-long
if not include_pyunit_tests:
tests = unittest.TestSuite()
result = None
try:
result = PyUnitTestRun(test_runs, tests).pyunit_suite
except:
traceback.print_exc()
raise
return result
setattr(module, 'load_tests', LoadWrapper)
_SHUTDOWN_HOOKS = {}
def RegisterShutdownHook(test_name, shutdown_hook_function):
if test_name not in _SHUTDOWN_HOOKS:
_SHUTDOWN_HOOKS[test_name] = []
_SHUTDOWN_HOOKS[test_name].append(shutdown_hook_function)
def _ShutdownHook(result):
checkers.Shutdown()
for hooks in _SHUTDOWN_HOOKS.values():
for hook in hooks:
hook(result)
def main(test_run, *args, **kwargs):
test_runs = test_run
if not isinstance(test_runs, list):
test_runs = [test_run]
if 'module' in kwargs:
LoadTests(test_runs, kwargs.pop('include_pyunit_tests', True),
module=kwargs['module'])
else:
LoadTests(test_runs, kwargs.pop('include_pyunit_tests', True))
if 'unittest_shutdown_hook' in kwargs:
RegisterShutdownHook('global', kwargs['unittest_shutdown_hook'])
kwargs['unittest_shutdown_hook'] = _ShutdownHook
return unittest.main(argv=[sys.argv[0]])
|
google/checkers_classic
|
python/integrations/pyunit/pyunit.py
|
Python
|
apache-2.0
| 7,520
|
# -*- coding: utf-8 -*-
import itertools
import pythoncom
import win32com.client as com
__author__ = 'cvargasc'
# An intersection is a SignalController; it has incoming lanes, outgoing lanes, crossings and traffic lights.
class Interseccion:
# ------------------------
# Constructor
# ------------------------
# Initializes the intersection with its incoming lanes, outgoing lanes and crossings
# |-> computes the groups of compatible crossings
def __init__(self, scComId):
pythoncom.CoInitialize()
sc = com.Dispatch(pythoncom.CoGetInterfaceAndReleaseStream(scComId, pythoncom.IID_IDispatch))
# ------------------------
# Attributes
# ------------------------
self.sc = sc # Keep the SignalController as an attribute of the class
self.id = sc.AttValue('No') # Fetch and store the id of the SignalController within the network
self.nombre = sc.AttValue('Name') # Fetch and store the name of the SignalController
print " + Instantiating Interseccion for SignalController "+self.nombre
# Each lane has a name given by its coordinate, plus a list.
# |-> lista[0] = occupancy | lista[1] = numVh | lista[2] = maxVh --> see escenario.py!
# Occupancy is defined as the ratio between the number of cars in the lane and the maximum number of cars ever seen in the lane
# |-> It must be a number between 0 and 1
self.carrilesEntrada = {} # The coordinates of the incoming lanes refer to the origin of the vehicles
self.conectores = {} # The link between the incoming lane and the outgoing lane
self.carrilesSalida = {} # The coordinates of the outgoing lanes refer to the destination of the vehicles
# Each crossing consists of a traffic light (SignalHead), an incoming lane and a set of outgoing lanes
# cruces[ID] --> A crossing name looks like: 0,1;0,-1:-1,0 and maps to a list with the elements of the crossing:
# cruce[0] --> Signal controller from the COM interface
# cruce[1] --> String holding the name of the incoming lane
# cruce[2] --> List of strings with the names of the outgoing lanes
#####
# For example, to change the light colour of the SignalGroup of crossing '0,-1;0,1:1,0':
# cruces['0,-1;0,1:1,0'][0].SetAttValue("State", "GREEN")
self.cruces = {}
# Sets of crossings that can be enabled simultaneously. Each group is a tuple of strings holding
# the ids of the crossings belonging to that group. The group id is an auto-incremented integer.
# The habilitarGrupo method must be used to safely enable the crossings of a given group.
self.grupos = {}
# The id of the group currently enabled at this intersection.
self.idGrupoActual = -1
# Generate the crossings from the SignalGroups
for sg in sc.SGs:
nombreSignalGroup = sg.AttValue('Name')
print "\n ++ Generating crossing for SignalGroup "+nombreSignalGroup
carrilEntrada, carrilesSalida = nombreSignalGroup.split(';')[0],nombreSignalGroup.split(';')[1].split(':')
if not carrilEntrada in self.carrilesEntrada:
self.carrilesEntrada[carrilEntrada] = None
# ToDo handle similar logic for the connectors --> Must support multiple connectors per crossing
for carrilSalida in carrilesSalida:
if not carrilSalida in self.carrilesSalida:
self.carrilesSalida[carrilSalida] = None
self.cruces[nombreSignalGroup] = [sg,carrilEntrada,carrilesSalida]
# Call the method that computes the groups of crossings that can be enabled simultaneously
self.__calcularGrupos()
print "\n + Instantiated Interseccion "+self.nombre+" :: "\
+ "\n Possible crossings = " + str(len(self.cruces))\
+ "\n Compatible crossing groups = " + str(len(self.grupos))
# ------------------------
# Public methods
# ------------------------
def vincularCarrilesEscenario(self,carrilesEntrada, conectores, carrilesSalida):
for carrilEntrada, lista in carrilesEntrada.iteritems():
if not carrilEntrada in self.carrilesEntrada:
raise ValueError("Incoming lane "+carrilEntrada+" should already be registered in the intersection!")
self.carrilesEntrada[carrilEntrada] = lista
for carrilSalida, lista in carrilesSalida.iteritems():
if not carrilSalida in self.carrilesSalida:
raise ValueError("Outgoing lane "+carrilSalida+" should already be registered in the intersection!")
self.carrilesSalida[carrilSalida] = lista
# ToDo add verification for the connectors
self.conectores = conectores
def habilitarGrupo(self, idGrupo):
self.__deshabilitarGrupo(self.idGrupoActual)
for idCruce in self.grupos[idGrupo]:
self.cruces[idCruce][0].SetAttValue("State", "GREEN")
self.idGrupoActual = idGrupo
# ------------------------
# Private methods
# ------------------------
# Method in charge of computing the groups of crossings that can be enabled simultaneously.
# All possible combinations of crossings are generated, then we check in which of these
# combinations all the crossings are mutually compatible
def __calcularGrupos(self):
print " ++ Computing the possible crossing combinations..."
combinaciones = []
for i in range(1,len(self.cruces)):
#print " +++ Generating the combinations of "+str(i)+" crossings..."
combinaciones.extend(itertools.combinations(self.cruces,i))
print " +++ Generated "+str(len(combinaciones))+" candidate crossing groups..."
i = 0
for combinacion in combinaciones:
if self.__grupoCompatible(combinacion):
self.grupos[i] = combinacion
print " ++++ Registered group "+str(i)+" : ",combinacion
i += 1
print " +++ Identified ",len(self.grupos)," compatible crossing groups..."
# ------------------------
# Private helper methods
# ------------------------
def __deshabilitarGrupo(self, idGrupo):
# Implemented following EAFP https://docs.python.org/2/glossary.html#term-eafp
try:
for idCruce in self.grupos[idGrupo]:
self.cruces[idCruce][0].SetAttValue("State", "RED")
except KeyError:
if idGrupo != -1: # -1 is the initialization value...
raise ValueError(" !! Group "+str(idGrupo)+" does not exist!!")
# A group is considered compatible if all the crossings in the group are mutually compatible
def __grupoCompatible(self,grupo):
for cruce in grupo:
for candidato in grupo:
if not self.__sonCompatibles(cruce,candidato):
return False
return True
# Checks whether two crossings are compatible. They are compatible if none of their segments intersect
def __sonCompatibles(self,idCruce1,idCruce2):
segCruce1 = self.__generarSegmentos(idCruce1)
segCruce2 = self.__generarSegmentos(idCruce2)
for segmento1 in segCruce1:
for segmento2 in segCruce2:
if self.__segmentosSeCortan(segmento1,segmento2):
return False
return True
# Evaluates the position of the point relative to the segment by computing the cross product / right-hand rule
# A point c is a tuple (xc,yc)
# A segment is a tuple of points (a,b) with a = (xa,ya) and b = (xb,yb)
# Returns a float:
# If > 0 -> RIGHT | If < 0 -> LEFT | If = 0 -> On the segment
@staticmethod
def __posicionPtoRespectoSegmento(segmento, punto):
#debug = False
#if debug: print " ++++ Determining where the point",punto," lies relative to the segment ",segmento
# Segment coordinates
xa,ya = segmento[0]
xb,yb = segmento[1]
# Point coordinates
xc,yc = punto
# Cross product
respuesta = (xc-xa)*(yb-ya) - (xb-xa)*(yc-ya)
# if debug:
# if respuesta < 0:
# print " ++++ The point",punto," lies to the LEFT of the segment ",segmento
# elif respuesta == 0:
# print " ++++ The point",punto," is ALIGNED with the segment ",segmento
# else:
# print " ++++ The point",punto," lies to the RIGHT of the segment ",segmento
return respuesta
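# Worked example (editor's addition): for the south-to-north segment
# ((0, 0), (0, 1)), the point (1, 0) gives
#   (1-0)*(1-0) - (0-0)*(0-0) = 1 > 0  -> RIGHT,
# while (-1, 0) gives -1 < 0 -> LEFT, matching the convention above.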
# A crossing has one segment per outgoing lane; this function generates all the segments for
# the crossing whose ID is passed as a parameter.
def __generarSegmentos(self, idCruce):
cruce = self.cruces[idCruce]
carrilEntrada = cruce[1].split(',')
carrilEntrada = float(carrilEntrada[0]), float(carrilEntrada[1])
carrilesSalida = cruce[2]
segmentos = []
for carrilSalida in carrilesSalida:
carrilSalida = carrilSalida.split(',')
carrilSalida = float(carrilSalida[0]), float(carrilSalida[1])
segmentos.append((carrilEntrada,carrilSalida))
#print " ++++ Segments generated for crossing "+idCruce+" :: ",segmentos
return segmentos
# Determines whether the two segments passed as parameters intersect.
# They are considered to intersect if they share at least one point (endpoints included)
# EXCEPT if they share both points => segmento2 is the same as segmento1 but in the opposite direction
# EXCEPT if the origin point is the same => Cars leaving from the same lane do not collide
# EXCEPT if the origin of the first segment equals the destination of the second segment AND
# the destination of the first segment lies to the left of the second segment => They do not intersect;
# if the destination of the first segment lies to the RIGHT of the second segment => Right turn
#
# A segment is a tuple of points (a,b) with a = (xa,ya) and b = (xb,yb)
def __segmentosSeCortan(self,segmento1,segmento2):
#debug = False
#if debug: print " ++++ Determining whether the segments ",segmento1," and ",segmento2," intersect..."
# Case where the segments are the same but in opposite directions: for instance north to south
# and south to north
if segmento1[1] == segmento2[0] and segmento1[0] == segmento2[1]:
#if debug: print " |-> They do NOT intersect :: Same segment in opposite directions..."
return False # They do not intersect, hence they are compatible
# Case where the origin point is the same: for instance from north to south and from north
# to the west
if segmento1[0] == segmento2[0]:
#if debug: print " |-> They do NOT intersect :: The origin point is the same..."
return False # They leave from the same place, hence they are compatible
# Case where the origin of the first segment is the destination of the second, and the destination of
# the first lies to the right of the second: for instance north to south and south to west ==> Includes the first case!!
if segmento1[0] == segmento2[1] and self.__posicionPtoRespectoSegmento(segmento2, segmento1[1]) < 0 \
or segmento1[1] == segmento2[0] and self.__posicionPtoRespectoSegmento(segmento1, segmento2[1]) < 0:
#if debug: print " |-> They do NOT intersect :: Equal origin/destination points and destination to the right..."
return False
# All remaining cases:
posPto1 = self.__posicionPtoRespectoSegmento(segmento2, segmento1[0])
posPto2 = self.__posicionPtoRespectoSegmento(segmento2, segmento1[1])
if posPto1*posPto2 <= 0:
posPto1 = self.__posicionPtoRespectoSegmento(segmento1, segmento2[0])
posPto2 = self.__posicionPtoRespectoSegmento(segmento1, segmento2[1])
if posPto1*posPto2 <= 0:
#if debug: print " |-> They DO intersect :: The segments cross..."
return True
#if debug: print " |-> They do NOT intersect :: The segments do not cross..."
return False
|
vargax/RAPfSC
|
RAPfSC/modelo.py
|
Python
|
gpl-2.0
| 12,512
|
import sys, ctypes
"""
Python approximation of power.asm
Returns sys.argv[2] ** sys.argv[3]
"""
raise NotImplementedError("This file isn't done yet.")
def echo_args(): # See function 'echo_args' in echo.asm
global stack, r12, r13, r14, rdi, rsi, eax
stack.append(r12) # "push"
stack.append(r13) # "push"
stack.append(r14) # "push"
if rdi != 3: # zflag = 1 if rdi == 3 else 0
error1() # if zflag == 0 goto error1
r12 = rsi
rdi = r12[2] # rdi = pointer in asm
eax = int(rdi) # atoi(rdi) -> eax
if eax < 0:
error2()
r13 = eax # This was actually r13d in the code (r13[32:])
rdi = r12[1] # They're accessing memory locations (pointers)
eax = int(rdi) # atoi(rdi) -> eax
r14 = eax
eax = 1
def check():
global r13, r14, eax
ZF = 0 if r13 & r13 else 1
if ZF == 0:
gotit()
eax *= r14
r13 -= 1
check()
def gotit():
global answer, rdi, rsi, eax, rax
rdi = answer
rsi = eax # no sign extension in this code.
rax ^= rax # rax = xor(rax, rax)
printf()
done()
def printf(): # extern'd
print rsi ## Look up args for this
def main(): # See function 'main' in maxofthree.c
global rdi, rsi, rdx
rsi = list(reversed(sys.argv))
rdi = len(sys.argv)
echo_args()
exit(0)
if __name__ == "__main__":
""" Initialize registers (just because)
"""
rax = ctypes.c_int(64)
rdi = None
rsi = None
r12 = ctypes.c_int(64)
r13 = ctypes.c_int(64)
r14 = ctypes.c_int(64)
stack = []
main()
|
cmattoon/pyasm
|
examples/x-pow-y/power.py
|
Python
|
mit
| 1,642
|
from django.contrib import admin
from jmbo.admin import ModelBaseAdmin
from post.models import Post
admin.site.register(Post, ModelBaseAdmin)
|
praekelt/jmbo-post
|
post/admin.py
|
Python
|
bsd-3-clause
| 145
|
"""NDG Security Basic OpenID Authentication Interface.
A demonstration implementation of an authentication interface for
OpenIDProviderMiddleware WSGI. Username/password and OpenId user identifier
details are read from a config file and passed as keywords. This class is not
intended for production use.
NERC DataGrid Project
"""
__author__ = "P J Kershaw"
__date__ = "01/08/08"
__copyright__ = "(C) 2009 Science and Technology Facilities Council"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = "$Id$"
import logging
log = logging.getLogger(__name__)
from ndg.security.server.wsgi.openid.provider.authninterface import \
AbstractAuthNInterface, AuthNInterfaceInvalidCredentials, \
AuthNInterfaceRetrieveError, AuthNInterfaceConfigError, \
AuthNInterfaceUsername2IdentifierMismatch
class BasicAuthNInterface(AbstractAuthNInterface):
'''Basic Authentication interface class for OpenIDProviderMiddleware
it uses username/password details retrieved from config file / keyword
entry. This class is for testing only. NOT for production use'''
IDENTITY_URI_TMPL_KEYNAME = 'identityUriTemplate'
USERCREDS_PROPERTY_KEYNAME = 'userCreds'
USERCREDS_KEYNAMES = ('password', 'identifiers')
propertyKeyNames = (
USERCREDS_PROPERTY_KEYNAME,  # trailing comma keeps this a tuple, not a bare string
)
getUserIdentifier = staticmethod(lambda identityURI:
identityURI.rsplit('/')[-1])
def __init__(self, **prop):
"""Make any initial settings
Settings are held in a dictionary which can be set from **prop,
a call to setProperties() or by passing settings in an XML file
given by propFilePath
@type **prop: dict
@param **prop: set properties via keywords
@raise AuthNInterfaceConfigError: error with configuration
"""
# Test/Admin username/password set from ini/kw args
self._identityUriTemplate = prop.get(
BasicAuthNInterface.IDENTITY_URI_TMPL_KEYNAME)
userCredsField = prop.get(
BasicAuthNInterface.USERCREDS_PROPERTY_KEYNAME)
if not userCredsField:
raise AuthNInterfaceConfigError('No "%s" config option found' %
BasicAuthNInterface.USERCREDS_PROPERTY_KEYNAME)
self._userCreds = {}
for userEntry in userCredsField.split():
# Split username, password and OpenID name list
userCreds = userEntry.strip().split(':')
# Split OpenID name list
userCreds[-1] = tuple(userCreds[-1].split(','))
# Convert into a dictionary indexed by username
userCredsKeys = BasicAuthNInterface.USERCREDS_KEYNAMES
self._userCreds[userCreds[0]] = dict(zip(userCredsKeys,
userCreds[1:]))
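# Editor's sketch (illustrative values only): the userCreds option parsed
# above is whitespace-separated 'username:password:openid1,openid2' entries,
# so
#
#   iface = BasicAuthNInterface(
#       userCreds='pjk:testpassword:PhilipKershaw,P.J.Kershaw')
#
# yields self._userCreds == {'pjk': {'password': 'testpassword',
#                                    'identifiers': ('PhilipKershaw',
#                                                    'P.J.Kershaw')}}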
def logon(self, environ, identityURI, username, password):
"""Interface login method
@type environ: dict
@param environ: standard WSGI environ parameter
@type identityURI: basestring
@param identityURI: user's identity URL e.g.
'https://joebloggs.somewhere.ac.uk/'
@type username: basestring
@param username: username
@type password: basestring
@param password: corresponding password for username givens
@raise AuthNInterfaceInvalidCredentials: invalid username/password
@raise AuthNInterfaceUsername2IdentifierMismatch: no OpenID matching
the given username
"""
if self._userCreds.get(username, {}).get('password') != password:
raise AuthNInterfaceInvalidCredentials()
# Assume identifier is at the end of the URI
if identityURI is not None:
userIdentifier = BasicAuthNInterface.getUserIdentifier(identityURI)
if userIdentifier not in self._userCreds[username]['identifiers']:
raise AuthNInterfaceUsername2IdentifierMismatch()
def logout(self):
pass
def username2UserIdentifiers(self, environ, username):
"""Map the login username to an identifier which will become the
unique path suffix to the user's OpenID identifier. The
OpenIDProviderMiddleware takes self.urls['id_url'] and adds it to this
identifier:
identifier = self._authN.username2UserIdentifiers(environ,username)
identityURL = self.urls['url_id'] + '/' + identifier
@type environ: dict
@param environ: standard WSGI environ parameter
@type username: basestring
@param username: user identifier
@rtype: tuple
@return: identifiers to be used to make OpenID user identity URLs.
@raise AuthNInterfaceRetrieveError: error with retrieval of information
to identifier e.g. error with database look-up.
"""
try:
return self._userCreds[username]['identifiers']
except KeyError:
raise AuthNInterfaceRetrieveError('No entries for "%s" user' %
username)
from ndg.security.server.wsgi.utils.sessionmanagerclient import \
WSGISessionManagerClient, AuthNServiceInvalidCredentials
class BasicSessionManagerOpenIDAuthNInterface(BasicAuthNInterface):
'''Authentication interface class for OpenIDProviderMiddleware to enable
authentication to a Session Manager instance running in the same WSGI
stack or via a SOAP call to a remote service. This is a basic test
interface. See sessionmanager module for a full implementation linking to
a database via SQLAlchemy
'''
def __init__(self, **prop):
"""Extends BasicAuthNInterface initialising Session Manager Client
@type **prop: dict
@param **prop: set properties via keywords
@raise AuthNInterfaceConfigError: error with configuration
"""
user2Identifier = prop.pop('username2UserIdentifiers')
if user2Identifier:
self._username2Identifier = {}
for i in user2Identifier.split():
username, identifierStr = i.strip().split(':')
identifiers = tuple(identifierStr.split(','))
self._username2Identifier[username] = identifiers
else:
raise AuthNInterfaceConfigError('No "user2Identifier" config '
'option found')
self._client = WSGISessionManagerClient(**prop)
# This is set at login
self.sessionId = None
def logon(self, environ, userIdentifier, username, password):
"""Interface login method
@type environ: dict
@param environ: standard WSGI environ parameter
@type username: basestring
@param username: user identifier
@type password: basestring
@param password: corresponding password for username givens
@raise AuthNInterfaceUsername2IdentifierMismatch: no OpenID
identifiers match the given username
@raise AuthNInterfaceInvalidCredentials: invalid username/password
"""
if userIdentifier is not None and \
userIdentifier not in self._username2Identifier.get(username):
raise AuthNInterfaceUsername2IdentifierMismatch()
try:
self._client.environ = environ
connectResp = self._client.connect(username, passphrase=password)
self.sessionId = connectResp[-1]
log.debug("Connected to Session Manager with session ID: %s",
self.sessionId)
except AuthNServiceInvalidCredentials, e:
log.exception(e)
raise AuthNInterfaceInvalidCredentials()
def logout(self):
"""logout from the Session Manager
"""
try:
self._client.disconnect(sessID=self.sessionId)
except Exception, e:
log.exception(e)
raise AuthNInterfaceInvalidCredentials()
|
philipkershaw/ndg_security_server
|
ndg/security/server/wsgi/openid/provider/authninterface/basic.py
|
Python
|
bsd-3-clause
| 8,254
|
from pathlib import Path
from datetime import datetime, timedelta, timezone
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.parsers import FileUploadParser
from rest_framework import serializers
from rest_framework.response import Response
from hav.utils.imaginary import generate_thumbnail_url, generate_srcset_urls
from ...permissions import IncomingBaseMixin
from hav.apps.sources.uploads.models import FileUpload
from hav.apps.sources.filesystem.api.serializers import (
FileDetailSerializer as FSFileDetailSerializer,
)
class BaseFileSerializer(serializers.ModelSerializer):
name = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
preview_url = serializers.SerializerMethodField()
path = serializers.SerializerMethodField()
@property
def _config(self):
return self.context["source_config"]
@property
def request(self):
return self.context["request"]
def get_full_name(self, upload):
return upload.file.name
def get_preview_url(self, upload):
return generate_thumbnail_url(upload)
def get_url(self, upload):
rel_url = self._config.to_url(upload.pk, self.request)
return self.request.build_absolute_uri(rel_url)
def get_name(self, upload):
return Path(upload.file.name).name
def get_path(self, upload):
return str(upload.pk)
class Meta:
model = FileUpload
fields = (
"created_at",
"url",
"path",
"preview_url",
"name",
)
class FileDetailSerializer(FSFileDetailSerializer):
@property
def upload(self):
return self.context["upload"]
def get_srcset(self, p):
return generate_srcset_urls(self.upload)
def get_preview_url(self, p):
return generate_thumbnail_url(self.upload)
def get_url(self, path):
return self.request.build_absolute_uri(
self._config.to_url(self.upload.pk, self.request)
)
class CreateFileSerializer(serializers.ModelSerializer):
class Meta:
model = FileUpload
fields = ("file",)
class FileUploadBaseView(IncomingBaseMixin, APIView):
source_config = None
@property
def context(self):
return {"request": self.request, "source_config": self.source_config}
class FileUploadView(FileUploadBaseView):
def get(self, request):
date_cutoff = datetime.now(timezone.utc) - timedelta(hours=24)
serializer = BaseFileSerializer(
FileUpload.objects.filter(
created_by=self.request.user, created_at__gt=date_cutoff
).order_by("-created_at"),
many=True,
context=self.context,
)
return Response(data=serializer.data)
class FileDetailView(FileUploadBaseView):
parser_classes = [FileUploadParser]
def get(self, request, pk):
upload = get_object_or_404(FileUpload, pk=pk)
context = self.context
context.update({"upload": upload})
serializer = FileDetailSerializer(
instance=Path(self.source_config.root_path).joinpath(upload.file.name),
context=context,
)
return Response(serializer.data)
def put(self, request, **kwargs):
serializer = CreateFileSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
fu = serializer.save(created_by=request.user)
serializer = BaseFileSerializer(instance=fu, context=self.context)
return Response(data=serializer.data, status=201)
|
whav/hav
|
src/hav/apps/sources/uploads/api/views.py
|
Python
|
gpl-3.0
| 3,645
|
#!/usr/bin/env python
# coding: utf-8
import sys
import flask
from flask import g, url_for
from social.apps.flask_app.default.models import init_social
from social.actions import do_auth, do_complete
from social.apps.flask_app.utils import load_strategy, load_backend
from social.backends.facebook import FacebookOAuth2
from social.strategies.flask_strategy import FlaskStrategy
from social.utils import build_absolute_uri
# from social.strategies.utils import set_current_strategy_getter
# from social.apps.flask_app.routes import social_auth
# from social.apps.flask_app.template_filters import backends
from extensions import db
def init_social_models(app):
try:
import viralata
except ImportError:
sys.path.append('..')
init_social(app, db.session)
# set_current_strategy_getter(load_strategy)
def get_auth_url(backend, redirect_uri, *args, **kwargs):
uri = redirect_uri
if uri and not uri.startswith('/'):
uri = url_for(uri, backend=backend)
g.strategy = load_strategy()
g.backend = load_backend(g.strategy, backend, redirect_uri=uri,
*args, **kwargs)
resp = do_auth(g.backend)
return resp.location
def get_username(backend, redirect_uri):
g.strategy = load_strategy()
g.backend = load_backend(g.strategy, backend, redirect_uri=redirect_uri)
do_complete(g.backend, login=do_login)
return g.user.username
class HeadlessFacebookStrategy(FlaskStrategy):
name = 'facebook'
def build_absolute_uri(self, path=None):
# TODO: when the API is called directly (and not from another site) there
# is no "referrer". How should that be handled?
return build_absolute_uri(
flask.request.referrer, path).partition("&")[0]
class HeadlessFacebookBackend(FacebookOAuth2):
name = 'facebook'
def validate_state(self):
if not self.STATE_PARAMETER and not self.REDIRECT_STATE:
return None
# state = self.get_session_state()
request_state = self.get_request_state()
# if not request_state:
# raise AuthMissingParameter(self, 'state')
return request_state
def request(self, url, method='GET', *args, **kwargs):
from social.utils import user_agent
from social.exceptions import AuthFailed
from requests import request as req, ConnectionError
kwargs.setdefault('headers', {})
if self.setting('VERIFY_SSL') is not None:
kwargs.setdefault('verify', self.setting('VERIFY_SSL'))
kwargs.setdefault('timeout', self.setting('REQUESTS_TIMEOUT') or
self.setting('URLOPEN_TIMEOUT'))
if self.SEND_USER_AGENT and 'User-Agent' not in kwargs['headers']:
kwargs['headers']['User-Agent'] = user_agent()
try:
# import IPython; IPython.embed()
response = req(method, url, *args, **kwargs)
except ConnectionError as err:
raise AuthFailed(self, str(err))
try:
response.raise_for_status()
except:
print(response.json())
print(url)
print(kwargs)
raise
return response
def do_login(backend, user, social_user):
print("do_login", user, social_user)
def insert_user(user, is_new, **kwargs):
'''Adds the user to the Flask global context so that it can later be read
by get_username.'''
if user:
g.user = user
|
okfn-brasil/viralata
|
viralata/auths.py
|
Python
|
agpl-3.0
| 3,435
|
#! /usr/bin/env python
import sys
import numpy as np
import pandas as pd
from unittest import TestCase
from pandashells.lib import io_lib
from mock import patch, MagicMock
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class IOLibTests(TestCase):
def test_get_separator_csv(self):
config_dict = {'io_input_type': 'csv'}
args = MagicMock(input_options=['csv'])
self.assertEqual(',', io_lib.get_separator(args, config_dict))
def test_get_separator_table(self):
config_dict = {'io_input_type': 'csv'}
args = MagicMock(input_options=['table'])
self.assertEqual(r'\s+', io_lib.get_separator(args, config_dict))
def test_get_separator_default(self):
config_dict = {'io_input_type': 'csv'}
args = MagicMock(input_options=[])
self.assertEqual(',', io_lib.get_separator(args, config_dict))
def test_get_header_names_with_names_and_header(self):
args = MagicMock(names=['a'], input_options=[])
header, names = io_lib.get_header_names(args)
self.assertEqual(header, 0)
self.assertEqual(names, ['a'])
def test_get_header_names_with_names_and_no_header(self):
args = MagicMock(names=['a'], input_options=['noheader'])
header, names = io_lib.get_header_names(args)
self.assertEqual(header, None)
self.assertEqual(names, ['a'])
def test_get_header_names_with_no_names_and_header(self):
args = MagicMock(names=None, input_options=[])
header, names = io_lib.get_header_names(args)
self.assertEqual(header, 'infer')
self.assertEqual(names, None)
def test_get_header_names_with_no_names_and_no_header(self):
args = MagicMock(names=None, input_options=['noheader'])
header, names = io_lib.get_header_names(args)
self.assertEqual(header, None)
self.assertEqual(names, None)
def test_get_nan_rep_with_nan(self):
config_dict = {'io_output_na_rep': '-'}
args = MagicMock(io_output_na_rep=['-'])
self.assertEqual(io_lib.get_nan_rep(args, config_dict), '-')
def test_get_nan_rep_no_arg(self):
config_dict = {'io_output_na_rep': 'nan'}
args = MagicMock(io_output_na_rep=None)
self.assertTrue(np.isnan(io_lib.get_nan_rep(args, config_dict)))
@patch('pandashells.lib.io_lib.sys.stdin')
@patch('pandashells.lib.io_lib.pd')
def test_df_from_input_no_infile(self, pd_mock, stdin_mock):
pd_mock.read_csv = MagicMock(return_value=pd.DataFrame())
args = MagicMock(names=[], input_options=[])
io_lib.df_from_input(args, in_file=None)
self.assertEqual(pd_mock.read_csv.call_args_list[0][0][0], stdin_mock)
@patch('pandashells.lib.io_lib.pd')
def test_df_from_input_with_infile(self, pd_mock):
pd_mock.read_csv = MagicMock(return_value=pd.DataFrame())
args = MagicMock(names=[], input_options=[])
in_file = MagicMock()
io_lib.df_from_input(args, in_file=in_file)
self.assertEqual(pd_mock.read_csv.call_args_list[0][0][0], in_file)
@patch('pandashells.lib.io_lib.pd')
def test_df_from_input_no_input(self, pd_mock):
def raiser(*args, **kwargs):
raise ValueError()
pd_mock.read_csv = raiser
args = MagicMock(names=[], input_options=[])
in_file = MagicMock()
with self.assertRaises(SystemExit):
io_lib.df_from_input(args, in_file=in_file)
@patch('pandashells.lib.io_lib.pd')
def test_df_from_input_create_names(self, pd_mock):
df_in = pd.DataFrame(columns=[1, 2])
pd_mock.read_csv = MagicMock(return_value=df_in)
pd_mock.Index = pd.Index
args = MagicMock(names=[], input_options=['noheader'])
df = io_lib.df_from_input(args, in_file=None)
self.assertEqual(['c0', 'c1'], list(df.columns))
@patch('pandashells.lib.io_lib.sys')
def test_csv_writer(self, sys_mock):
sys_mock.stdout = StringIO()
df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'], index=[0, 1])
io_lib.csv_writer(df, header=True, index=False, na_rep='nan')
sys.stdout = sys.__stdout__
self.assertEqual('"a","b"\n1,2\n3,4\n', sys_mock.stdout.getvalue())
@patch('pandashells.lib.io_lib.sys')
def test_table_writer(self, sys_mock):
sys_mock.stdout = StringIO()
df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'], index=[0, 1])
io_lib.table_writer(df, header=True, index=False, na_rep='nan')
sys.stdout = sys.__stdout__
self.assertEqual(' a b\n 1 2\n 3 4\n', sys_mock.stdout.getvalue())
@patch('pandashells.lib.io_lib.sys')
def test_html_writer(self, sys_mock):
sys_mock.stdout = StringIO()
df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'], index=[0, 1])
io_lib.html_writer(df, header=True, index=False)
sys.stdout = sys.__stdout__
html = sys_mock.stdout.getvalue()
self.assertTrue('<th>a</th>' in html)
self.assertTrue('<th>b</th>' in html)
self.assertTrue('<td>1</td>' in html)
@patch('pandashells.lib.io_lib.get_nan_rep', MagicMock(return_value='nan'))
@patch('pandashells.lib.io_lib.csv_writer')
def test_df_to_output_no_header_no_index(self, csv_writer_mock):
args_mock = MagicMock(output_options=['csv', 'noheader'])
df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'], index=[0, 1])
io_lib.df_to_output(args_mock, df)
csv_writer_mock.assert_called_with(df, False, False, 'nan')
@patch('pandashells.lib.io_lib.get_nan_rep', MagicMock(return_value='nan'))
@patch('pandashells.lib.io_lib.csv_writer')
def test_df_to_output_csv_type(self, csv_writer_mock):
args_mock = MagicMock(output_options=['csv', 'index'])
df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'], index=[0, 1])
io_lib.df_to_output(args_mock, df)
csv_writer_mock.assert_called_with(df, True, True, 'nan')
@patch('pandashells.lib.io_lib.get_nan_rep', MagicMock(return_value='nan'))
@patch('pandashells.lib.io_lib.csv_writer')
def test_df_to_output_bad_type(self, csv_writer_mock):
args_mock = MagicMock(output_options=['bad'])
df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'], index=[0, 1])
io_lib.df_to_output(args_mock, df)
csv_writer_mock.assert_called_with(df, True, False, 'nan')
@patch('pandashells.lib.io_lib.sys')
def test_df_to_output_broken_stdout(self, sys_mock):
args_mock = MagicMock(output_options=['table'])
df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'], index=[0, 1])
sys_mock.stdout.write = MagicMock(side_effect=IOError)
io_lib.df_to_output(args_mock, df)
self.assertTrue(sys_mock.stdout.write.called)
|
shaunstanislaus/pandashells
|
pandashells/test/io_lib_tests.py
|
Python
|
bsd-2-clause
| 6,858
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import json
from os import path
from llnl.util import filesystem
from spack import *
class Hipsycl(CMakePackage):
"""hipSYCL is an implementation of the SYCL standard programming model
over NVIDIA CUDA/AMD HIP"""
homepage = "https://github.com/illuhad/hipSYCL"
url = "https://github.com/illuhad/hipSYCL/archive/v0.8.0.tar.gz"
git = "https://github.com/illuhad/hipSYCL.git"
maintainers = ["nazavode"]
provides("sycl")
version("stable", branch="stable", submodules=True)
version(
"0.9.1",
commit="fe8465cd5399a932f7221343c07c9942b0fe644c",
submodules=True)
version(
"0.8.0",
commit="2daf8407e49dd32ebd1c266e8e944e390d28b22a",
submodules=True,
)
variant(
"cuda",
default=False,
description="Enable CUDA backend for SYCL kernels",
)
depends_on("cmake@3.5:", type="build")
depends_on("boost +filesystem", when="@:0.8")
depends_on("boost@1.67.0:1.69.0 +filesystem +fiber +context cxxstd=17", when='@0.9.1:')
depends_on("python@3:")
depends_on("llvm@8: +clang", when="~cuda")
depends_on("llvm@9: +clang", when="+cuda")
# LLVM PTX backend requires cuda7:10.1 (https://tinyurl.com/v82k5qq)
depends_on("cuda@9:10.1", when="@0.8.1: +cuda")
# hipSYCL@:0.8.0 requires cuda@9:10.0 due to a known bug
depends_on("cuda@9:10.0", when="@:0.8.0 +cuda")
conflicts(
"%gcc@:4",
when='@:0.9.0',
msg="hipSYCL needs proper C++14 support to be built, %gcc is too old",
)
conflicts(
"%gcc@:8",
when='@0.9.1:',
msg="hipSYCL needs proper C++17 support to be built, %gcc is too old")
conflicts(
"^llvm build_type=Debug",
when="+cuda",
msg="LLVM debug builds don't work with hipSYCL CUDA backend; for "
"further info please refer to: "
"https://github.com/illuhad/hipSYCL/blob/master/doc/install-cuda.md",
)
def cmake_args(self):
spec = self.spec
args = [
"-DWITH_CPU_BACKEND:Bool=TRUE",
# TODO: no ROCm stuff available in spack yet
"-DWITH_ROCM_BACKEND:Bool=FALSE",
"-DWITH_CUDA_BACKEND:Bool={0}".format(
"TRUE" if "+cuda" in spec else "FALSE"
),
# prevent hipSYCL's cmake to look for other LLVM installations
# if the specified one isn't compatible
"-DDISABLE_LLVM_VERSION_CHECK:Bool=TRUE",
]
# LLVM directory containing all installed CMake files
# (e.g.: configs consumed by client projects)
llvm_cmake_dirs = filesystem.find(
spec["llvm"].prefix, "LLVMExports.cmake"
)
if len(llvm_cmake_dirs) != 1:
raise InstallError(
"concretized llvm dependency must provide "
"a unique directory containing CMake client "
"files, found: {0}".format(llvm_cmake_dirs)
)
args.append(
"-DLLVM_DIR:String={0}".format(path.dirname(llvm_cmake_dirs[0]))
)
# clang internal headers directory
llvm_clang_include_dirs = filesystem.find(
spec["llvm"].prefix, "__clang_cuda_runtime_wrapper.h"
)
if len(llvm_clang_include_dirs) != 1:
raise InstallError(
"concretized llvm dependency must provide a "
"unique directory containing clang internal "
"headers, found: {0}".format(llvm_clang_include_dirs)
)
args.append(
"-DCLANG_INCLUDE_PATH:String={0}".format(
path.dirname(llvm_clang_include_dirs[0])
)
)
# target clang++ executable
llvm_clang_bin = path.join(spec["llvm"].prefix.bin, "clang++")
if not filesystem.is_exe(llvm_clang_bin):
raise InstallError(
"concretized llvm dependency must provide a "
"valid clang++ executable, found invalid: "
"{0}".format(llvm_clang_bin)
)
args.append(
"-DCLANG_EXECUTABLE_PATH:String={0}".format(llvm_clang_bin)
)
# explicit CUDA toolkit
if "+cuda" in spec:
args.append(
"-DCUDA_TOOLKIT_ROOT_DIR:String={0}".format(
spec["cuda"].prefix
)
)
return args
@run_after("install")
def filter_config_file(self):
config_file_paths = filesystem.find(self.prefix, "syclcc.json")
if len(config_file_paths) != 1:
raise InstallError(
"installed hipSYCL must provide a unique compiler driver "
"configuration file, found: {0}".format(config_file_paths)
)
config_file_path = config_file_paths[0]
with open(config_file_path) as f:
config = json.load(f)
# 1. Fix compiler: use the real one in place of the Spack wrapper
config["default-cpu-cxx"] = self.compiler.cxx
# 2. Fix stdlib: we need to make sure cuda-enabled binaries find
# the libc++.so and libc++abi.so dyn linked to the sycl
# ptx backend
rpaths = set()
so_paths = filesystem.find(self.spec["llvm"].prefix, "libc++.so")
if len(so_paths) != 1:
raise InstallError(
"concretized llvm dependency must provide a "
"unique directory containing libc++.so, "
"found: {0}".format(so_paths)
)
rpaths.add(path.dirname(so_paths[0]))
so_paths = filesystem.find(self.spec["llvm"].prefix, "libc++abi.so")
if len(so_paths) != 1:
raise InstallError(
"concretized llvm dependency must provide a "
"unique directory containing libc++abi.so, "
"found: {0}".format(so_paths)
)
rpaths.add(path.dirname(so_paths[0]))
config["default-cuda-link-line"] += " " + " ".join(
"-rpath {0}".format(p) for p in rpaths
)
# Replace the installed config file
with open(config_file_path, "w") as f:
json.dump(config, f, indent=2)
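# Editor's note -- typical usage sketch (standard Spack CLI; the spec values
# are illustrative):
#
#   $ spack install hipsycl +cuda ^llvm@9
#
# enables the CUDA backend and pins the LLVM/clang toolchain whose paths this
# recipe wires into syclcc.json above.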
|
LLNL/spack
|
var/spack/repos/builtin/packages/hipsycl/package.py
|
Python
|
lgpl-2.1
| 6,401
|
#! /usr/bin/env python
from random import shuffle
from math import sqrt
from functools import total_ordering
from heapq import heappop, heappush
def dist(puzzle, idx1, idx2):
"""Get the distance between two cells (as the number of moves)."""
size = int(sqrt(len(puzzle)))
# difference of row and column number
rdiff = abs((idx1 // size) - (idx2 // size))
cdiff = abs((idx1 % size) - (idx2 % size))
return rdiff + cdiff
def hamming_dist(puzzle):
"""Return the number of misplaced tiles."""
return len([i for i, v in enumerate(puzzle) if v != i+1 and v != len(puzzle)])
def manhattan_dist(puzzle):
"""Return the sum of the distances of the tiles from their goal positions."""
return sum([dist(puzzle, i, v-1) for i, v in enumerate(puzzle) if v != len(puzzle)])
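# Worked example (editor's addition): for the 3x3 state [1, 2, 3, 4, 5, 6, 8, 7, 9]
# (9 is the blank) only tiles 8 and 7 are misplaced, each one column away from
# its goal, so hamming_dist(...) == 2 and manhattan_dist(...) == 1 + 1 == 2.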
def is_solvable(puzzle):
"""Check if the puzzle is solvable."""
# count the number of inversions
inversions = 0
for i, v in [(i, v) for i, v in enumerate(puzzle) if v != len(puzzle)]:
j = i + 1
while j < len(puzzle):
if puzzle[j] < v:
inversions += 1
j += 1
# check if the puzzle is solvable
size = int(sqrt(len(puzzle)))
# grid width is odd and number of inversion even -> solvable
if size % 2 != 0 and inversions % 2 == 0:
return True
# grid width is even
if size % 2 == 0:
emptyrow = size - puzzle.index(len(puzzle)) // size
return (emptyrow % 2 != 0) == (inversions % 2 == 0)
return False
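def _demo_solvability():
    """Editor's addition: inversion parity decides solvability (together with
    the blank's row on even widths). On the 2x2 grid the blank is tile 4."""
    assert is_solvable([1, 2, 4, 3])      # one slide away from the goal
    assert not is_solvable([2, 1, 3, 4])  # 1 and 2 swapped: odd permutation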
def is_solved(puzzle):
"""Return True is the puzzle is solved, False otherwise."""
# simply check if the list is sorted
return all(puzzle[i] < puzzle[i + 1] for i in range(len(puzzle) - 1))
def _neighbors(puzzle, location):
"""Get the indexes of the neighbors cells."""
size = int(sqrt(len(puzzle)))
# above cell
if location - size >= 0:
yield (location - size)
# left cell
if (location % size) - 1 >= 0:
yield (location - 1)
# right cell
if (location % size) + 1 < size:
yield (location + 1)
# below cell
if location + size < len(puzzle):
yield (location + size)
def _swap(puzzle, moves, x, y):
"""Swap two cells of the puzzle and store the move."""
# the move has to be (non empty cell, empty cell)
x, y = (x, y) if puzzle[y] == len(puzzle) else (y, x)
puzzle[x], puzzle[y] = puzzle[y], puzzle[x]
moves.append((x, y))
def _hitch(puzzle, location, immovables):
"""Get the index of the cell that blocks the cell in location from
its final position, otherwise returns None."""
dest = puzzle[location] - 1
close = [i for i in _neighbors(puzzle, location)
if puzzle[i] not in immovables and puzzle[i] != len(puzzle)
and dist(puzzle, i, dest) < dist(puzzle, location, dest)]
if close:
# returns the hitch with the minimum distance from the final position
return min(close, key=lambda i: dist(puzzle, i, dest))
def _approach(puzzle, moves, location):
"""Check if the empty cell is one of the neighbors of the cell in location,
if the empty cell is closer to the cell final position _swap the empty cells
with the cell in location."""
empty = puzzle.index(len(puzzle))
# if the empty cell is between the cell neighbors
if empty in _neighbors(puzzle, location):
dest = puzzle[location] - 1
# compare distance between actual location / empty cell and final position
if dist(puzzle, empty, dest) < dist(puzzle, location, dest):
# approach the cell in location
_swap(puzzle, moves, empty, location)
return True
return False
def _is_movable(puzzle, location):
"""Check if the empty cell is one of the neighbors of the cell in location."""
return puzzle.index(len(puzzle)) in _neighbors(puzzle, location)
def _slide_empty_rec(puzzle, moves, location, immovables):
"""Apply a recursive algorithm to slide the empty cell."""
# if this cell can't be moved try to move one of its neighbor
if not _is_movable(puzzle, location):
immovables.add(puzzle[location])
# for each neighbors that could be moved (prevent infinite loops)
close = [x for x in _neighbors(puzzle, location)
if puzzle[x] not in immovables]
# first the cells closer to the empty one (shortest path)
close.sort(key=lambda e: dist(puzzle, e, puzzle.index(len(puzzle))))
for n in close:
if _slide_empty_rec(puzzle, moves, n, immovables):
# now the empty cell is a neighbor of the cell in location
_slide_empty_rec(puzzle, moves, location, immovables)
return True
return False
else:
immovables.discard(puzzle[location])
# swap with the empty cell
_swap(puzzle, moves, puzzle.index(len(puzzle)), location)
return True
def _slide_empty(puzzle, moves, location, immovables=None):
"""Replace the cell in location with the empty one."""
# init the set of the cells that can't be moved
unmov = set(immovables) if immovables else set()
return _slide_empty_rec(puzzle, moves, location, unmov)
def _place(puzzle, moves, piece, immovables=None):
"""Try to place a specific piece of the puzzle."""
idx = puzzle.index(piece)
unmovables = immovables or set([i + 1 for i in range(piece)])
# while the piece is not in its final position
while idx != piece - 1:
# try a simple approach swapping with the empty cell
if not _approach(puzzle, moves, idx):
# search the hitch
h = _hitch(puzzle, idx, unmovables)
# tries to slide the empty cell in place of the hitch
if not _slide_empty(puzzle, moves, h, unmovables):
return False
# update piece location
idx = puzzle.index(piece)
return True
def _place_3(puzzle8, moves):
"""Place the piece number 3 in a puzzle 8."""
if _place(puzzle8, moves, 3):
return True
# check if the piece is under its final location
if puzzle8.index(3) != 5:
return False
# place the empty cell in the first location
_slide_empty(puzzle8, moves, 0, set((2, 3)))
# place 3 in its final location
_slide_empty(puzzle8, moves, 5, set((puzzle8[4],)))
# place 2 and 1
_slide_empty(puzzle8, moves, 0, set((1, 3)))
_swap(puzzle8, moves, 0, 3)
return True
def _place_5(puzzle8, moves):
"""Place the piece number 5 in a puzzle 8."""
if _place(puzzle8, moves, 5):
return True
# check if the piece is under its final location
if puzzle8.index(5) != 7:
return False
# place the empty cell above 5
_slide_empty(puzzle8, moves, 4, set((5,)))
# approach 5 to its final location
_swap(puzzle8, moves, 4, 7)
# rotate the bottom right square once
_slide_empty(puzzle8, moves, 5, set((5,)))
_swap(puzzle8, moves, 5, 4)
# place the empty cell in the bottom left corner
_slide_empty(puzzle8, moves, 6, set((puzzle8[7],)))
# place 5 and 6
_slide_empty(puzzle8, moves, 4, set((4,)))
_swap(puzzle8, moves, 5, 4)
_swap(puzzle8, moves, 5, 8)
return True
def _place_6(puzzle8, moves):
"""Place the piece number 6 in a puzzle 8."""
if _place(puzzle8, moves, 6):
return True
# check the location of 6
location = puzzle8.index(6)
if location == 8:
# place the empty cell in the first location
_slide_empty(puzzle8, moves, 3, set((5, 6)))
# place 6 in its final location
_slide_empty(puzzle8, moves, 8, set((puzzle8[7],)))
# place 5 and 4
_slide_empty(puzzle8, moves, 4, set((6,)))
_swap(puzzle8, moves, 3, 4)
_swap(puzzle8, moves, 3, 6)
elif location == 7 and puzzle8.index(9) == 6:
# place the empty cell above 6
_slide_empty(puzzle8, moves, 4, set((6,)))
# _swap the empty cell with 6
_swap(puzzle8, moves, 7, 4)
# place the empty cell in the final location of 6
_slide_empty(puzzle8, moves, 5, set((6,)))
# place 6, 5 and 4
_swap(puzzle8, moves, 5, 4)
_swap(puzzle8, moves, 3, 4)
_swap(puzzle8, moves, 6, 3)
else:
return False
return True
def solve8_heuristic(puzzle8):
"""Solve a 8 puzzle using heuristic."""
# check the size of the puzzle
if len(puzzle8) != 9:
raise ValueError('Invalid size')
# check if the puzzle is solvable
if not is_solvable(puzzle8) or is_solved(puzzle8):
return None
moves = []
p8 = list(puzzle8)
# place one piece after the other
_place(p8, moves, 1)
_place(p8, moves, 2)
_place_3(p8, moves)
_place(p8, moves, 4)
_place_5(p8, moves)
_place_6(p8, moves)
_place(p8, moves, 7)
_place(p8, moves, 8)
return tuple(moves) if is_solved(p8) else None
def solve3_heuristic(puzzle3):
"""Solve a 3 puzzle using heuristic."""
# check the size of the puzzle
if len(puzzle3) != 4:
raise ValueError('Invalid size')
# check if the puzzle is solvable
if not is_solvable(puzzle3) or is_solved(puzzle3):
return None
moves = []
p3 = list(puzzle3)
# place one piece after the other
for i in [1, 2, 3]:
_place(p3, moves, i)
return tuple(moves) if is_solved(p3) else None
def _place_13(puzzle15, moves):
"""Place the piece number 13 in a puzzle 15."""
if _place(puzzle15, moves, 13, set((1, 2, 3, 4, 5, 9, 13))):
return True
# check if the piece is next to its final location
if puzzle15.index(13) != 13:
return False
# remove the second element from the column
_slide_empty(puzzle15, moves, 4, set((1, 2, 3, 4, 9, 13)))
# place the empty cell in the final position
_slide_empty(puzzle15, moves, 12)
# place 13 in its final position
_swap(puzzle15, moves, 13, 12)
# place the empty cell above 13
_slide_empty(puzzle15, moves, 8, set((13,)))
# place cells 9 and 5
_swap(puzzle15, moves, 4, 8)
_swap(puzzle15, moves, 4, 5)
return True
def _place_4(puzzle15, moves):
"""Place the piece number 4 in a puzzle 15."""
if _place(puzzle15, moves, 4):
return True
# check if the piece is under its final location
if puzzle15.index(4) != 7:
return False
# remove the second element from the row
_slide_empty(puzzle15, moves, 1, set((1, 3, 4)))
# place the empty cell in the final position
_slide_empty(puzzle15, moves, 3, set((1,)))
# place 4 in its final position
_swap(puzzle15, moves, 7, 3)
# place the empty cell next to 4
_slide_empty(puzzle15, moves, 2, set((4,)))
# place cells 3 and 2
_swap(puzzle15, moves, 2, 1)
_swap(puzzle15, moves, 1, 5)
return True
def _puzzle8(puzzle15):
"""Return the bottom right puzzle 8 from a puzzle 15."""
map8 = {6:1, 7:2, 8:3, 10:4, 11:5, 12:6, 14:7, 15:8, 16:9}
return [map8[v] for v in puzzle15 if v in map8]
def _puzzle15(puzzle15, moves, puzzle8, moves8):
"""Write the solved bottom-right 8 puzzle back into the 15 puzzle and
append the translated moves."""
map15 = {1:6, 2:7, 3:8, 4:10, 5:11, 6:12, 7:14, 8:15, 9:16}
pos = lambda i: (i % 3) + 1 + (4 * ((i // 3) + 1))
# place the cells in the 15 puzzle
for i, v in enumerate(puzzle8):
puzzle15[pos(i)] = map15[v]
# add the moves used to solve the 8 puzzle
for m in moves8:
moves.append((pos(m[0]), pos(m[1])))
def solve15_heuristic(puzzle15, subOpt=False):
"""Solve a 15 puzzle using heuristic."""
# check the size of the puzzle
if len(puzzle15) != 16:
raise ValueError('Invalid size')
# check if the puzzle is solvable
if not is_solvable(puzzle15) or is_solved(puzzle15):
return None
moves = []
immovables = set()
p15 = list(puzzle15)
# place the first row
for p in [1, 2, 3]:
immovables.add(p)
_place(p15, moves, p, immovables)
_place_4(p15, moves)
# place the first column
for p in [5, 9]:
immovables.add(p)
_place(p15, moves, p, immovables)
_place_13(p15, moves)
# build and solve the sub-puzzle 8
p8 = _puzzle8(p15)
m8 = solve(p8) if subOpt else solve8_heuristic(p8)
if not m8:
return None
# write the solved sub-puzzle (tiles 1..9) back into the 15 puzzle, replaying the m8 steps
_puzzle15(p15, moves, range(1, 10), m8)
# return moves if the puzzle is solved, None otherwise
return tuple(moves) if is_solved(p15) else None
@total_ordering
class Puzzle:
"""Represent the current configuration of a puzzle."""
def __init__(self, puzzle, steps, priority, lastStep=None):
self.puzzle = puzzle
self.steps = steps
self.priority = priority
self.lastStep = lastStep[::-1] if lastStep else None
def __eq__(self, other):
"""Check if both instances have the same priority."""
return self.priority == other.priority
def __lt__(self, other):
"""Check if this instance has a lower priority."""
return self.priority < other.priority
def __repr__(self):
"""Return a string representation of this instance."""
info = 'Puzzle: {}\nSteps: {}\nPriority: {}'
return info.format(self.puzzle, len(self.steps), self.priority)
def valid_moves(self):
"""Return a list of possible moves."""
empty = self.puzzle.index(len(self.puzzle))
# get all the neighbors of the empty cell
for n in _neighbors(self.puzzle, empty):
step = (n, empty)
if step != self.lastStep:
yield step
def apply_move(self, move, priority):
"""Apply the move to the current puzzle and return the new configuration."""
puzzle = list(self.puzzle)
steps = list(self.steps)
x, y = move
puzzle[x], puzzle[y] = puzzle[y], puzzle[x]
steps.append(move)
return Puzzle(puzzle, steps, priority, move)
def _compute_priority(puzzle, move, p):
"""Compute the new priority after the move specified."""
x, empty = move
idx = puzzle[x] - 1
return p - dist(puzzle, x, idx) + dist(puzzle, empty, idx)
def solve(puzzle, solutionFound=None, lowerBound=None):
"""Solve the puzzle and returns the steps made.
Calls solutionFound every time a new valid solution is found.
Stop the search if a soluzione with a number of steps lower or equal to
the lowerBound if found. If lowerBound is equal to -1 returns the first
solution."""
# check if the puzzle is solvable
if not is_solvable(puzzle) or is_solved(puzzle):
return None
# compute a first heuristic solution
if len(puzzle) == 16:
bestSteps = solve15_heuristic(puzzle, subOpt=True)
elif len(puzzle) == 9:
bestSteps = solve8_heuristic(puzzle)
elif len(puzzle) == 4:
bestSteps = solve3_heuristic(puzzle)
else:
bestSteps = None
# print and/or return the first solution
if bestSteps:
if solutionFound:
solutionFound(tuple(bestSteps))
if lowerBound and (lowerBound == -1 or len(bestSteps) <= lowerBound):
return bestSteps
# init the frontier with the original puzzle
frontier = []
heappush(frontier, Puzzle(puzzle, [], manhattan_dist(puzzle)))
# add new steps while the frontier is not empty
while frontier:
# get the next puzzle configuration
currState = heappop(frontier)
# check if the puzzle is solved
if is_solved(currState.puzzle):
# callback for the new solution
if solutionFound:
solutionFound(tuple(currState.steps))
# update the best solution
bestSteps = tuple(currState.steps)
# stop search if we reach the lower bound
if lowerBound and len(bestSteps) <= lowerBound:
break
# iterate over all possible moves
for move in currState.valid_moves():
# compute the priority of the puzzle after the move
# the priority represents the minimum number of steps required
# in order to reach the final configuration (the solved puzzle)
priority = _compute_priority(currState.puzzle, move, currState.priority)
# add the new configuration only if we can reach a better solution
if not bestSteps or (len(currState.steps) + 1 + priority) < len(bestSteps):
heappush(frontier, currState.apply_move(move, priority))
    # search is over, return the best steps found
return bestSteps
def display(puzzle):
"""Print a formatted grid."""
size = int(sqrt(len(puzzle)))
print((('{:4d}' * size + '\n') * size).format(*puzzle))
def spuzzle(size):
"""Returns a new valid random puzzle."""
puzzle = [i+1 for i in range(size ** 2)]
# shuffle the puzzle until it's solvable
shuffle(puzzle)
while not is_solvable(puzzle):
shuffle(puzzle)
return puzzle
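# --- Hedged usage sketch (not part of the original module) ---
# Ties the pieces above together: build a random solvable 3x3 puzzle,
# solve it, and replay the returned (tile, empty) swaps. Relies only on
# functions defined in this file.
if __name__ == '__main__':
    p = spuzzle(3)              # values 1..9, 9 marks the empty cell
    display(p)
    steps = solve(p)            # None if already solved or unsolvable
    if steps:
        for x, y in steps:      # each step swaps a tile into the empty cell
            p[x], p[y] = p[y], p[x]
        print('solved in {} moves'.format(len(steps)))
        display(p)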
|
gliderkite/puzzle15
|
puzzle15.py
|
Python
|
mit
| 15,938
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
"""
(c) 2014 Ronan Delacroix
Doremi API Requests definition
:author: Ronan Delacroix
"""
import six
import tbx.bytes
class MessageDefinition(object):
"""
Request Definition object.
"""
def __init__(self, name, key, elements=None):
self.name = name
if six.PY3:
self.key = bytes.fromhex(key)
else:
self.key = str(key).decode('hex')
        self.elements = elements or []  # list of Element or ResponseElement
@property
def element_names(self):
return [e.name for e in self.elements]
class Element(object):
"""
Message Element Definition
"""
def __init__(self, name, func, **kwargs):
self.name = name
self.func = func
self.kwargs = kwargs
class ResponseElement(Element):
"""
Response Message Element Definition
"""
def __init__(self, name, start, end, func, text_translate=None):
self.name = name
self.func = func
self.start = start
self.end = end
self.text_translate = text_translate
class ResponseBatch(Element):
"""
    Batched Response Message Element Definition (a length-prefixed array of fixed-size items)
"""
def __init__(self, name, start, end, sub_elements=None):
super(ResponseBatch, self).__init__(name=name, func=self.func)
self.start = start
self.end = end
self.sub_elements = sub_elements or []
self.text_translate = None
def func(self, byte_array):
result = []
length = tbx.bytes.bytes_to_int(byte_array[0:4])
item_size = tbx.bytes.bytes_to_int(byte_array[4:8])
for i in range(0, length):
item = {}
chunk = byte_array[8+i*item_size:8+(i+1)*item_size]
for e in self.sub_elements:
sub_chunk = chunk[e.start:e.end]
item[e.name] = e.func(sub_chunk)
if e.text_translate:
item[e.name+'_text'] = e.text_translate.get(item[e.name], 'unknown value')
result.append(item)
return result
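# Hedged reading of the wire layout assumed by ResponseBatch.func() above:
#   bytes 0..3  -> item count (via tbx.bytes.bytes_to_int)
#   bytes 4..7  -> size of each item in bytes
#   bytes 8..   -> `length` consecutive items of `item_size` bytes,
#                  each sliced per sub-element [start:end]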
class MessageList(object):
"""
Message class.
Allows to index a request/response definition set.
"""
def __init__(self, messages):
self.messages = messages
self.index_by_name = {}
self.index_by_key = {}
for d in self.messages:
self.index_by_key[d.key] = d
self.index_by_name[d.name] = d
def get_by_name(self, name):
return self.index_by_name.get(name, None)
def get_by_key(self, k):
if isinstance(k, str):
if six.PY3:
k = bytes.fromhex(k)
else:
                k = k.decode('hex')  # hex string -> raw bytes (mirrors MessageDefinition)
return self.index_by_key.get(bytes(k), None)
def get(self, key_or_name):
if isinstance(key_or_name, bytes) or isinstance(key_or_name, bytearray):
return self.get_by_key(key_or_name) or self.get_by_name(key_or_name)
else:
return self.get_by_name(key_or_name) or self.get_by_key(key_or_name)
def list_names(self):
return self.index_by_name.keys()
def list_keys(self):
return self.index_by_key.keys()
def __getattr__(self, name):
if name in self.index_by_name.keys():
return self.get_by_name(name)
        raise AttributeError(name)
class MessageListWrapper(MessageList):
"""
Module Wrapper Class.
Same as parent class but can wrap a module.
See here for wrapping module class : http://stackoverflow.com/questions/2447353/getattr-on-a-module
"""
def __init__(self, wrapped, messages):
self.wrapped = wrapped
super(MessageListWrapper, self).__init__(messages)
def __getattr__(self, name):
"""
Fall back on module to get attributes
"""
try:
            return super(MessageListWrapper, self).__getattr__(name)
except AttributeError:
return getattr(self.wrapped, name)
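# --- Hedged usage sketch (not part of the original module) ---
# Shows the intended MessageDefinition/MessageList round trip; the
# message name and hex key below are made up for the example.
if __name__ == '__main__':
    ping = MessageDefinition('Ping', '0102')
    messages = MessageList([ping])
    assert messages.get_by_name('Ping') is ping
    assert messages.get('0102') is ping   # name miss falls back to key lookup
    print(messages.list_names())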
|
ronhanson/python-dcitools
|
dcitools/devices/doremi/message.py
|
Python
|
mit
| 4,018
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for rpm package deploy and install using presto-admin
"""
import logging
from fabric.context_managers import settings, hide
from fabric.decorators import task, runs_once
from fabric.operations import sudo, put, os, local
from fabric.state import env
from fabric.tasks import execute
from fabric.utils import abort
from prestoadmin import topology
from prestoadmin.util import constants
from prestoadmin.util.fabricapi import get_host_list
_LOGGER = logging.getLogger(__name__)
__all__ = ['install']
@task
@runs_once
def install(local_path):
"""
Install the rpm package on the cluster
Args:
local_path: Absolute path to the rpm to be installed
--nodeps (optional): Flag to indicate if rpm install
should ignore checking package dependencies. Equivalent
to adding --nodeps flag to rpm -i.
"""
topology.set_topology_if_missing()
execute(deploy_install, local_path,
hosts=get_host_list())
def check_if_valid_rpm(local_path):
_LOGGER.info("Checking rpm checksum to see if it is corrupted")
with settings(hide('warnings', 'stdout'), warn_only=True):
result = local('rpm -K --nosignature ' + local_path, capture=True)
if 'MD5 NOT OK' in result.stdout:
abort("Corrupted RPM. Try downloading the RPM again.")
elif result.stderr:
abort(result.stderr)
def deploy_install(local_path):
check_if_valid_rpm(local_path)
deploy(local_path)
rpm_install(os.path.basename(local_path))
def deploy(local_path=None):
_LOGGER.info("Deploying rpm on %s..." % env.host)
print("Deploying rpm on %s..." % env.host)
sudo('mkdir -p ' + constants.REMOTE_PACKAGES_PATH)
ret_list = put(local_path, constants.REMOTE_PACKAGES_PATH, use_sudo=True)
if not ret_list.succeeded:
_LOGGER.warn("Failure during put. Now using /tmp as temp dir...")
ret_list = put(local_path, constants.REMOTE_PACKAGES_PATH,
use_sudo=True, temp_dir='/tmp')
if ret_list.succeeded:
print("Package deployed successfully on: " + env.host)
def rpm_install(rpm_name):
_LOGGER.info("Installing the rpm")
nodeps = ''
if env.nodeps:
nodeps = '--nodeps '
ret = sudo('rpm -i %s%s' %
(nodeps, constants.REMOTE_PACKAGES_PATH + "/" + rpm_name))
if ret.succeeded:
print("Package installed successfully on: " + env.host)
|
Svjard/presto-admin
|
prestoadmin/package.py
|
Python
|
apache-2.0
| 2,988
|
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import gettext
import re
from twisted.internet import defer
from flumotion.admin.assistant.models import AudioEncoder, VideoEncoder, Muxer
from flumotion.admin.gtk.workerstep import WorkerWizardStep
from flumotion.ui.wizard import WizardStep
from flumotion.common import errors, messages
from flumotion.common.i18n import N_, gettexter
from flumotion.scenario.steps.summarysteps import LiveSummaryStep
__version__ = "$Rev$"
_ = gettext.gettext
T_ = gettexter()
_PREFERRED_VIDEO_ENCODER = "theora"
_PREFERRED_AUDIO_ENCODER = "vorbis"
# the denominator arg for all calls of this function was sniffed from
# the glade file's spinbutton adjustment
def _fraction_from_float(number, denominator):
"""
Return a string to be used in serializing to XML.
"""
return "%d/%d" % (number * denominator, denominator)
class ConversionStep(WorkerWizardStep):
name = 'Encoding'
title = _('Encoding')
section = _('Conversion')
gladeFile = 'encoding-wizard.glade'
docSection = 'help-configuration-assistant-encoders'
docAnchor = ''
docVersion = 'local'
def __init__(self, wizard):
WorkerWizardStep.__init__(self, wizard)
self._muxer = None
# Public API
def getAudioPage(self):
if self.wizard.getScenario().hasAudio(self.wizard):
return self._getAudioPage()
return None
def getMuxerType(self):
"""Returns the component-type, such as "ogg-muxer"
of the currently selected muxer.
@returns: the muxer
@rtype: string
"""
entry = self.muxer.get_selected()
return entry.componentType
def getMuxerFormat(self):
"""Returns the format of the muxer, such as "ogg".
@returns: the muxer formats
@rtype: string
"""
entry = self.muxer.get_selected()
return entry.getProvidedMediaTypes()[0]
def getAudioFormat(self):
"""Returns the format of the audio encoder, such as "vorbis"
@returns: the audio format
@rtype: string
"""
if self.wizard.getScenario().getAudioEncoder():
entry = self.audio.get_selected()
return entry.getProvidedMediaTypes()[0]
def getVideoFormat(self):
"""Returns the format of the video encoder, such as "theora"
@returns: the video format
@rtype: string
"""
if self.wizard.getScenario().getVideoEncoder():
entry = self.video.get_selected()
return entry.getProvidedMediaTypes()[0]
# WizardStep
def activated(self):
data = [('muxer', self.muxer, None, None)]
if self.wizard.getScenario().hasAudio(self.wizard):
oldAudioEncoder = self.wizard.getScenario().getAudioEncoder()
data.append(('audio-encoder', self.audio,
_PREFERRED_AUDIO_ENCODER,
oldAudioEncoder))
else:
self.audio.hide()
self.label_audio.hide()
self.wizard.getScenario().setAudioEncoder(None)
if self.wizard.getScenario().hasVideo(self.wizard):
oldVideoEncoder = self.wizard.getScenario().getVideoEncoder()
data.append(('video-encoder', self.video,
_PREFERRED_VIDEO_ENCODER,
oldVideoEncoder))
else:
self.video.hide()
self.label_video.hide()
self.wizard.getScenario().setVideoEncoder(None)
# If there is data in the combo already, do not populate it,
        # because it means we're pressing "back" in the wizard and the
# combo is already populated.
hasVideo = len(self.video) > 0
hasAudio = len(self.audio) > 0
if not hasVideo or not hasAudio:
self._populateCombos(data)
def getNext(self):
#TODO: Share in some way this code with the productionsteps page.
if self.wizard.getScenario().hasVideo(self.wizard):
return self._getVideoPage()
elif self.wizard.getScenario().hasAudio(self.wizard):
return self._getAudioPage()
else:
raise AssertionError
def workerChanged(self, worker):
if self._muxer:
self._muxer.workerChanged(worker)
# Private
def _populateCombos(self, combos, provides=None):
self.debug("populating combos %r", combos)
self.wizard.waitForTask('querying encoders')
defers = []
for ctype, combo, defaultType, oldComponent in combos:
combo.prefill([('...', None)])
combo.set_sensitive(False)
d = self.wizard.getWizardEntries(
wizardTypes=[ctype],
provides=provides)
d.addCallback(self._addEntries, ctype, combo, defaultType,
oldComponent)
defers.append(d)
d = defer.DeferredList(defers)
d.addCallback(lambda x: self.wizard.taskFinished())
return d
def _canAddMuxer(self, entry):
# Fetch the media types the muxer accepts ('audio', 'video')
types = [t.split(':')[0] for t in entry.getAcceptedMediaTypes()]
acceptAudio = 'audio' in types
acceptVideo = 'video' in types
if acceptVideo ^ acceptAudio:
hasAudio = self.wizard.getScenario().hasAudio(self.wizard)
hasVideo = self.wizard.getScenario().hasVideo(self.wizard)
if hasAudio and not acceptAudio or hasVideo and not acceptVideo:
return False
return True
def _addEntries(self, entries, ctype, combo, defaultType, oldComponent):
self.debug('adding entries for ctype %s: %r with defaultType %s',
ctype, entries, defaultType)
data = []
for entry in entries:
if ctype != 'muxer' or self._canAddMuxer(entry):
item = (N_(entry.description), entry)
providedMediaTypes = entry.getProvidedMediaTypes()
self.debug("adding entry %r", providedMediaTypes)
if defaultType and defaultType in providedMediaTypes:
data.insert(0, item)
else:
data.append(item)
combo.prefill(data)
combo.set_sensitive(True)
if oldComponent:
for description, entry in combo.get_model_items().iteritems():
if entry.componentType == oldComponent.componentType:
combo.select(entry)
break
def _loadPlugin(self, entry):
def gotFactory(factory):
return factory(self.wizard)
def no_bundle(failure):
failure.trap(errors.NoBundleError)
d = self.wizard.getWizardEntry(entry.componentType)
d.addCallback(gotFactory)
d.addErrback(no_bundle)
return d
def _loadStep(self, combo):
def pluginLoaded(plugin, entry):
# FIXME: verify that factory implements IEncoderPlugin
step = plugin.getConversionStep()
return step
entry = combo.get_selected()
d = self._loadPlugin(entry)
d.addCallback(pluginLoaded, entry)
return d
def _getAudioPage(self):
def stepLoaded(step):
if step is not None:
self.wizard.getScenario().setAudioEncoder(step.model)
self.wizard.taskFinished()
return step
self.wizard.waitForTask('audio encoder page')
d = self._loadStep(self.audio)
d.addCallback(stepLoaded)
return d
def _getVideoPage(self):
def stepLoaded(step):
if step is not None:
self.wizard.getScenario().setVideoEncoder(step.model)
self.wizard.taskFinished()
return step
self.wizard.waitForTask('video encoder page')
d = self._loadStep(self.video)
d.addCallback(stepLoaded)
return d
def _muxerChanged(self):
muxerEntry = self.muxer.get_selected()
self.wizard.message_area.clear()
# '...' used while waiting for the query to be done
if muxerEntry is None:
return
def combosPopulated(unused):
return self._loadPlugin(muxerEntry)
def pluginLoaded(plugin, entry):
if plugin:
self._muxer = plugin
return plugin.workerChanged(self.worker)
else:
# no plugin defined, behaving like before
# FIXME: make check should make sure all muxers have a
# plugin/factory and fail if not
self.wizard.clear_msg('assistant-bundle')
self.wizard.taskFinished()
provides = map(lambda f: f.find(':') > 0 and f.split(':', 1)[1] or f,
muxerEntry.getAcceptedMediaTypes())
d = self._populateCombos(
[('audio-encoder', self.audio, _PREFERRED_AUDIO_ENCODER,
self.wizard.getScenario().getAudioEncoder()),
('video-encoder', self.video, _PREFERRED_VIDEO_ENCODER,
self.wizard.getScenario().getVideoEncoder())],
provides=provides)
d.addCallback(combosPopulated)
d.addCallback(pluginLoaded, muxerEntry)
return d
# Callbacks
def on_muxer__changed(self, combo):
self._muxerChanged()
class SelectFormatStep(WizardStep):
name = 'Encoding'
title = _('Select Format')
section = _('Format')
gladeFile = 'encoding-wizard.glade'
docSection = 'help-configuration-assistant-encoders'
docAnchor = ''
docVersion = 'local'
# Public API
def setMuxers(self, muxers):
self._muxers = [muxer for muxer in muxers]
def getMuxerFormat(self):
"""Returns the format of the muxer, such as "ogg".
@returns: the muxer formats
@rtype: string
"""
muxer = self.muxer.get_selected()
if not muxer:
return
entry = self._entries[muxer.componentType]
return entry.getProvidedMediaTypes()[0]
def getMuxerType(self):
"""Returns the component-type, such as "ogg-muxer"
of the currently selected muxer.
@returns: the muxer
@rtype: string
"""
muxer = self.muxer.get_selected()
if not muxer:
return
entry = self._entries[muxer.componentType]
return entry.componentType
def getAudioFormat(self):
"""Returns the format of the audio encoder, such as "vorbis"
@returns: the audio format
@rtype: string
"""
return None
def getVideoFormat(self):
"""Returns the format of the video encoder, such as "theora"
@returns: the video format
@rtype: string
"""
return None
# WizardStep
def activated(self):
self.audio.hide()
self.label_audio.hide()
self.wizard.getScenario().setAudioEncoder(None)
self.video.hide()
self.label_video.hide()
self.wizard.getScenario().setVideoEncoder(None)
self._populateCombo()
def _populateCombo(self):
self.debug("populating muxer combo")
self.wizard.waitForTask('get entries')
d = self.wizard.getWizardEntries(wizardTypes=['muxer'])
d.addCallback(self._addEntries)
self.muxer.prefill([('...', None)])
        def _finished(unused):
            self.wizard.taskFinished()
            self.muxer.set_sensitive(True)
        d.addCallback(_finished)
def _addEntries(self, entries):
data = []
self._entries = \
dict([(entry.componentType, entry) for entry in entries])
for muxer in self._muxers:
            pattern = re.compile(r'^muxer-(.*?)\d*$')
match = pattern.search(muxer.name)
muxer.type = match.group(1)
desc = '%s (%s)' % (N_(muxer.description), muxer.name)
data.append((desc, muxer))
self.muxer.prefill(data)
def getNext(self):
# Return audio/video/audio-video http streamer page
from flumotion.scenario.steps.httpstreamersteps import HTTPBothStep, \
HTTPAudioStep, HTTPVideoStep, HTTPGenericStep
self.wizard.cleanFutureSteps()
muxer = self.muxer.get_selected()
if muxer.type == 'audio-video':
self.wizard.addStepSection(HTTPBothStep(self.wizard))
elif muxer.type == 'video':
self.wizard.addStepSection(HTTPVideoStep(self.wizard))
elif muxer.type == 'audio':
self.wizard.addStepSection(HTTPAudioStep(self.wizard))
else:
self.wizard.addStepSection(HTTPGenericStep(self.wizard,
muxer.type))
self.wizard.addStepSection(LiveSummaryStep)
# Callbacks
def on_muxer__changed(self, combo):
muxer = combo.get_selected()
if not muxer:
return
self.wizard.getScenario().setExistingMuxer(muxer)
|
ylatuya/Flumotion
|
flumotion/scenario/steps/conversionsteps.py
|
Python
|
gpl-2.0
| 13,864
|
"""mercury URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic.base import RedirectView
from mercury.views import (
ApplicationDetail,
ApplicationList,
NodeDetail,
NodeList,
TrafficMap,
application_traffic,
)
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^app/$', ApplicationList.as_view(), name='app-list'),
url(r'^app/(?P<protocol>\w+)/(?P<port>\d+)/$', ApplicationDetail.as_view(),
name='app-detail'),
url(r'^nodes/$', NodeList.as_view(), name='node-list'),
url(r'^nodes/(?P<pk>\d+)/$', NodeDetail.as_view(), name='node-detail'),
url(r'^traffic/map/$', TrafficMap.as_view(), name='traffic-map'),
url(r'^api/app-traffic/$', application_traffic),
url(r'^$', RedirectView.as_view(url='app', permanent=False), name='index'),
]
|
jonstacks13/mercury
|
mercury/urls.py
|
Python
|
gpl-2.0
| 1,467
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from shuup.core.models import StaffOnlyBehaviorComponent
from shuup.testing.factories import get_default_payment_method, get_default_shop
from shuup_tests.utils.basketish_order_source import BasketishOrderSource
from shuup_tests.utils.fixtures import regular_user
regular_user = regular_user # noqa
@pytest.mark.django_db
def test_staff_only_behavior(admin_user, regular_user):
payment_method = get_default_payment_method()
component = StaffOnlyBehaviorComponent.objects.create()
payment_method.behavior_components.add(component)
source = BasketishOrderSource(get_default_shop())
# anonymous user
unavailability_reasons = list(payment_method.get_unavailability_reasons(source))
assert len(unavailability_reasons) == 1
# regular user
source.creator = regular_user
unavailability_reasons = list(payment_method.get_unavailability_reasons(source))
assert len(unavailability_reasons) == 1
# admin
source.creator = admin_user
unavailability_reasons = list(payment_method.get_unavailability_reasons(source))
assert len(unavailability_reasons) == 0
|
shoopio/shoop
|
shuup_tests/core/test_staff_only_behavior.py
|
Python
|
agpl-3.0
| 1,377
|
class peaktype(object):
scanned = 'scanned'
broad = 'broad'
noise = 'noise'
overlap = 'overlap'
unassigned = 'unassigned'
ladder = 'ladder'
called = 'called'
stutter = 'stutter'
artifact = 'artifact'
bin = 'bin'
ignored = 'ignored'
class channelstatus(object):
unassigned = 'unassigned' # dye is used in panel, but not in this channel
assigned = 'assigned' # channel is assigned to a marker
unused = 'unused' # dye is unused in panel
noisy = 'noisy'
empty = 'empty'
    reseted = 'reseted' # channel has been reset (created empty)
scanned = 'scanned'
preannotated = 'preannotated'
aligned = 'aligned' # ladder peaks has been aligned to standard size
called = 'called'
binned = 'binned'
annotated = 'annotated'
ladder = 'ladder' # channel is used for ladder
class assaystatus(object):
uploaded = 'uploaded'
unassigned = 'unassigned'
assigned = 'assigned'
scanned = 'scanned'
preannotated = 'preannotated'
normalized = 'normalized'
aligned = 'aligned'
called = 'called'
binned = 'binned'
annotated = 'annotated'
class alignmethod(object):
notapplicable = 'notapplicable'
fast_hq = 'fast|highqual'
fast_mq = 'fast|medqual'
fast_hqr = 'fast|highqual-relax'
fast_mqr = 'fast|medqual-relax'
greedy_filtered = 'greedy|filtered'
greedy_shifted = 'greedy|shifted'
greedy_scored = 'greedy|scored'
minim_strict = 'minim|strict'
minim_relax = 'minim|relax'
pm_strict = 'pm|strict'
pm_relax = 'pm|relax'
hcm_strict = 'hcm|strict'
hcm_relax = 'hcm|relax'
gm_strict = 'gm|strict'
gm_relax = 'gm|relax'
de_relax = 'de|relax'
class scanningmethod(object):
notapplicable = 'notapplicable'
cwt = 'cwt' # CWT-based from scipy
pd = 'pd' # peak detection from peakutils
class allelemethod(object):
uncalled = 'uncalled'
leastsquare = 'leastsquare'
cubicspline = 'cubicspline'
localsouthern = 'localsouthern'
class binningmethod(object):
notavailable = 'notavailable'
auto = 'auto'
semiauto = 'semiauto'
dyes = [ '6-FAM', 'NED', 'VIC', 'PET', 'LIZ' ]
ladders = { 'LIZ600': { 'dye': 'LIZ',
'sizes': [ 20.0, 40.0, 60.0, 80.0, 100.0, 114.0, 120.0, 140.0, 160.0,
180.0, 200.0, 214.0, 220.0, 240.0, 250.0, 260.0, 280.0, 300.0,
314.0, 320.0, 340.0, 360.0, 380.0, 400.0, 414.0, 420.0, 440.0,
460.0, 480.0, 500.0, 514.0, 520.0, 540.0, 560.0, 580.0, 600.0 ],
'strict': {
'max_rss': 40.0,
'min_dpscore': 34.0,
'min_sizes': 36
},
'relax': {
'max_rss': 56.25,
'min_dpscore': 33.0,
'min_sizes': 36
},
'k': 6,
'a': 2,
'signature': [ 140, 160, 180, 200, 214, 220, 240, 250, 260, 280, 300, 314, 320 ],
'order': 3,
},
'LIZ500': { 'dye': 'LIZ',
'sizes': [ 35, 50, 75, 100, 139, 150, 160, 200, 250, 300, 340, 350,
400, 450, 490, 500 ],
'strict': {
'max_rss': 17.5,
'min_dpscore': 14.0,
'min_sizes': 16
},
'relax': {
'max_rss': 25.0,
'min_dpscore': 13.0,
'min_sizes': 16
},
'k': 4,
'a': 1,
'signature': [ 75, 100, 139, 150, 160, 200, 250 ],
'order': 2,
},
}
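# --- Hedged usage sketch (not part of the original module) ---
# The constants above are plain lookup data; e.g. picking the LIZ500
# ladder and its strict alignment thresholds:
if __name__ == '__main__':
    liz500 = ladders['LIZ500']
    print(liz500['dye'], '-', len(liz500['sizes']), 'ladder peaks')
    print('strict QC thresholds:', liz500['strict'])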
|
trmznt/fatools
|
fatools/lib/const.py
|
Python
|
lgpl-3.0
| 4,039
|
import numpy
from pygbe.util import an_solution
from convergence import (run_convergence, picklesave, pickleload,
report_results, mesh)
def main():
print('{:-^60}'.format('Running sphere_molecule_single test'))
try:
test_outputs = pickleload()
except FileNotFoundError:
test_outputs = {}
problem_folder = 'input_files'
# molecule_single
param = 'sphere_fine.param'
test_name = 'molecule_single'
if test_name not in test_outputs.keys():
N, iterations, Esolv, Esurf, Ecoul, Time = run_convergence(
mesh, test_name, problem_folder, param)
test_outputs[test_name] = [N, iterations, Esolv, Esurf, Ecoul, Time]
picklesave(test_outputs)
# load results for analysis
Esolv, Esurf, Ecoul = test_outputs['molecule_single'][2:5]
Time = test_outputs['molecule_single'][-1]
N, iterations = test_outputs['molecule_single'][:2]
total_time = Time
analytical = an_solution.an_P(
numpy.array([1.]), numpy.array([[1., 1., 1.41421356]]), 4., 80., 5.,
0.125, 5., 20)
error = abs(Esolv - analytical) / abs(analytical)
report_results(error,
N,
iterations,
Esolv,
analytical,
total_time,
energy_type='Total',
test_name='sphere molecule single')
if __name__ == "__main__":
from check_for_meshes import check_mesh
mesh_file = 'https://zenodo.org/record/55349/files/pygbe_regresion_test_meshes.zip'
folder_name = 'regresion_tests_meshes'
rename_folder = 'geometry'
size = '~10MB'
check_mesh(mesh_file, folder_name, rename_folder, size)
main()
|
barbagroup/pygbe
|
tests/convergence_tests/sphere_molecule_single.py
|
Python
|
bsd-3-clause
| 1,741
|
"""Module containing classes for extracting/constructing features from data"""
__author__ = 'wittawat'
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.stats as stats
class FeatureMap(object):
"""Abstract class for a feature map function"""
__metaclass__ = ABCMeta
@abstractmethod
def gen_features(self, X):
"""Generate D features for each point in X.
- X: nxd data matrix
Return a n x D numpy array.
"""
pass
@abstractmethod
def num_features(self, X=None):
"""
Return the number of features that this map will generate for X.
X is optional.
"""
pass
class MarginalCDFMap(FeatureMap):
"""
A FeatureMap that returns a new set of variates generated by applying
the empirical CDF of each variate to its corresponding variate.
Also called, a copula transform or a probability integral transform.
"""
def gen_features(self, X):
"""
        Cost O(dn*log(n)) where X is n x d.
"""
n, d = X.shape
Z = np.zeros((n, d))
for j in range(d):
Z[:, j] = stats.rankdata(X[:, j])/float(n)
return Z
def num_features(self, X):
return X.shape[1]
class RFFKGauss(FeatureMap):
"""
A FeatureMap to construct random Fourier features for a Gaussian kernel.
"""
def __init__(self, sigma2, n_features, seed=20):
"""
n_features: number of random Fourier features. The total number of
dimensions will be n_features*2.
"""
assert sigma2 > 0, 'sigma2 not positive. Was %s'%str(sigma2)
assert n_features > 0
self.sigma2 = sigma2
self.n_features = n_features
self.seed = seed
def gen_features(self, X):
rstate = np.random.get_state()
np.random.seed(self.seed)
n, d = X.shape
D = self.n_features
W = np.random.randn(D, d)
# n x D
XWT = X.dot(W.T)/np.sqrt(self.sigma2)
Z1 = np.cos(XWT)
Z2 = np.sin(XWT)
Z = np.hstack((Z1, Z2))*np.sqrt(1.0/self.n_features)
np.random.set_state(rstate)
return Z
def num_features(self, X=None):
return 2*self.n_features
class NystromFeatureMap(FeatureMap):
"""
A FeatureMap to construct features Z (n x D) such that Z.dot(Z.T) gives
a good approximation to the kernel matrix K constructed by using the
specified kernel k.
Procedure
- A subset of D inducing points is given.
- Form an n x D kernel matrix K between the input points and the inducing
points.
- Form a D x D kernel matrix M of the inducing points.
- Features = K.dot(M**-0.5) (matrix power)
"""
def __init__(self, k, inducing_points):
"""
k: a Kernel
inducing_points: a D x d matrix. D = number of points. d = dimensions.
The number of features is D.
"""
self.k = k
self.inducing_points = inducing_points
# a cache to make it faster
M = k.eval(inducing_points, inducing_points)
        # eigen decompose; M is symmetric PSD, so eigh keeps the result real.
        # Want to raise to the power of -0.5.
        evals, V = np.linalg.eigh(M)
        # Assume M is full rank; the jitter guards against eigenvalues that
        # are numerically zero.
        pow_evals = 1.0/np.sqrt(evals + 1e-6)
self._invert_half = V.dot(np.diag(pow_evals)).dot(V.T)
def gen_features(self, X):
n, d = X.shape
if d != self.inducing_points.shape[1]:
raise ValueError('dimension of the input does not match that of the inducing points')
K = self.k.eval(X, self.inducing_points)
Z = K.dot(self._invert_half)
return Z
    def num_features(self, X=None):
        # D = the number of inducing points (rows), per the class docstring
        return self.inducing_points.shape[0]
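# --- Hedged usage sketch (not part of the original module) ---
# Exercises the two self-contained maps above on toy data; shapes follow
# the docstrings (X is n x d, features are n x D).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.randn(100, 3)
    U = MarginalCDFMap().gen_features(X)   # copula transform, values in (0, 1]
    rff = RFFKGauss(sigma2=1.0, n_features=50, seed=7)
    Z = rff.gen_features(X)                # 100 x 100: cos and sin blocks
    # Z.dot(Z.T) approximates the Gaussian kernel matrix of X
    print(U.shape, Z.shape)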
|
Diviyan-Kalainathan/dependency_criteria
|
lib/fsic/feature.py
|
Python
|
mit
| 3,750
|
# encoding: utf-8
import os
from django.test.runner import DiscoverRunner
from django_nose import NoseTestSuiteRunner
from optparse import make_option
from django.core import management
from django.contrib.auth.management.commands import changepassword
from unittest.result import TestResult
browser = 'Firefox'
sauce_username = ''
sauce_accesskey = ''
sauce_platforms = [
{"platform": "Mac OS X 10.11", "browserName": "chrome", "version": "52"},
{"platform": "Windows 8.1", "browserName": "internet explorer", "version": "11"},
{"platform": "Linux", "browserName": "firefox", "version": "44"}
]
class Runner(DiscoverRunner):
"""
The browser test runner modifies the following from the default django runner:
1. default test files pattern is browser_test*.py
2. doesn't use the test databases - it uses the actual configured database
3. adds some options for selecting browser to test with / remotely with SauceLabs
"""
option_list = (
make_option('-t', '--top-level-directory',
action='store', dest='top_level', default=None,
help='Top level of project for unittest discovery.'),
make_option('-p', '--pattern', action='store', dest='pattern',
default="browser_cases*.py",
help='The test matching pattern. Defaults to browser_cases*.py.'),
make_option('--full-initialization', action='store_true', dest='fullinitialization',
default=False,
help='this should only be used when running ci - it initializes the environment from scratch'),
make_option('--sauce-user', action='store', dest='sauceuser',
default='',
help='SauceLabs username (required if browser=Sauce, see https://docs.saucelabs.com/reference/sauce-connect/#managing-multiple-tunnels)'),
make_option('--sauce-accesskey', action='store', dest='sauceaccesskey',
default='',
help='SauceLabs access key (required if browser=Sauce)'),
make_option('--browser', action='store', dest='browser',
default='Firefox',
help='The browser to use for selenium tests default is "Firefox", can also run remotely on sauce labs - see docs')
)
def __init__(self, *args, **kwargs):
global browser, sauce_username, sauce_accesskey
if os.environ.get('KNESSET_BROWSER'):
browser = os.environ.get('KNESSET_BROWSER')
else:
browser = kwargs['browser']
if browser == 'Sauce':
if os.environ.get('SAUCE_USERNAME'):
sauce_username = os.environ.get('SAUCE_USERNAME')
else:
sauce_username = kwargs['sauceuser']
if os.environ.get('SAUCE_ACCESS_KEY'):
sauce_accesskey = os.environ.get('SAUCE_ACCESS_KEY')
else:
sauce_accesskey = kwargs['sauceaccesskey']
if 'fullinitialization' in kwargs.keys() and kwargs['fullinitialization']:
self.full_initialization()
super(Runner, self).__init__(*args, **kwargs)
def full_initialization(self):
self.create_superuser()
def create_superuser(self):
print('Creating test superuser (admin/123456)')
management.call_command('createsuperuser', interactive=False, username='admin',
email='OpenKnessetAdmin@mailinator.com')
command = changepassword.Command()
command._get_pass = lambda *args: '123456'
command.execute('admin')
def setup_databases(self, **kwargs):
pass
def teardown_databases(self, old_config, **kwargs):
pass
def run_suite(self, suite, **kwargs):
if browser == 'Sauce' and sauce_accesskey == '' and sauce_username == '':
print('Sauce selected but no accesskey and username - test suite will not run')
return TestResult()
else:
return super(Runner, self).run_suite(suite, **kwargs)
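# --- Hedged usage note (not part of the original module) ---
# A runner like this is normally wired in via Django settings, e.g.
#   TEST_RUNNER = 'knesset.browser_test_runner.Runner'
# after which `python manage.py test` picks up the browser_cases*.py
# pattern and the browser/Sauce options defined above.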
|
OriHoch/Open-Knesset
|
knesset/browser_test_runner.py
|
Python
|
bsd-3-clause
| 4,065
|
#!/usr/bin/env python2
# Copyright (c) 2016 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test MVF post fork retargeting
#
# on node 0, test pure block height trigger at height FORK_BLOCK
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.arith import *
from random import randint
# period (in blocks) from fork activation until retargeting returns to normal
HARDFORK_RETARGET_BLOCKS = 180*144 # the period when retargeting returns to original
FORK_BLOCK = 2020 # needs to be >= 2018 to test fork difficulty reset
POW_LIMIT = 0x207fffff
PREFORK_BLOCKTIME = 800 # the seconds for a block during the regtest prefork
ORIGINAL_DIFFADJINTERVAL = 2016 # the original difficulty adjustment interval
STANDARD_BLOCKTIME = 600 # the standard target seconds for a block
def CalculateMVFNextWorkRequired(bits, actualBlockTimeSecs, targetBlockTimeSecs):
    # Returns difficulty using the post-fork retargeting formula in pow.cpp:CalculateMVFNextWorkRequired()
bnPowLimit = bits2target_int(hex2bin(int2hex(POW_LIMIT))) # MVF-Core moved here
# Limit adjustment step
nActualTimespan = actualBlockTimeSecs
# Target by interval
nTargetTimespan = targetBlockTimeSecs
    # permit 10x retarget changes for a few blocks after the fork, i.e. while nTargetTimespan is < 30 minutes (MVHF-CORE-DES-DIAD-5)
if (nTargetTimespan >= STANDARD_BLOCKTIME * 3) :
retargetLimit = 4
else :
retargetLimit = 10
# prevent abrupt changes to target
if (nActualTimespan < nTargetTimespan/retargetLimit) :
nActualTimespan = nTargetTimespan/retargetLimit
if (nActualTimespan > nTargetTimespan*retargetLimit) :
nActualTimespan = nTargetTimespan*retargetLimit
# compare with debug.log
#print "nTargetTimespan=%d nActualTimespan=%d" % (nTargetTimespan,nActualTimespan)
# Retarget
bnOld = bits2target_int(hex2bin(bits)) # SetCompact
# MVF-Core begin: move division before multiplication
# at regtest difficulty, the multiplication is prone to overflowing
bnNew1 = bnOld / nTargetTimespan
bnNew2 = bnNew1 * nActualTimespan
# Test for overflow
if (bnNew2 / nActualTimespan != bnNew1 or bnNew2 > bnPowLimit):
bnNew = bnPowLimit
else :
bnNew = bnNew2
newBits = "0x%s" % bin2hex(target_int2bits(bnNew)) # GetCompact
nBitsReset = int(newBits,0)
return nBitsReset
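# --- Hedged illustration (not part of the original test) ---
# Stripped of the bits/target conversions, the clamp above is just:
#   clamped = min(max(nActualTimespan, nTargetTimespan / limit),
#                 nTargetTimespan * limit)
# e.g. actual=6000s, target=600s, limit=4 -> clamped to 2400s.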
def CalculateMVFResetWorkRequired(bits):
# Returns difficulty using the fork reset formula in pow.cpp:CalculateMVFResetWorkRequired()
bnPowLimit = bits2target_int(hex2bin(int2hex(POW_LIMIT)))
# drop difficulty via factor
nDropFactor = HARDFORK_DROPFACTOR_REGTEST_DEFAULT
# total blocktimes prefork during run_test
nActualTimespan = ORIGINAL_DIFFADJINTERVAL * PREFORK_BLOCKTIME
# used reduced target time span while within the re-target period
nTargetTimespan = nActualTimespan / nDropFactor
# compare with debug.log
#print "nTargetTimespan=%d nActualTimespan=%d" % (nTargetTimespan,nActualTimespan)
bnOld = bits2target_int(hex2bin(bits)) # SetCompact
bnNew1 = bnOld / nTargetTimespan
bnNew2 = bnNew1 * nActualTimespan
# check for overflow or overlimit
if (bnNew2 / nActualTimespan != bnNew1 or bnNew2 > bnPowLimit):
bnNew = bnPowLimit
else:
bnNew = bnNew2
nBitsReset = int("0x%s" % bin2hex(target_int2bits(bnNew)),0) # GetCompact
return nBitsReset
class MVF_RETARGET_BlockHeight_Test(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--quick", dest="quick", default=False, action="store_true",
help="Run shortened version of test")
def setup_chain(self):
# random seed is initialized and output by the test framework
print("Initializing test directory " + self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir
,["-forkheight=%s"%FORK_BLOCK, "-rpcthreads=100","-blockversion=%s" % "0x20000000" ]
))
def is_fork_triggered_on_node(self, node=0):
""" check in log file if fork has triggered and return true/false """
# MVF-Core TODO: extend to check using RPC info about forks
nodelog = self.options.tmpdir + "/node%s/regtest/debug.log" % node
hf_active = search_file(nodelog, "isMVFHardForkActive=1")
fork_actions_performed = search_file(nodelog, "MVF: performing fork activation actions")
return (len(hf_active) > 0 and len(fork_actions_performed) == 1)
def run_test(self):
# check that fork does not trigger before the forkheight
print "Generating %s pre-fork blocks" % (FORK_BLOCK - 1)
#block0 already exists
best_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), True)
preblocktime = best_block['time']
for n in range(FORK_BLOCK - 1):
# Change block times so that difficulty develops
preblocktime = preblocktime + PREFORK_BLOCKTIME
self.nodes[0].setmocktime(preblocktime)
self.nodes[0].generate(1)
print "Done generating %s pre-fork blocks" % (FORK_BLOCK - 1)
print "Stopping node 0"
stop_node(self.nodes[0],0)
print "Restarting node 0 with -force-retarget"
self.nodes[0] = start_node(0, self.options.tmpdir
,["-forkheight=%s"%FORK_BLOCK, "-force-retarget", "-rpcthreads=100","-blockversion=%s" % "0x20000000" ]
)
# Read difficulty before the fork
best_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), True)
print "Pre-fork difficulty: %.10f %s " % (best_block['difficulty'], best_block['bits'])
nBits = CalculateMVFResetWorkRequired(best_block['bits'])
reset_bits = int2hex(nBits)
reset_diff_expected = bits2difficulty(nBits)
assert_greater_than(reset_diff_expected, 0)
# Test fork did not trigger prematurely
assert_equal(False, self.is_fork_triggered_on_node(0))
print "Fork did not trigger prematurely"
# Generate fork block
best_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), True)
self.nodes[0].setmocktime(best_block['time'] + STANDARD_BLOCKTIME)
self.nodes[0].generate(1)
assert_equal(True, self.is_fork_triggered_on_node(0))
print "Fork triggered successfully (block height %s)" % best_block['height']
# Test fork difficulty reset
best_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), True)
assert_equal(best_block['bits'],reset_bits)
#assert_equal(best_block['bits'], "207eeeee") # fixed reset
print "Post-fork difficulty reset success: %.10f %s " % (best_block['difficulty'], best_block['bits'])
        # used to track how many times the same bits are used in a row
prev_block = 0
diffadjinterval = 0
        # the first next_block_time test phase cycles through increases of
        # 50 seconds starting from here; if the starting number is too low
        # it may cause timeout errors too often
count_bits_used = 0
# print column titles
print ">> Bits change log <<"
print "Time,Block,Delta(secs),Bits,Used,DiffAdjInterval,TimespanBlocks,Difficulty,NextBits"
# start generating MVF blocks with varying time stamps
oneRetargetPeriodAfterMVFRetargetPeriod = HARDFORK_RETARGET_BLOCKS+ORIGINAL_DIFFADJINTERVAL+1
if self.options.quick:
# used for CI - just test one day after fork
# this is basically just to test reset and initial response
number_of_blocks_to_test_after_fork = 144
else:
# full range
            number_of_blocks_to_test_after_fork = oneRetargetPeriodAfterMVFRetargetPeriod
for n in xrange(number_of_blocks_to_test_after_fork):
best_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), True)
prev_block = self.nodes[0].getblock(best_block['previousblockhash'], True)
# track bits used
if (prev_block['bits'] == best_block['bits'] or best_block['height'] == FORK_BLOCK) and n < oneRetargetPeriodAfterMVFRetargetPeriod -1 :
count_bits_used += 1
else:
# when the bits change then output the retargeting metrics
# for the previous group of bits
print_block = self.nodes[0].getblock(self.nodes[0].getblockhash(prev_block['height'] - count_bits_used))
avgDeltaBlockTime = (prev_block['time'] - print_block['time']) / count_bits_used
if n == oneRetargetPeriodAfterMVFRetargetPeriod -1 :
nextBits = "end"
else :
# Test difficulty during MVF retarget period
first_block = self.nodes[0].getblock(self.nodes[0].getblockhash(prev_block['height'] - timespanblocks))
actualBlockTimeSecs = prev_block['time'] - first_block['time']
nBits = CalculateMVFNextWorkRequired(prev_block['bits'], actualBlockTimeSecs, difficultytimespan)
nextBits = int2hex(nBits)
#best_diff_expected = bits2difficulty(nBits)
#print "%s %.10f : %s " % (nextBits, best_diff_expected, best_block['bits'])
#if best_block['bits'] <> nextBits : #debug
#print "err bits %s %s %s " % (best_block['bits'], nextBits, diffadjinterval)
#raw_input()
assert_equal(best_block['bits'], nextBits)
print "%s,%d,%d,%s,%d,%d,%d,%.10f,%s " %(
time.strftime("%Y-%m-%d %H:%M",time.gmtime(prev_block['time'])),
prev_block['height'],
avgDeltaBlockTime,
prev_block['bits'],
count_bits_used,
diffadjinterval,
timespanblocks,
prev_block['difficulty'],
nextBits
)
# reset bits tracking variables
count_bits_used = 1
#raw_input()
#### end if prev_block['bits'] == best_block['bits']
# Get difficulty time span
difficultytimespan = self.nodes[0].getblockchaininfo()['difficultytimespan']
timespanblocks = difficultytimespan / STANDARD_BLOCKTIME
#print "%s : %s" % (best_block['height'],timespanblocks)
# Get difficulty adjustment interval
diffadjinterval = self.nodes[0].getblockchaininfo()['difficultyadjinterval']
# Test processed bits are used within the expected difficulty interval
# except when the bits is at the bits limit: 207fffff
#
# In some cases the retarget causes the same bits to be returned
# so this test has been disabled.
#
#if int("0x%s"%prev_block['bits'],0) <> POW_LIMIT :
#if count_bits_used > diffadjinterval : #debug
#print "err count_bits_used %s : %s " % (prev_block['bits'], nextBits)
#raw_input()
#assert_less_than_equal(count_bits_used, diffadjinterval)
# Setup various block time interval tests
if n in range(0,11) :
next_block_time = next_block_time + 50
elif n in range(11,22) :
# this may cause bits to hit the limit POW_LIMIT
next_block_time = 1200
elif n in range(22,26) :
# this may cause timeout errors
next_block_time = 300
elif n in range(26,500) :
# exactly standard block times
next_block_time = STANDARD_BLOCKTIME
elif n in range(500,525) :
# simulate faster blocks
# this may cause timeout errors
next_block_time = randint(100,300)
elif n in range(525,550) :
# simulate slow blocks
# this may cause bits to hit the limit POW_LIMIT
next_block_time = randint(1000,3000)
elif n >= HARDFORK_RETARGET_BLOCKS :
# exactly standard block times so when the original retargeting
# begins again the difficulty will stay about the same
next_block_time = STANDARD_BLOCKTIME
else:
                # simulate on-time blocks, i.e. hash power/difficulty around 600 secs
next_block_time = randint(500,700)
self.nodes[0].setmocktime(best_block['time'] + next_block_time)
# Test the interval matches the interval defined in params.DifficultyAdjustmentInterval()
            # notice the range() upper bound is plus one versus the C++ switch
if n in range(0,2017) :
diff_interval_expected = 1 # retarget every block
elif n in range(2017,4000) :
diff_interval_expected = 10
elif n in range(4000,10000) :
diff_interval_expected = 40
elif n in range(10000,15000) :
diff_interval_expected = 100
elif n in range(15000,20000) :
diff_interval_expected = 400
elif n in range(20000,HARDFORK_RETARGET_BLOCKS+1) :
diff_interval_expected = 1000
else:
diff_interval_expected = ORIGINAL_DIFFADJINTERVAL # every 14 days original
#if diff_interval_expected <> diffadjinterval :
#print "err diffadjinterval %d %d %d" % (n, diff_interval_expected, diffadjinterval)
#raw_input()
assert_equal(diff_interval_expected, diffadjinterval)
# print info for every block
#if best_block['height'] >= 16127 :
#first_block = self.nodes[0].getblock(self.nodes[0].getblockhash(prev_block['height'] - timespanblocks))
#print "%s :: %s :: %d :: %s :: %.10f :: %d :: %d" %(
#best_block['height'],
#time.strftime("%H:%M",time.gmtime(best_block['time'])),
#best_block['time'] - prev_block['time'],
#best_block['bits'],
#best_block['difficulty'],
#count_bits_used,
#first_block['height'])
#raw_input()
# generate the next block
self.nodes[0].generate(1)
#### end for n in xrange
print "Done."
if __name__ == '__main__':
MVF_RETARGET_BlockHeight_Test().main()
|
BTCfork/hardfork_prototype_1_mvf-core
|
qa/rpc-tests/mvf-core-retarget.py
|
Python
|
mit
| 15,092
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import
## batteries
import os
import sys
import unittest
## 3rd party
import pandas as pd
## package
from leylab_pipelines import Utils
# data dir
test_dir = os.path.join(os.path.dirname(__file__))
data_dir = os.path.join(test_dir, 'data')
# tests
class Test_Utils(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_make_range(self):
x = Utils.make_range('all')
self.assertIsNone(x)
x = Utils.make_range('0')
self.assertListEqual(x, [0])
x = Utils.make_range('1,2,5')
self.assertListEqual(x, [1,2,5])
x = Utils.make_range('1,2,5-6')
self.assertListEqual(x, [1,2,5,6])
x = Utils.make_range('1-3,5-6')
self.assertListEqual(x, [1,2,3,5,6])
def test_make_range_zeroindex(self):
x = Utils.make_range('all', set_zero_index=True)
self.assertIsNone(x)
with self.assertRaises(ValueError):
Utils.make_range('0', set_zero_index=True)
x = Utils.make_range('1,2,5', set_zero_index=True)
self.assertListEqual(x, [0,1,4])
x = Utils.make_range('1,2,5-6', set_zero_index=True)
self.assertListEqual(x, [0,1,4,5])
def test_check_gwl(self):
gwl_file = os.path.join(data_dir, 'multi_dispense.gwl')
ret = Utils.check_gwl(gwl_file)
self.assertIsNone(ret)
|
leylabmpi/leylab_pipelines
|
tests/test_Utils.py
|
Python
|
mit
| 1,431
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# Skarphed is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skarphed.
# If not, see http://www.gnu.org/licenses/.
###########################################################
import os
import tarfile
import shutil
from StringIO import StringIO
from json import JSONDecoder
from skarphedcore.configuration import Configuration
from skarphedcore.database import Database
from skarphedcore.core import Core
from skarphedcore.module import ModuleManager
from skarphedcore.binary import Binary
from skarphedcore.css import CSSManager
from skarphedcore.view import Page, View
from skarphedcore.poke import PokeManager
from common.enums import ActivityType
from common.errors import TemplateException
class Template(object):
"""
    The template class manages the data that has to be handled with
    template packages and represents the template to the client.
    Furthermore it holds the template data in RAM to deliver it faster
    to the clients.
"""
@classmethod
def get_current_template(cls):
"""
        Returns the currently installed template, reconstructed from the
        database. Raises a TemplateException if no template is installed.
"""
db = Database()
stmnt = "SELECT TPL_NAME, TPL_DESC, TPL_AUTHOR FROM TEMPLATE_INFO ;"
cur = db.query(stmnt)
tpldata = cur.fetchonemap()
if tpldata is None:
raise TemplateException(TemplateException.get_msg(1))
tpl = Template()
tpl.set_name(tpldata['TPL_NAME'])
tpl.set_description(tpldata['TPL_DESC'])
tpl.set_author(tpldata['TPL_AUTHOR'])
stmnt = "SELECT TPB_BIN_ID FROM TEMPLATE_BINARIES WHERE TPB_TPL_ID = 0 ;"
cur = db.query(stmnt)
rows = cur.fetchallmap()
for row in rows:
tpl.add_binary(row['TPB_BIN_ID'])
return tpl
@classmethod
def fetch_templates_for_gui(cls):
repository = ModuleManager.get_repository()
data = repository.get_all_templates()
return data
@classmethod
def install_from_repo(cls, nr):
repository = ModuleManager.get_repository()
data = repository.download_template(nr)
return cls.install_from_data(data)
@classmethod
def install_from_data(cls, data):
"""
        Receives .tar.gz'ed data and generates template data from it.
        First validates the data. While validating, it tracks all occurring
        errors in the errorlog. If one severe error happens during validation,
        the method stops before actually doing write operations and returns
        the errorlog to the client.
Otherwise, it executes the installation and returns all
non-severe errors (warnings).
"""
def cleanup(path):
shutil.rmtree(path)
#TODO: Mutex this operation
errorlog = []
configuration = Configuration()
webpath = configuration.get_entry("core.webpath")
temp_installpath = webpath+"/tpl_install"
os.mkdir(temp_installpath)
tar = open(temp_installpath+"/tpl.tar.gz","w")
tar.write(data)
tar.close()
tar = tarfile.open(temp_installpath+"/tpl.tar.gz","r:gz")
tar.extractall(temp_installpath)
tar.close()
os.unlink(temp_installpath+"/tpl.tar.gz")
manifest_file = open(temp_installpath+"/manifest.json","r")
try:
manifest = JSONDecoder().decode(manifest_file.read())
except ValueError,e:
errorlog.append({'severity':1,
'type':'PackageFile',
'msg':'JSON seems to be corrupt'})
cleanup(temp_installpath)
return errorlog
manifest_file.close()
#BEGIN TO VALIDATE DATA
        general_css = None
        try:
            f = open(temp_installpath+"/general.css")
            general_css = f.read()
            f.close()
        except IOError,e:
            errorlog.append({'severity':1,
                             'type':'PackageFile',
                             'msg':'File not in Package general.css'})
        css_manager = CSSManager()
        general_csspropertyset = None
        # skip CSS parsing if general.css was missing (the severity-1 error
        # recorded above already guarantees we abort before writing)
        if general_css is not None:
            try:
                general_csspropertyset = css_manager.create_csspropertyset_from_css(general_css)
                general_csspropertyset.set_type_general()
            except Exception, e:
                errorlog.append({'severity':1,
                                 'type':'CSS-Data',
                                 'msg':'General CSS File does not Contain Valid CSS '+str(e)})
pagedata = [] # Prepared filedata for execution into Database
for page in manifest['pages']:
if page['filename'].endswith(".html"):
name = page['filename'].replace(".html","",1)
elif page['filename'].endswith(".htm"):
name = page['filename'].replace(".htm","",1)
            else:
                errorlog.append({'severity':1,
                                 'type':'PageData',
                                 'msg':'Invalid format (allowed is .html and .htm): '+page['filename']})
                continue
try:
f = open(temp_installpath+"/"+page['filename'])
html = f.read()
f.close()
except IOError,e:
errorlog.append({'severity':1,
'type':'PageFile',
'msg':'File not in Package '+page['filename']})
continue
try:
f = open(temp_installpath+"/"+name+"_head.html","r")
html_head = f.read()
f.close()
except IOError,e:
errorlog.append({'severity':0,
'type':'PageFile',
'msg':'File not in Package '+name+"_head.html"})
html_head = ""
try:
f = open(temp_installpath+"/static/"+name+".css")
css = f.read()
f.close()
except IOError,e:
errorlog.append({'severity':1,
'type':'PageFile',
'msg':'File not in Package static/'+name+".css"})
continue
try:
f = open(temp_installpath+"/static/"+name+"_minimap.png","rb")
minimap = f.read()
f.close()
os.unlink(temp_installpath+"/static/"+name+"_minimap.png")
except IOError,e:
errorlog.append({'severity':0,
'type':'PageFile',
'msg':'File not in Package static/'+name+"_minimap.png"})
minimap = None
pagedata.append({'name':page['name'],
'desc':page['desc'],
'html_body':html,
'html_head':html_head,
'css':css,
'minimap':minimap,
'internal_name':name})
if len(errorlog) > 0:
is_severe_error = False
for error in errorlog:
if error['severity'] >= 1:
is_severe_error = True
break
if is_severe_error:
cleanup(temp_installpath)
return errorlog
# BEGIN TO WRITE DATA
#release maintenance mode at the end?
release_maintenance_mode = not cls.is_template_installed()
#uninstall old template
if cls.is_template_installed():
old_template = cls.get_current_template()
old_template.uninstall()
new_template = Template()
new_template.set_name(manifest['name'])
new_template.set_description(manifest['description'])
new_template.set_author(manifest['author'])
#create pages
for page in pagedata:
Page.create(page['name'],
page['internal_name'],
page['desc'],
page['html_body'],
page['html_head'],
page['css'],
page['minimap'])
#put binary into database
for bin_filename in os.listdir(temp_installpath+"/static"):
binary=None
try:
bin_file = open(temp_installpath+"/static/"+bin_filename,"rb")
bin_data = bin_file.read()
bin_file.close()
                # TODO: Find a more generic way to determine the mimetype
                if bin_filename.endswith(".png"):
                    binary = Binary.create("image/png", bin_data)
                elif bin_filename.endswith(".jpeg") or bin_filename.endswith(".jpg"):
                    binary = Binary.create("image/jpeg", bin_data)
                else:
                    binary = Binary.create("application/octet-stream", bin_data)
if binary is not None:
binary.set_filename(bin_filename)
binary.store()
new_template.add_binary(binary.get_id())
except IOError, e:
errorlog.append({'severity':0,
'type':'PageFile',
'msg':'File seems broken static/'+bin_filename})
#read general.css into CSSPropertysets
general_csspropertyset.store()
new_template.store()
cleanup(temp_installpath)
        # create a default view if there isn't one
View.create_default_view()
if release_maintenance_mode:
Core().deactivate_maintenance_mode()
return errorlog
@classmethod
def is_template_installed(cls):
"""
checks whether there is a template installed
"""
db = Database()
stmnt = "SELECT COUNT(*) AS AMNT FROM TEMPLATE_INFO ;"
cur = db.query(stmnt)
row = cur.fetchonemap()
return bool(row['AMNT'])
def __init__(self):
self._name = None
self._description = None
self._author = None
self._binaries = [] #binaries that belong to this template
def get_id(self):
"""
everything in skarphed should have a get_id()
"""
return 0
def set_name(self, name):
self._name = unicode(name)
def get_name(self):
return self._name
def set_description(self, desc):
self._description = unicode(desc)
def get_description(self):
return self._description
def set_author(self, author):
self._author = unicode(author)
def get_author(self):
return self._author
def add_binary(self, bin_id):
if bin_id not in self._binaries:
self._binaries.append(bin_id)
def remove_binary(self, bin_id):
self._binaries.remove(bin_id)
def store(self):
"""
stores the template information in the database
"""
db = Database()
stmnt = "UPDATE OR INSERT INTO TEMPLATE_INFO (TPL_ID, TPL_NAME, TPL_DESC, TPL_AUTHOR) \
VALUES (0, ?, ?, ? ) MATCHING (TPL_ID) ;"
db.query(stmnt, (self._name, self._description, self._author), commit=True)
stmnt = "INSERT INTO TEMPLATE_BINARIES (TPB_TPL_ID, TPB_BIN_ID) VALUES (?,?) ;"
for bin_id in self._binaries:
db.query(stmnt, (0, bin_id), commit=True)
PokeManager.add_activity(ActivityType.TEMPLATE)
def uninstall(self):
"""
Uninstalls this template
"""
db = Database()
for bin_id in self._binaries:
bin = Binary.get_by_id(bin_id)
bin.delete()
stmnt = "DELETE FROM TEMPLATE_BINARIES ;"
db.query(stmnt, commit=True)
#Destroy Pages
Page.delete_all_pages()
#Set Page ID-Generator to 1
db.set_seq_to('SIT_GEN',1)
stmnt = "DELETE FROM TEMPLATE_INFO ;"
db.query(stmnt, commit=True)
PokeManager.add_activity(ActivityType.TEMPLATE)
|
skarphed/skarphed
|
core/lib/template.py
|
Python
|
agpl-3.0
| 12,772
|
"""
Test DistCI input validation routines
Copyright (c) 2012-2013 Heikki Nousiainen, F-Secure
See LICENSE for details
"""
from distci.frontend import validators
class TestValidators:
valid_task_ids = [
'00000000-0000-0000-0000-000000000000',
'be1a4893-aada-4fa1-980c-46fc30e196c6'
]
invalid_task_ids = [
'00000000-0000-0000-0000-00000000000',
'00000000-0000-0000-0000-0000000000000',
'000000000-000-000-000000-00000000000',
'00000000000000000000000000000000',
'00000000-0000-0000-0000-00000000000F',
'00000000-0000-0000-0000-00000000000g',
'..',
'00000000-0000-0000-0000-000000000000/..',
'00000000-0000-0000-0000-00\\000000000',
'00000000-0000-/000-0000-000000000000'
]
valid_job_ids = [
'myjob',
'my_job',
'my-job-2',
'abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ-0123456789'
]
invalid_job_ids = [
'/etc/passwd',
'..',
'/',
'my+job',
'(my)job',
'my:job'
]
valid_build_ids = [
'1',
'1234567890123456'
]
invalid_build_ids = [
'/etc/passwd',
'..',
'/',
'foobar',
'123foobar',
'12345678901234567'
]
def test_01_valid_task_ids(self):
for task_id in self.valid_task_ids:
assert validators.validate_task_id(task_id) == task_id, "Validator declined valid task_id %s" % task_id
def test_02_invalid_task_ids(self):
for task_id in self.invalid_task_ids:
assert validators.validate_task_id(task_id) is None, "Validator accepted invalid task_id %s" % task_id
def test_03_valid_job_ids(self):
for job_id in self.valid_job_ids:
assert validators.validate_job_id(job_id) == job_id, "Validator declined valid job_id %s" % job_id
def test_04_invalid_job_ids(self):
for job_id in self.invalid_job_ids:
assert validators.validate_job_id(job_id) is None, "Validator accepted invalid job_id %s" % job_id
def test_05_valid_build_ids(self):
for build_id in self.valid_build_ids:
assert validators.validate_build_id(build_id) == build_id, "Validator declined valid build_id %s" % build_id
def test_06_invalid_build_ids(self):
for build_id in self.invalid_build_ids:
assert validators.validate_build_id(build_id) is None, "Validator accepted invalid build_id %s" % build_id
|
F-Secure/distci
|
src/distci/frontend/tests/test-validators.py
|
Python
|
apache-2.0
| 2,641
|
# -*- coding: utf-8 -*-
import pytest
import glob
import os
import six
import sys
import yaml
from flexmock import flexmock
from devassistant import dapi
from devassistant import utils
from devassistant.dapi import dapicli
from devassistant.exceptions import DapiLocalError
class TestDapicli(object):
'''Test if the Dapi CLI works'''
users_yaml = '''
count: 2
next: null
previous: null
results:
- api_link: http://api/api/users/miro/
codap_set: []
fedora_username: churchyard
full_name: Miro Hroncok
github_username: hroncok
human_link: http://api/user/miro/
id: 1
metadap_set: ['http://api/api/metadaps/python/', 'http://api/api/metadaps/bar/',
'http://api/api/metadaps/foo/']
username: miro
- api_link: http://api/api/users/user/
codap_set: ['http://api/api/metadaps/python/']
fedora_username: null
full_name: ''
github_username: null
human_link: http://api/user/user/
id: 2
metadap_set: []
username: user
'''
search_yaml = '''
count: 1
next: null
previous: null
results:
- content_object:
active: true
api_link: http://dapi/api/metadaps/python/
average_rank: 5.0
comaintainers: ['http://dapi/api/users/dummy1/']
dap_set: []
human_link: http://dapi/dap/python/
id: 1
latest: null
latest_stable: null
package_name: python
rank_count: 1
reports: 0
similar_daps: ['http://dapi/api/metadaps/bar/']
tags: [all, python 2, python 3]
user: http://dapi/api/users/miro/
content_type: metadap
'''
def test_print_users(self, capfd):
'''Test the print of users'''
desired = 'miro (Miro Hroncok)\nuser\n'
flexmock(dapicli).should_receive('data').and_return(yaml.load(TestDapicli.users_yaml))
dapicli.print_users()
out, err = capfd.readouterr()
assert out == desired
def test_search(self, capfd):
'''Test the print of a search results'''
desired = utils.bold('python') + '\n'
flexmock(dapicli).should_receive('data').and_return(yaml.load(TestDapicli.search_yaml))
dapicli.print_search('python')
out, err = capfd.readouterr()
assert out == desired
def test_get_installed_version_of_missing_package(self):
        '''Test getting the installed version of a missing DAP'''
flexmock(dapicli).should_receive('get_installed_daps').and_return(['foo'])
assert dapicli.get_installed_version_of('bar') is None
def test_get_installed_version_of(self, capsys):
install_path = '/foo/bar'
yaml_path = install_path + 'meta/baz.yaml'
version = '123'
flexmock(dapicli).should_receive('get_installed_daps').and_return(['foo'])
flexmock(dapicli).should_receive('_install_path').and_return(install_path)
flexmock(yaml).should_receive('load').and_return({'version': version})
# Everything goes fine
flexmock(six.moves.builtins).should_receive('open').and_return(
flexmock(read=lambda: u'qux'))
assert dapicli.get_installed_version_of('foo') == version
# File does not exist
ioerror = IOError("[Errno 2] No such file or directory: '{0}'".format(yaml_path))
flexmock(six.moves.builtins).should_receive('open').and_raise(ioerror)
with pytest.raises(Exception): # TODO maybe change to IOError
dapicli.get_installed_version_of('foo')
def test_strip_version_from_dependency(self):
        '''Test the helper function _strip_version_from_dependency(dep)'''
s = dapicli._strip_version_from_dependency
assert s('foo >= 1') == 'foo'
assert s('foo>=1') == 'foo'
assert s('foo == 1') == 'foo'
assert s('foo==1') == 'foo'
assert s('foo <=1 ') == 'foo'
assert s('foo<=1') == 'foo'
def test_install_from_path_nodeps(self):
# Functional mocks
fakedap = flexmock(meta={
'package_name': 'foo',
'version': '1.0',
'dependencies': ['bar-1.0'],
}, extract=lambda x: None)
flexmock(dapi.DapChecker).should_receive('check').and_return(True)
flexmock(dapi.Dap).new_instances(fakedap)
flexmock(dapicli).should_receive('get_installed_daps').and_return([])
flexmock(dapicli).should_receive('_install_path').and_return('.')
flexmock(dapicli).should_call('install_dap').with_args('bar').never()
# Filtering off details
flexmock(os).should_receive('mkdir').and_return()
flexmock(os).should_receive('rename').and_return()
dapicli.install_dap_from_path('/foo', nodeps=True)
def test_get_installed_daps_detailed(self):
'''Test function get_installed_daps_detailed()'''
flexmock(dapicli).should_receive('_data_dirs').and_return(['/1', '/2', '/3'])
flexmock(glob).should_receive('glob').with_args('/1/meta/*.yaml').and_return(
['/1/meta/a.yaml', '/1/meta/b.yaml', '/1/meta/c.yaml'])
flexmock(glob).should_receive('glob').with_args('/2/meta/*.yaml').and_return(
['/2/meta/a.yaml', '/2/meta/b.yaml'])
flexmock(glob).should_receive('glob').with_args('/3/meta/*.yaml').and_return(
['/3/meta/a.yaml'])
builtin = 'builtins' if six.PY3 else '__builtin__'
flexmock(sys.modules[builtin]).should_receive('open').and_return(None)
flexmock(yaml).should_receive('load').and_return(
{'version': 1.0})
expected = {
'a': [
{'version': '1.0', 'location': '/1'},
{'version': '1.0', 'location': '/2'},
{'version': '1.0', 'location': '/3'},
],
'b': [
{'version': '1.0', 'location': '/1'},
{'version': '1.0', 'location': '/2'},
],
'c': [
{'version': '1.0', 'location': '/1'},
],
}
details = dapicli.get_installed_daps_detailed()
assert details == expected
class TestUninstall(object):
def setup_class(self):
self.installed_daps = ['foo', 'bar', 'baz']
def test_uninstall_prompt_works(self, monkeypatch):
inp = 'input' if six.PY3 else 'raw_input'
monkeypatch.setattr(six.moves.builtins, inp, lambda x: 'y') # Putting 'y' on fake stdin
flexmock(dapicli).should_receive('get_installed_daps').and_return(self.installed_daps)
flexmock(dapicli).should_receive('_get_dependencies_of').and_return([])
flexmock(dapicli).should_receive('_install_path').and_return('.')
flexmock(os).should_receive('remove').and_return(None)
assert dapicli.uninstall_dap('foo', True) == ['foo']
monkeypatch.setattr(six.moves.builtins, inp, lambda x: 'n') # Putting 'n' on fake stdin
with pytest.raises(DapiLocalError):
dapicli.uninstall_dap('foo', True)
|
oskopek/devassistant
|
test/dapi/test_dapicli.py
|
Python
|
gpl-2.0
| 6,814
|
# Copyright (C) 2004-2014 Aaron Swartz
# Brian Lalor
# Dean Jackson
# Erik Hetzner
# Etienne Millon <me@emillon.org>
# Joey Hess
# Lindsey Smith <lindsey.smith@gmail.com>
# Marcel Ackermann
# Martin 'Joey' Schulze
# Matej Cepl
# W. Trevor King <wking@tremily.us>
#
# This file is part of rss2email.
#
# rss2email is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) version 3 of
# the License.
#
# rss2email is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# rss2email. If not, see <http://www.gnu.org/licenses/>.
"""Define the ``Feed`` class for handling a list of feeds
"""
import codecs as _codecs
import collections as _collections
import os as _os
import json as _json
import pickle as _pickle
import sys as _sys
from . import LOG as _LOG
from . import config as _config
from . import error as _error
from . import feed as _feed
UNIX = False
try:
import fcntl as _fcntl
# A pox on SunOS file locking methods
if 'sunos' not in _sys.platform:
UNIX = True
except:
pass
# Path to the filesystem root, '/' on POSIX.1 (IEEE Std 1003.1-2008).
ROOT_PATH = _os.path.splitdrive(_sys.executable)[0] or _os.sep
class Feeds (list):
"""Utility class for rss2email activity.
>>> import codecs
>>> import os.path
>>> import json
>>> import tempfile
>>> from .feed import Feed
Setup a temporary directory to load.
>>> tmpdir = tempfile.TemporaryDirectory(prefix='rss2email-test-')
>>> configfile = os.path.join(tmpdir.name, 'rss2email.cfg')
>>> with open(configfile, 'w') as f:
... count = f.write('[DEFAULT]\\n')
... count = f.write('to = a@b.com\\n')
... count = f.write('[feed.f1]\\n')
... count = f.write('url = http://a.net/feed.atom\\n')
... count = f.write('to = x@y.net\\n')
... count = f.write('[feed.f2]\\n')
... count = f.write('url = http://b.com/rss.atom\\n')
>>> datafile = os.path.join(tmpdir.name, 'rss2email.json')
>>> with codecs.open(datafile, 'w', Feeds.datafile_encoding) as f:
... json.dump({
... 'version': 1,
... 'feeds': [
... Feed(name='f1').get_state(),
... Feed(name='f2').get_state(),
... ],
... }, f)
>>> feeds = Feeds(configfiles=[configfile,], datafile=datafile)
>>> feeds.load()
>>> for feed in feeds:
... print(feed)
f1 (http://a.net/feed.atom -> x@y.net)
f2 (http://b.com/rss.atom -> a@b.com)
You can index feeds by array index or by feed name.
>>> feeds[0]
<Feed f1 (http://a.net/feed.atom -> x@y.net)>
>>> feeds[-1]
<Feed f2 (http://b.com/rss.atom -> a@b.com)>
>>> feeds['f1']
<Feed f1 (http://a.net/feed.atom -> x@y.net)>
>>> feeds['missing']
Traceback (most recent call last):
...
IndexError: missing
Tweak the feed configuration and save.
>>> feeds[0].to = None
>>> feeds.save()
>>> print(open(configfile, 'r').read().rstrip('\\n'))
... # doctest: +REPORT_UDIFF, +ELLIPSIS
[DEFAULT]
from = user@rss2email.invalid
...
verbose = warning
<BLANKLINE>
[feed.f1]
url = http://a.net/feed.atom
<BLANKLINE>
[feed.f2]
url = http://b.com/rss.atom
Cleanup the temporary directory.
>>> tmpdir.cleanup()
"""
datafile_version = 2
datafile_encoding = 'utf-8'
def __init__(self, configfiles=None, datafile=None, config=None):
super(Feeds, self).__init__()
if configfiles is None:
configfiles = self._get_configfiles()
self.configfiles = configfiles
if datafile is None:
datafile = self._get_datafile()
self.datafile = _os.path.realpath(datafile)
if config is None:
config = _config.CONFIG
self.config = config
self._datafile_lock = None
def __getitem__(self, key):
for feed in self:
if feed.name == key:
return feed
try:
index = int(key)
except ValueError as e:
raise IndexError(key) from e
return super(Feeds, self).__getitem__(index)
def __append__(self, feed):
feed.load_from_config(self.config)
feed = super(Feeds, self).append(feed)
def __pop__(self, index=-1):
        feed = super(Feeds, self).pop(index)
if feed.section in self.config:
self.config.pop(feed.section)
return feed
def index(self, index):
if isinstance(index, int):
try:
return self[index]
except IndexError as e:
raise _error.FeedIndexError(index=index, feeds=self) from e
elif isinstance(index, str):
try:
index = int(index)
except ValueError:
pass
else:
return self.index(index)
for feed in self:
if feed.name == index:
return feed
try:
super(Feeds, self).index(index)
except (IndexError, ValueError) as e:
raise _error.FeedIndexError(index=index, feeds=self) from e
def remove(self, feed):
super(Feeds, self).remove(feed)
if feed.section in self.config:
self.config.pop(feed.section)
def clear(self):
while self:
self.pop(0)
def _get_configfiles(self):
"""Get configuration file paths
Following the XDG Base Directory Specification.
"""
config_home = _os.environ.get(
'XDG_CONFIG_HOME',
_os.path.expanduser(_os.path.join('~', '.config')))
config_dirs = [config_home]
config_dirs.extend(
_os.environ.get(
'XDG_CONFIG_DIRS',
_os.path.join(ROOT_PATH, 'etc', 'xdg'),
).split(':'))
# reverse because ConfigParser wants most significant last
return list(reversed(
[_os.path.join(config_dir, 'rss2email.cfg')
for config_dir in config_dirs]))
def _get_datafile(self):
"""Get the data file path
Following the XDG Base Directory Specification.
"""
data_home = _os.environ.get(
'XDG_DATA_HOME',
_os.path.expanduser(_os.path.join('~', '.local', 'share')))
data_dirs = [data_home]
data_dirs.extend(
_os.environ.get(
'XDG_DATA_DIRS',
':'.join([
_os.path.join(ROOT_PATH, 'usr', 'local', 'share'),
_os.path.join(ROOT_PATH, 'usr', 'share'),
]),
).split(':'))
datafiles = [_os.path.join(data_dir, 'rss2email.json')
for data_dir in data_dirs]
for datafile in datafiles:
if _os.path.isfile(datafile):
return datafile
return datafiles[0]
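    # Resolution example (sketch): with XDG_DATA_HOME unset and the default
    # XDG_DATA_DIRS, the candidates above are checked in order:
    #   ~/.local/share/rss2email.json
    #   /usr/local/share/rss2email.json
    #   /usr/share/rss2email.json
    # and the first existing file wins (else the first candidate is used).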
def load(self, lock=True, require=False):
_LOG.debug('load feed configuration from {}'.format(self.configfiles))
if self.configfiles:
self.read_configfiles = self.config.read(self.configfiles)
else:
self.read_configfiles = []
_LOG.debug('loaded configuration from {}'.format(
self.read_configfiles))
self._load_feeds(lock=lock, require=require)
def _load_feeds(self, lock, require):
_LOG.debug('load feed data from {}'.format(self.datafile))
if not _os.path.exists(self.datafile):
if require:
raise _error.NoDataFile(feeds=self)
_LOG.info('feed data file not found at {}'.format(self.datafile))
_LOG.debug('creating an empty data file')
dirname = _os.path.dirname(self.datafile)
if dirname and not _os.path.isdir(dirname):
_os.makedirs(dirname, mode=0o700, exist_ok=True)
with _codecs.open(self.datafile, 'w', self.datafile_encoding) as f:
self._save_feed_states(feeds=[], stream=f)
try:
self._datafile_lock = _codecs.open(
self.datafile, 'r', self.datafile_encoding)
except IOError as e:
raise _error.DataFileError(feeds=self) from e
locktype = 0
if lock and UNIX:
locktype = _fcntl.LOCK_EX
_fcntl.flock(self._datafile_lock.fileno(), locktype)
self.clear()
level = _LOG.level
handlers = list(_LOG.handlers)
feeds = []
try:
data = _json.load(self._datafile_lock)
except ValueError as e:
_LOG.info('could not load data file using JSON')
data = self._load_pickled_data(self._datafile_lock)
version = data.get('version', None)
if version != self.datafile_version:
data = self._upgrade_state_data(data)
for state in data['feeds']:
feed = _feed.Feed(name='dummy-name')
feed.set_state(state)
if 'name' not in state:
raise _error.DataFileError(
feeds=self,
message='missing feed name in datafile {}'.format(
self.datafile))
feeds.append(feed)
_LOG.setLevel(level)
_LOG.handlers = handlers
self.extend(feeds)
if locktype == 0:
self._datafile_lock.close()
self._datafile_lock = None
for feed in self:
feed.load_from_config(self.config)
feed_names = set(feed.name for feed in self)
order = _collections.defaultdict(lambda: (1e3, ''))
for i,section in enumerate(self.config.sections()):
if section.startswith('feed.'):
name = section[len('feed.'):]
order[name] = (i, name)
if name not in feed_names:
_LOG.debug(
('feed {} not found in feed file, '
'initializing from config').format(name))
self.append(_feed.Feed(name=name, config=self.config))
feed_names.add(name)
def key(feed):
return order[feed.name]
self.sort(key=key)
def _load_pickled_data(self, stream):
_LOG.info('try and load data file using Pickle')
with open(self.datafile, 'rb') as f:
feeds = list(feed.get_state() for feed in _pickle.load(f))
return {
'version': self.datafile_version,
'feeds': feeds,
}
def _upgrade_state_data(self, data):
version = data.get('version', 'unknown')
if version == 1:
for feed in data['feeds']:
seen = feed['seen']
for guid,id_ in seen.items():
seen[guid] = {'id': id_}
return data
raise NotImplementedError(
'cannot convert data file from version {} to {}'.format(
version, self.datafile_version))
def save(self):
dst_config_file = _os.path.realpath(self.configfiles[-1])
_LOG.debug('save feed configuration to {}'.format(dst_config_file))
for feed in self:
feed.save_to_config()
dirname = _os.path.dirname(dst_config_file)
if dirname and not _os.path.isdir(dirname):
_os.makedirs(dirname, mode=0o700, exist_ok=True)
tmpfile = dst_config_file + '.tmp'
with open(tmpfile, 'w') as f:
self.config.write(f)
f.flush()
_os.fsync(f.fileno())
_os.rename(tmpfile, dst_config_file)
self._save_feeds()
def _save_feeds(self):
_LOG.debug('save feed data to {}'.format(self.datafile))
dirname = _os.path.dirname(self.datafile)
if dirname and not _os.path.isdir(dirname):
_os.makedirs(dirname, mode=0o700, exist_ok=True)
tmpfile = self.datafile + '.tmp'
with _codecs.open(tmpfile, 'w', self.datafile_encoding) as f:
self._save_feed_states(feeds=self, stream=f)
f.flush()
_os.fsync(f.fileno())
_os.rename(tmpfile, self.datafile)
if UNIX and self._datafile_lock is not None:
self._datafile_lock.close() # release the lock
self._datafile_lock = None
def _save_feed_states(self, feeds, stream):
_json.dump(
{'version': self.datafile_version,
'feeds': list(feed.get_state() for feed in feeds),
},
stream,
indent=2,
separators=(',', ': '),
)
stream.write('\n')
def new_feed(self, name=None, prefix='feed-', **kwargs):
"""Return a new feed, possibly auto-generating a name.
>>> feeds = Feeds()
>>> print(feeds.new_feed(name='my-feed'))
my-feed (None -> a@b.com)
>>> print(feeds.new_feed())
feed-0 (None -> a@b.com)
>>> print(feeds.new_feed())
feed-1 (None -> a@b.com)
>>> print(feeds.new_feed(name='feed-1'))
Traceback (most recent call last):
...
rss2email.error.DuplicateFeedName: duplicate feed name 'feed-1'
"""
feed_names = [feed.name for feed in self]
if name is None:
i = 0
while True:
name = '{}{}'.format(prefix, i)
if name not in feed_names:
break
i += 1
elif name in feed_names:
feed = self[name]
raise _error.DuplicateFeedName(name=feed.name, feed=feed)
feed = _feed.Feed(name=name, **kwargs)
self.append(feed)
return feed
|
sciunto/rss2email
|
rss2email/feeds.py
|
Python
|
gpl-2.0
| 14,342
|
import six
import json
from abc import abstractproperty, abstractmethod
from requests.structures import CaseInsensitiveDict
class BaseProcessor(object):
NAME = None
def process_raw(self, raw_doc, **kwargs):
pass # pragma: no cover
def process_normalized(self, raw_doc, normalized, **kwargs):
pass # pragma: no cover
@abstractmethod
def documents(self, *sources):
'''
an iterator that will return documents
'''
raise NotImplementedError
class BaseDatabaseManager(object):
'''A base class for database managers in the scrapi processing module
Must handle setup, teardown, and multi-process initialization of database connections
All errors should be logged, but not thrown
'''
@abstractmethod
def setup(self):
'''Sets up the database connection. Returns True if the database connection
is successful, False otherwise
'''
raise NotImplementedError
@abstractmethod
def tear_down(self):
'''Tears down the database connection.
'''
raise NotImplementedError
@abstractmethod
def clear(self, force=False):
'''Deletes everything in a table/keyspace etc
Should fail if called on the production database
for testing purposes only
'''
raise NotImplementedError
@abstractmethod
def celery_setup(self, *args, **kwargs):
'''Performs the necessary operations to allow a new process to connect to the database
'''
raise NotImplementedError
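# Illustrative sketch (not part of scrapi): a minimal concrete manager that
# satisfies the abstract contract above. A real backend would manage actual
# connections; here a flag stands in for the connection state.
class InMemoryDatabaseManager(BaseDatabaseManager):

    def __init__(self):
        self.connected = False

    def setup(self):
        # pretend to open a connection; report success
        self.connected = True
        return True

    def tear_down(self):
        self.connected = False

    def clear(self, force=False):
        # refuse to clear unless explicitly forced (testing only)
        if not force:
            raise RuntimeError('clear() requires force=True')

    def celery_setup(self, *args, **kwargs):
        # a new worker process just re-runs setup in this sketch
        self.setup()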
class BaseHarvesterResponse(object):
"""A parody of requests.response but stored in a database for caching
Should reflect all methods of a response object
Contains an additional field time_made, self-explanatory
"""
class DoesNotExist(Exception):
pass
@abstractproperty
def method(self):
raise NotImplementedError
@abstractproperty
def url(self):
raise NotImplementedError
@abstractproperty
def ok(self):
raise NotImplementedError
@abstractproperty
def content(self):
raise NotImplementedError
@abstractproperty
def encoding(self):
raise NotImplementedError
@abstractproperty
def headers_str(self):
raise NotImplementedError
@abstractproperty
def status_code(self):
raise NotImplementedError
@abstractproperty
def time_made(self):
raise NotImplementedError
@classmethod
@abstractmethod
def get(self, url=None, method=None):
raise NotImplementedError
@abstractmethod
def save(self):
raise NotImplementedError
@abstractmethod
def update(self, **kwargs):
raise NotImplementedError
def json(self):
try:
content = self.content.decode('utf-8')
except AttributeError: # python 3eeeee!
content = self.content
return json.loads(content)
@property
def headers(self):
return CaseInsensitiveDict(json.loads(self.headers_str))
@property
def text(self):
return six.u(self.content)
|
mehanig/scrapi
|
scrapi/processing/base.py
|
Python
|
apache-2.0
| 3,170
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 22 09:28:26 2017
@author: A
"""
import numpy as np
import matplotlib.pyplot as plt
plt.close('all')
nCell=3
betaG=0.6
betaC=0.3
betaStart=0.1
betaEnd=0.9
betaStep=0.001
def TTFC(nCell,betaC,betaG):
if (np.abs(betaC-betaG)<1.0e-6):
return np.pi/4.
else:
return (betaC/betaG)**2*np.cos(np.pi*nCell/(2*betaC/betaG))*(-1)**((nCell-1)/2)/(nCell*((betaC/betaG)**2-1))
def TTFS(nCell,betaC,betaG):
if (np.abs(betaC-betaG)<1.0e-6):
return np.pi/4.
else:
return (betaC/betaG)**2*np.sin(np.pi*nCell/(2*betaC/betaG))*(-1)**((nCell+2)/2)/(nCell*((betaC/betaG)**2-1))
def TTF(nCell,betaC,betaG):
if (np.mod(nCell,2)==0):
return TTFS(nCell,betaC,betaG)
elif (np.mod(nCell,2)==1):
return TTFC(nCell,betaC,betaG)
else:
pass
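# Quick sanity check (illustrative, not in the original script): at the
# synchronous point betaC == betaG the transit-time factor reduces to pi/4
# regardless of the cell count, e.g.
#   TTF(3, 0.6, 0.6)  # -> 0.7853981... == np.pi/4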
def TTFGen(nCell,betaG,betaStart,betaEnd,betaStep):
betaArray=np.arange(betaStart,betaEnd,betaStep)
TTFArray=np.zeros_like(betaArray)
nBetaC=0
for iBetaC in betaArray:
TTFArray[nBetaC]=TTF(nCell,iBetaC,betaG)
nBetaC+=1
return betaArray,TTFArray
betaArray,TTFArray=TTFGen(nCell,betaG,betaStart,betaEnd,betaStep)
plt.figure('Beta - TTF')
plt.plot(betaArray,TTFArray,'.')
|
iABC2XYZ/abc
|
Scripts/TTFGen/TTF.py
|
Python
|
gpl-3.0
| 1,362
|
import numpy as np
from skimage.color import rgb2gray, rgb2hsv, rgb2lab, rgb2luv
from scipy.stats import skew
from sklearn.metrics.cluster import entropy
import time
def features(img, kernels):
"""
    Calculates features from a randomly sampled block of the image
Parameters
----------
img: 2D array
The image for calculate its features
kernels: gabor kernels
A list of gabor kernels
Returns:
    Two values, X and y: X holds the feature values of the block and y holds the ground-truth label (0 or 1).
"""
pix = img.get_random_pixel()
blk = img.get_block(pix)
val = values(blk, kernels)
mel = img.get_ground_pixel(pix)
return val, mel
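# Typical call pattern (sketch; `img` is assumed to be the project's image
# wrapper exposing get_random_pixel/get_block/get_ground_pixel, and
# `kernels` a list of gabor kernels as used below):
#   X_row, y_label = features(img, kernels)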
def values(blk, kernels):
rgb = rgb_features(blk) # 15
hsv = hsv_features(blk) # 15
lab = lab_features(blk) # 15
luv = luv_features(blk) # 15
# start_time = time.time()
gab = gabor_filter(blk, kernels) # the number of features depends on config parameters (4, 8, ...)
# print("--- %s seconds ---" % (time.time() - start_time))
return [*rgb, *hsv, *lab, *luv, *gab] # 60 + ngabor
def rgb_features(block):
return [*mean(block), *std_dev(block), *skew_(block), *variance(block), *entropy_(block)]
def hsv_features(block):
blk = rgb2hsv(block)
return [*mean(blk), *std_dev(blk), *skew_(blk), *variance(blk), *entropy_(blk)]
def lab_features(block):
blk = rgb2lab(block)
return [*mean(blk), *std_dev(blk), *skew_(blk), *variance(blk), *entropy_(blk)]
def luv_features(block):
blk = rgb2luv(block)
return [*mean(blk), *std_dev(blk), *skew_(blk), *variance(blk), *entropy_(blk)]
def mean(block):
"""
Function to calculate the mean of the block
Returns
-------
A 1D array with 3 fields
"""
a = np.mean(block[:, :, 0])
b = np.mean(block[:, :, 1])
c = np.mean(block[:, :, 2])
return [a, b, c]
def std_dev(block):
"""
Function to calculate the standard deviation of the block
Returns
-------
A 1D array with 3 fields
"""
a = np.std(block[:, :, 0])
b = np.std(block[:, :, 1])
c = np.std(block[:, :, 2])
    return [a, b, c]
def skew_(block):
"""
Function to calculate the skewness of the block
"""
a = skew(block[:, :, 0].flatten())
b = skew(block[:, :, 1].flatten())
c = skew(block[:, :, 2].flatten())
return [a, b, c]
def variance(block):
"""
Function to calculate the variance of the block
"""
a = np.var(block[:, :, 0])
b = np.var(block[:, :, 1])
c = np.var(block[:, :, 2])
return [a, b, c]
def entropy_(block):
"""
    Function to calculate the entropy of the block
"""
a = entropy(block[:, :, 0])
b = entropy(block[:, :, 1])
c = entropy(block[:, :, 2])
return [a, b, c]
def gabor_filter(block, kernels):
"""
    Applies the gabor kernels to a block of pixels
"""
block = rgb2gray(block)
feats = np.zeros((len(kernels)), dtype=np.double)
for index, kernel in enumerate(kernels):
feats[index] = np.mean(kernel.magnitude(block))
return feats
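# Illustrative sketch (not part of the original module): the
# `kernel.magnitude(block)` call above assumes kernel objects exposing a
# `magnitude` method; the project's real kernel class is not shown here.
# A hypothetical wrapper following the standard skimage gabor recipe
# (convolve with the real and imaginary parts of the complex kernel, then
# take the point-wise magnitude) could look like this:
from scipy import ndimage as _ndi
from skimage.filters import gabor_kernel as _gabor_kernel

class MagnitudeGaborKernel(object):
    """Hypothetical kernel wrapper compatible with gabor_filter() above."""

    def __init__(self, frequency, theta=0):
        # gabor_kernel returns a complex 2D array
        self.kernel = _gabor_kernel(frequency, theta=theta)

    def magnitude(self, image):
        real = _ndi.convolve(image, np.real(self.kernel), mode='wrap')
        imag = _ndi.convolve(image, np.imag(self.kernel), mode='wrap')
        return np.sqrt(real ** 2 + imag ** 2)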
|
benitesf/Skin-Lesion-Analysis-Towards-Melanoma-Detection
|
features_extraction/methods/second_feature_extraction.py
|
Python
|
mit
| 3,137
|
# -*- coding: utf-8 -*-
"""
model.users.py
~~~~~~~~~~~
Manages the users.
> db.users.findOne()
{
"_id" : ObjectId("4f4a8387d8e40802ea000001"),
"description" : "",
"email" : "admin@bombolone.com",
"image" : [],
"lan" : "en",
"language" : "English",
"location" : "",
"name" : "Admin Name",
"password" : "9c1303484c9e5e33f14a0da9628478f6e3d62b610192023a7bbd73250516f069df18b500",
"rank" : 10,
"status" : 1,
"time_zone" : "Europe/London",
"username" : "Admin",
"web" : ""
}
:copyright: (c) 2014 by @zizzamia
:license: BSD (See LICENSE for details)
"""
import re
from datetime import datetime
from flask import current_app, g
from pymongo import ASCENDING, DESCENDING
# Imports inside Bombolone
from config import ACTIVATED
from shared import app, db
from model_engine import db_engine
from core.utils import ensure_objectid, is_iterable
def find(user_id=None,
username=None,
email=None,
rank=None,
lan=None,
expand_rank=False,
sorted_by='username',
sort_ascending=True,
only_one=False,
my_rank=None,
my_id=None):
"""
Returns a list of users or a single user, if user_id or only_one are specified.
user_id: a single user identifier (a string or an ObjectId) or a list of them
username: the unique user's name
sort_ascending: if True, sorts the results from first to last, if False sorts them the other way
only_one: if True, returns one tag at most
"""
def denormalize(user):
if user is None:
return user
if expand_rank:
user['rank_name'] = { x['rank'] : x['name'] for x in g.db.ranks.find() }[user['rank']]
# Data we want to show to our Soft Eng or the private user
if isinstance(my_rank, int) and my_rank <= 70:
return user
# Data we want to show to our private user
if str(my_id) == str(user["_id"]):
return user
# Data we want to show after sign in, to all
user_to_show = {
"_id" : user.get("_id", None),
"rank": user.get("rank", None),
"description": user.get("description", ""),
"image": user.get("image", ""),
"location": user.get("location", ""),
"name": user.get("name", ""),
"username": user.get("username", ""),
"web": user.get("web", "")
}
return user_to_show
if username:
if is_iterable(username):
list_users = list(db.users.find({"username" : {"$in": list(username)}}))
return [ denormalize(u) for u in list_users ]
else:
regex = re.compile('^'+username+'$', re.IGNORECASE)
return denormalize(db.users.find_one({"username" : regex}))
# First, builds the filter conditions list
conditions = []
if email:
email = email.lower()
conditions.append({'email': email})
if rank:
conditions.append({'rank': rank})
if lan:
conditions.append({'lan': lan})
return db_engine(collection=db.users,
item_id=user_id,
only_one=only_one,
conditions=conditions,
sorted_by=sorted_by,
sort_ascending=sort_ascending,
denormalize=denormalize)
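# Example queries (sketch, using only the parameters documented above):
#   find(username='Admin')                       # case-insensitive single lookup
#   find(rank=80, sorted_by='username')          # all rank-80 users, A-Z
#   find(user_id=some_object_id, only_one=True)  # one user by ObjectId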
def create(user=None):
"""
Create user
"""
if not "username" in user:
return (None, "error_model_users_create_username")
if not "lan" in user:
return (None, "error_model_users_create_ot_lan")
if not "language" in user:
return (None, "error_model_users_create_ot_language")
new_user = {
"created": datetime.utcnow(),
"description": "",
"email": user.get("email"),
"image": [],
"location": "",
"name": "",
"username": user["username"],
"password": user["password"],
"rank": 80,
"lan": user.get("lan", "en"),
"language": user.get("language", "english"),
"time_zone": "Europe/London",
"web": "",
"status": user.get("status", ACTIVATED)
}
_id = db.users.insert(new_user)
return (_id, None)
def update(user_id=None,
email=None,
lan=None,
language=None,
image=None,
password=None,
status=None,
unset=None,
pull=None,
addToSet=None,
user=None):
"""
Update one or more users
"""
user_id = ensure_objectid(user_id)
if user_id is None:
return False
if user:
db.users.update({"_id": user_id}, user)
return True
if pull:
db.users.update({"_id": user_id}, {"$pull": pull })
return True
if addToSet:
db.users.update({"_id": user_id}, {"$addToSet": addToSet })
return True
# First, builds the filter conditions list
dict_set = {}
local = locals()
for item in ["email",
"lan",
"language",
"image",
"password",
"status"]:
        if local[item] is not None:
dict_set[item] = local[item]
    if unset is not None:
dict_unset = {}
for item in unset:
dict_unset[item] = 1
if is_iterable(user_id):
for _id in user_id:
if ensure_objectid(_id):
if unset:
db.users.update({"_id": _id}, {"$unset": dict_unset}, False)
db.users.update({"_id": _id}, {"$set": dict_set})
else:
if unset:
db.users.update({"_id": user_id}, {"$unset": dict_unset}, False)
db.users.update({"_id": user_id}, {"$set": dict_set})
db.users.ensure_index('username')
return True
def remove(username=None, my_rank=None):
"""
    Remove a user
"""
if my_rank is None or my_rank > 30:
return False
if username is None:
return False
else:
user = db.users.find_one({"username": username})
if user is None:
return False
if user.get('rank') is None or user["rank"] <= my_rank:
return False
db.users.remove({"username": username})
return True
|
katiecheng/Bombolone
|
model/users.py
|
Python
|
bsd-3-clause
| 6,341
|
#!/usr/bin/env python
#
# This library is free software, distributed under the terms of
# the GNU Lesser General Public License Version 3, or any later version.
# See the COPYING file included in this archive
#
# Thanks to Paul Cannon for IP-address resolution functions (taken from aspn.activestate.com)
import argparse
import os, sys, time, signal
amount = 0
def destroyNetwork(nodes):
print 'Destroying Kademlia network...'
i = 0
for node in nodes:
i += 1
hashAmount = i*50/amount
hashbar = '#'*hashAmount
output = '\r[%-50s] %d/%d' % (hashbar, i, amount)
sys.stdout.write(output)
time.sleep(0.15)
os.kill(node, signal.SIGTERM)
print
def main():
parser = argparse.ArgumentParser(description="Launch a network of dht nodes")
parser.add_argument("amount_of_nodes",
help="The number of nodes to create",
type=int)
parser.add_argument("--nic_ip_address",
help="The network interface on which these nodes will listen for connections "
"from each other and from other nodes. If omitted, an attempt will be "
"made to automatically determine the system's IP address, but this may "
"result in the nodes being reachable only from this system")
args = parser.parse_args()
global amount
amount = args.amount_of_nodes
if args.nic_ip_address:
ipAddress = args.nic_ip_address
else:
import socket
ipAddress = socket.gethostbyname(socket.gethostname())
print 'Network interface IP address omitted; using %s...' % ipAddress
startPort = 4000
port = startPort+1
nodes = []
print 'Creating Kademlia network...'
try:
nodes.append(os.spawnlp(os.P_NOWAIT, 'lbrynet-launch-node', 'lbrynet-launch-node', str(startPort)))
for i in range(amount-1):
time.sleep(0.15)
hashAmount = i*50/amount
hashbar = '#'*hashAmount
output = '\r[%-50s] %d/%d' % (hashbar, i, amount)
sys.stdout.write(output)
nodes.append(os.spawnlp(os.P_NOWAIT, 'lbrynet-launch-node', 'lbrynet-launch-node', str(port), ipAddress, str(startPort)))
port += 1
except KeyboardInterrupt:
        print '\nNetwork creation cancelled.'
destroyNetwork(nodes)
sys.exit(1)
print '\n\n---------------\nNetwork running\n---------------\n'
try:
while 1:
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
destroyNetwork(nodes)
if __name__ == '__main__':
main()
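# Invocation example (sketch), per the argparse definition above:
#   python create_network.py 10 --nic_ip_address 192.168.1.10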
|
DaveA50/lbry
|
lbrynet/create_network.py
|
Python
|
mit
| 2,692
|
# -*- coding: UTF-8 -*-
#
# Python library for parsing public post journals (postlister) in Norway.
#
# Based on the scraper advanced-scraping-pdf
#
# See also
# https://views.scraperwiki.com/run/pdf-to-html-preview-1/
# Possible sources using format 1 pdf:
# www.bydel-ullern.oslo.kommune.no
# www.gravferdsetaten.oslo.kommune.no
# www.halden.kommune.no (done)
# www.havn.oslo.kommune.no (done)
# www.hvaler.kommune.no (done)
# www.kafjord.kommune.no
# www.lier.kommune.no
# www.lindesnes.kommune.no
# www.naroy.kommune.no
# www.saltdal.kommune.no
# www.sogne.kommune.no
# www.vikna.kommune.no
#
# Google search to find more: "Offentlig journal" Seleksjon Sakstittel Dokumenttype Status filetype:pdf
import scraperwiki
import string
import re
from BeautifulSoup import BeautifulSoup
import datetime
import dateutil.parser
def cpu_spent():
import resource
usage = resource.getrusage(resource.RUSAGE_SELF)
return getattr(usage, 'ru_utime') + getattr(usage, 'ru_stime')
def exit_if_no_cpu_left(retval, callback=None, arg = None):
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
spent = cpu_spent()
if soft < spent:
if callback is not None:
callback(arg, spent, hard, soft)
print "Running out of CPU, exiting."
exit(retval)
def fetch_url_harder(url, scraper = None):
import urllib2
html = None
for n in [1, 2, 3]:
try:
if None == scraper:
scraper = scraperwiki.scrape
html = scraper(url)
break
except urllib2.URLError, e:
print "URLError fetching " + url + ", trying again"
return html
class JournalParser:
agency = None
debug = False
validdoctypes = ['I', 'U', 'X', 'N']
senderdoctypes = ['I', 'X', 'N']
recipientdoctypes = ['U']
mustfields = {
'agency' : 1,
'docdesc' : 1,
'doctype' : 1,
'caseyear' : 1,
'caseseqnr' : 1,
'casedocseq' : 1,
}
def __init__(self, agency):
self.agency = agency
def is_valid_doctype(self, doctype):
return doctype in self.validdoctypes
def is_sender_doctype(self, doctype):
return doctype in self.senderdoctypes
def is_recipient_doctype(self, doctype):
return doctype in self.recipientdoctypes
def verify_entry(self, entry):
for field in self.mustfields:
            if field not in entry:
                raise ValueError("Missing required field " + field)
        if not self.is_valid_doctype(entry['doctype']):
            raise ValueError("Invalid doctype " + entry['doctype'])
if -1 != entry['caseid'].find('-'):
raise ValueError("Field caseid should not include dash: " + entry['caseid'])
# Seen in http://home.nuug.no/~pere/nrk-postjournal/Offentlig%20journal%20NRK%200101_15012011.pdf
if 'sender' in entry and -1 != entry['sender'].find("Side: "):
raise ValueError("Field sender got page number, not real content")
#
# Parser of PDFs looking like
# http://www.storfjord.kommune.no/postliste-18-mai-2012.5056067-105358.html (type 1)
# http://www.hadsel.kommune.no/component/docman/doc_download/946-offentlig-postjournal-28032012 (type 2)
# http://www.stortinget.no/Global/pdf/postjournal/pj-2011-06-23.pdf (type 2 variant)
# Note sender/receiver is not yet parsed for type 2 PDFs
class PDFJournalParser(JournalParser):
pagetable = "unparsedpages"
brokenpagetable = "brokenpages"
hiddentext = False
breakonfailure = True
def __init__(self, agency, hiddentext=False):
self.hiddentext = hiddentext
JournalParser.__init__(self, agency=agency)
def is_already_scraped(self, url):
        # Ignore entries where sender and recipient are the result of a broken parser (before 2012-05-25)
for sql in ["scrapedurl, sender, recipient from swdata where scrapedurl = '" + url + "' " +
# FIXME Figure out why this do not work
#" and not (sender = 'parse error' or recipient != 'parse error') " +
"limit 1",
"scrapedurl from " + self.brokenpagetable + " where scrapedurl = '" + url + "' limit 1",
"scrapedurl from " + self.pagetable + " where scrapedurl = '" + url + "' limit 1"]:
try:
result = scraperwiki.sqlite.select(sql)
                #print sql, " : ", result
if 0 < len(result) and u'scrapedurl' in result[0]:
return True
except Exception as e:
#if ('no such table: %s' % self.pagetable) not in str(e) and 'no such table: swdata' not in str(e):
# raise
#print "Ignoring exception: %s" % e
True
return False
# Check if we recognize the page content, and throw if not
def is_valid_page(self, pdfurl, pagenum, pagecontent):
s = BeautifulSoup(pagecontent)
for t in s.findAll('text'):
if t.text != " ":
if 'Innhold:' == t.text:
s = None
return True
s = None
if self.debug:
print "Unrecognized page format for " + pdfurl
raise ValueError("Unrecognized page format for " + pdfurl)
#
# Split PDF content into pages and store in SQL table for later processing.
    # The process is split in two to better handle large PDFs (like 600 pages),
    # without running out of CPU time and without losing track of what is left to
# parse.
def preprocess(self, pdfurl, pdfcontent):
print "Preprocessing PDF " + pdfurl
if not pdfcontent:
raise ValueError("No pdf content passed for " + pdfurl)
if self.hiddentext:
options = '-hidden'
else:
options = ''
xml=scraperwiki.pdftoxml(pdfcontent, options)
if self.debug:
print xml
pages=re.findall('(<page .+?</page>)',xml,flags=re.DOTALL)
xml=None
# print pages[:1][:1000]
pagecount = 0
datastore = []
for page in pages:
pagecount = pagecount + 1
self.is_valid_page(pdfurl, pagecount, page)
data = {
'scrapedurl' : pdfurl,
'pagenum' : pagecount,
'pagecontent' : page,
}
datastore.append(data)
if 0 < len(datastore):
scraperwiki.sqlite.save(unique_keys=['scrapedurl', 'pagenum'], data=datastore, table_name=self.pagetable)
else:
raise ValueError("Unable to find any pages in " + pdfurl)
pages = None
def fetch_and_preprocess(self, pdfurl):
pdfcontent = fetch_url_harder(pdfurl)
self.preprocess(pdfurl, pdfcontent)
pdfcontent = None
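    # Typical two-phase flow (sketch): phase 1 stores raw pages in SQLite so
    # phase 2 can resume parsing later without refetching the PDF.
    #   parser = PDFJournalParser(agency='www.halden.kommune.no')
    #   parser.fetch_and_preprocess(pdfurl)   # phase 1: fetch + split into pages
    #   parser.process_pages()                # phase 2: parse stored pages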
def print_entry(self, entrytext):
for i in range(0, len(entrytext)):
print str(i) + ": '" + entrytext[i] + "'"
def parse_entry_type1(self, entrytext, pdfurl):
scrapestamputc = datetime.datetime.now()
entry = {
'agency' : self.agency,
'scrapestamputc' : scrapestamputc,
'scrapedurl' : pdfurl
}
i = 0
while i < len(entrytext):
#print "T: '" + entrytext[i] + "'"
if 'Innhold:' == entrytext[i]:
tittel = ""
# handle multi-line titles
while 'Sakstittel:' != entrytext[i+1]:
tittel = tittel + " " + entrytext[i+1]
i = i + 1
entry['docdesc'] = tittel
if 'Sakstittel:' == entrytext[i]:
sakstittel = ""
while 'DokType' != entrytext[i+1]:
# print "'" + entrytext[i+1] + "'"
sakstittel = sakstittel + " " + entrytext[i+1]
i = i + 1
entry['casedesc'] = sakstittel
if 'DokType' == entrytext[i]: # Values I/U/N/X from NOARK 4 table 14.2.11
entry['doctype'] = entrytext[i+1]
# As seen on http://www.saltdal.kommune.no/images/module.files/2007-05-16.pdf, page 1
if entry['doctype'] == 'S':
entry['doctype'] = 'X'
i = i + 1
if 'Sak/dok nr:' == entrytext[i]:
# FIXME Split and handle combined sak/løpenr
# Use find('penr.:') to avoid non-ascii search string 'Løpenr.:'
caseid = None
lnr = None
if -1 != entrytext[i+4].find('penr.:'):
caseid = entrytext[i+1] + entrytext[i+2]
lnr = entrytext[i+3]
i = i + 4
elif -1 != entrytext[i+3].find('penr.:'):
caseid = entrytext[i+1]
lnr = entrytext[i+2]
i = i + 3
elif -1 != entrytext[i+2].find('penr.:'):
caseid, lnr = entrytext[i+1].split(" ")
i = i + 2
caseyear, caseseqnr = caseid.split("/")
entry['caseyear'] = int(caseyear)
caseseqnr, casedocseq = caseseqnr.split("-")
entry['caseseqnr'] = int(caseseqnr)
entry['casedocseq'] = int(casedocseq)
entry['caseid'] = caseyear + "/" + caseseqnr
journalseqnr, journalyear = lnr.split("/")
entry['journalid'] = journalyear + "/" + journalseqnr
entry['journalyear'] = int(journalyear)
entry['journalseqnr'] = int(journalseqnr)
# if -1 != text[i].find('penr.:'): # Use find('penr.:') to avoid non-ascii search string 'Løpenr.:'
# str = text[i-1]
# print "S: '" + str + "'"
# data['journalid'] = str
# # FIXME handle combined sak/løpenr
if 'Journaldato:' == entrytext[i]:
entry['recorddate'] = dateutil.parser.parse(entrytext[i-1], dayfirst=True)
if 'Dok.dato:' == entrytext[i]:
entry['docdate'] = dateutil.parser.parse(entrytext[i-1], dayfirst=True)
if 'Tilg.kode Hjemmel:' == entrytext[i] and 'Avsender\mottaker:' != entrytext[i+1]:
entry['exemption'] = entrytext[i+1]
i = i + 1
if 'Tilg.kode' == entrytext[i]:
entry['accesscode'] = entrytext[i+1]
i = i + 1
if 'Hjemmel:' == entrytext[i]:
entry['exemption'] = entrytext[i+1]
i = i + 1
if 'Avsender\mottaker:' == entrytext[i]:
if i+1 < len(entrytext): # Non-empty field
fratil = entrytext[i+1]
i = i + 1
if self.is_sender_doctype(entry['doctype']):
entry['sender'] = fratil
elif self.is_recipient_doctype(entry['doctype']):
entry['recipient'] = fratil
else:
raise ValueError("Case " + entry['caseid'] + " Sender/Recipient with doctype " + entry['doctype'] + " != I/U/X/N in " + pdfurl)
if self.debug:
print entry
i = i + 1
return entry
def parse_case_journal_ref(self, entry, reftext, pdfurl):
try:
# FIXME Split and handle combined sak/loepenr
# Use find('penr.:') to avoid non-ascii search string 'Loepenr.:'
caseid = None
lnr = None
if 4 == len(reftext):
# print "4 " + str(reftext)
caseid = reftext[0] + reftext[1]
lnr = reftext[2] + reftext[3]
# print str(caseid) + " " + str(lnr)
elif 3 == len(reftext):
if -1 != reftext[0].find("/") and -1 != reftext[2].find("/"):
# print "31"
caseid = reftext[0] + reftext[1]
lnr = reftext[2]
elif -1 != reftext[2].find("/"):
# print "32"
caseid = reftext[0] + reftext[1]
lnr = reftext[2]
elif -1 == reftext[2].find("/"):
# print "33"
caseid = reftext[0]
lnr = reftext[1] + reftext[2]
elif 2 == len(reftext):
if -1 == reftext[1].find("/"):
# print "21"
s = reftext[0] + reftext[1]
# print "S: " + s
caseid, lnr = s.split(" ")
elif -1 != reftext[1].find("/"):
# print "22"
caseid = reftext[0]
lnr = reftext[1]
elif 1 == len(reftext):
caseid, lnr = reftext[0].split(" ")
else:
raise ValueError("Unable to parse entry " + str(reftext) + " in " + pdfurl)
# print "C: " + caseid + " L: " + lnr
caseyear, caseseqnr = caseid.split("/")
entry['caseyear'] = int(caseyear)
caseseqnr, casedocseq = caseseqnr.split("-")
entry['caseseqnr'] = int(caseseqnr)
entry['casedocseq'] = int(casedocseq)
entry['caseid'] = caseyear + "/" + caseseqnr
journalseqnr, journalyear = lnr.split("/")
entry['journalid'] = journalyear + "/" + journalseqnr
entry['journalyear'] = int(journalyear)
entry['journalseqnr'] = int(journalseqnr)
except:
print "Unable to parse " + str(reftext)
return entry
def test_parse_case_journal_ref(self):
entry = {}
self.parse_case_journal_ref(entry, [u'2008/16414-', u'23', u'15060/2012'], "")
self.parse_case_journal_ref(entry, [u'2011/15972-1 102773/201', u'1'], "")
self.parse_case_journal_ref(entry, [u'2010/2593-2', u'103004/201', u'1'], "")
self.parse_case_journal_ref(entry, [u'2011/13415-', u'22', u'100077/201', u'1'], "")
def parse_entry_type2(self, entrytext, pdfurl):
scrapestamputc = datetime.datetime.now()
entry = {
'agency' : self.agency,
'scrapestamputc' : scrapestamputc,
'scrapedurl' : pdfurl
}
i = 0
avsender = []
mottaker = []
while i < len(entrytext):
if 'Innhold:' == entrytext[i]:
tittel = ""
# handle multi-line titles
while 'Sakstittel:' != entrytext[i+1]:
tittel = tittel + entrytext[i+1]
i = i + 1
entry['docdesc'] = tittel
if 'Sakstittel:' == entrytext[i]:
sakstittel = ""
                # The "Klassering" (classification) marker belongs to a different document type
while 'DokType' != entrytext[i+1] and 'Dok.Type:' != entrytext[i+1] and 'Klassering:' != entrytext[i+1]:
# print "'" + entrytext[i+1] + "'"
sakstittel = sakstittel + entrytext[i+1]
i = i + 1
entry['casedesc'] = sakstittel
i = i + 1
if 'DokType' == entrytext[i] or 'Dok.Type:' == entrytext[i]: # Values I/U/N/X from NOARK 4 table 14.2.11
entry['doctype'] = entrytext[i+1]
# As seen on http://www.uis.no/getfile.php/Journal%20200612.pdf
if entry['doctype'] == 'S':
entry['doctype'] = 'X'
i = i + 1
if 'Sak/dok nr:' == entrytext[i] or 'Sak/dok.nr:' == entrytext[i]:
endi = i
while endi < len(entrytext):
if -1 != entrytext[endi].find('penr.:') or -1 != entrytext[endi].find('penr:'):
break
endi = endi + 1
entry = self.parse_case_journal_ref(entry, entrytext[i+1:endi], pdfurl)
i = endi + 1
# if -1 != text[i].find('penr.:'): # Use find('penr.:') to avoid non-ascii search string 'Løpenr.:'
# str = text[i-1]
# print "S: '" + str + "'"
# data['journalid'] = str
# # FIXME handle combined sak/løpenr
if 'Journaldato:' == entrytext[i]:
entry['recorddate'] = dateutil.parser.parse(entrytext[i-1], dayfirst=True)
if 'Dok.dato:' == entrytext[i]:
entry['docdate'] = dateutil.parser.parse(entrytext[i-1], dayfirst=True)
if 'Tilg.kode Hjemmel:' == entrytext[i] and '(enhet/initialer):' != entrytext[i+2]:
entry['exemption'] = entrytext[i+1]
i = i + 1
if 'Tilg.kode' == entrytext[i]:
entry['accesscode'] = entrytext[i+1]
i = i + 1
if 'Hjemmel:' == entrytext[i]:
entry['exemption'] = entrytext[i+1]
i = i + 1
# if -1 != text[i].find('Avs./mottaker:'):
# FIXME Need to handle senders and receivers
if 'Mottaker' == entrytext[i]:
mottaker.append(entrytext[i-1])
if 'Avsender' == entrytext[i]:
avsender.append(entrytext[i-1])
# entry['sender'] = 'parse error'
# entry['recipient'] = 'parse error'
i = i + 1
if 0 < len(mottaker):
entry['recipient'] = string.join(mottaker, ", ")
if 0 < len(avsender):
entry['sender'] = string.join(avsender, ", ")
return entry
def parse_page(self, pdfurl, pagenum, pagecontent):
print "Scraping " + pdfurl + " page " + str(pagenum)
s = BeautifulSoup(pagecontent)
datastore = []
text = []
linecount = 0
if self.debug:
print s
for t in s.findAll('text'):
if t.text != " ":
text.append(t.text)
if self.debug:
print str(linecount) + ": " + t.text
# FIXME Remove length limit when working
# if 100 <= linecount:
# break
linecount = linecount + 1
# if -1 != t.text.find("Side:"):
# print t.text
s = None
# print "Found " + str(linecount) + " lines/text fragments in the PDF"
if len(text) < linecount:
raise ValueError("Text array too sort!")
# First count how many entries to expect on this page, to be able to
# verify that all of them were found.
entrycount = 0
i = 0
while i < len(text):
if 'Innhold:' == text[i]:
entrycount = entrycount + 1
i = i + 1
i = 0
while i < len(text):
if self.debug:
print "T: '" + text[i] + "'"
if self.debug and -1 != text[i].find("Side:"):
print text[i]
if 'Innhold:' == text[i]:
endi = i + 1
pdfparser = None
format = "unknown"
while endi < len(text):
if 'Klassering:' == text[endi]:
pdfparser = self.parse_entry_type2
format = "type2"
if 'Avsender\mottaker:' == text[endi]:
pdfparser = self.parse_entry_type1
format = "type1"
if 'Innhold:' == text[endi]:
break
endi = endi + 1
if self.debug:
print "Entry " + str(entrycount) + " from " + str(i) + " to " + str(endi) + " ie " + str(endi - i) + " lines"
try:
if pdfparser is None:
raise ValueError("Unrecognized page format in " + pdfurl)
entry = pdfparser(text[i:endi], pdfurl)
if 'caseid' not in entry or entry['caseid'] is None or \
not self.is_valid_doctype(entry['doctype']):
raise ValueError("Unable to parse " + pdfurl + " as format " + format + " [" + str(entry) + "]")
# print entry
datastore.append(entry)
i = endi - 2
except:
self.print_entry(text[i:endi])
raise
i = i + 1
# print data
# print "Found " + str(len(datastore)) + " of " + str(entrycount) + " entries"
if entrycount != len(datastore):
# print text
raise ValueError("Unable to parse all entries in " + pdfurl)
if 0 == len(datastore):
print "Unable to find any entries in " + pdfurl
else:
scraperwiki.sqlite.save(unique_keys=['caseid', 'casedocseq'], data=datastore)
datastore = None
text = None
def process_pages(self):
brokenpages = 0
try:
sqlselect = "* from " + self.pagetable + " limit 1"
pageref = scraperwiki.sqlite.select(sqlselect)
while pageref:
scrapedurl = pageref[0]['scrapedurl']
pagenum = pageref[0]['pagenum']
pagecontent = pageref[0]['pagecontent']
# print "Found " + scrapedurl + " page " + str(pagenum) + " length " + str(len(pagecontent))
try:
sqldelete = "delete from " + self.pagetable + " where scrapedurl = '" + scrapedurl + "' and pagenum = " + str(pagenum)
self.parse_page(scrapedurl, pagenum, pagecontent)
# print "Trying to: " + sqldelete
scraperwiki.sqlite.execute(sqldelete)
except ValueError, e:
brokenpage = {
'scrapedurl' : scrapedurl,
'pagenum' : pagenum,
'pagecontent' : pagecontent,
'failstamp' : datetime.datetime.now(),
}
print "Unsupported page %d from %s" % (pagenum, scrapedurl)
brokenpages = brokenpages + 1
scraperwiki.sqlite.save(unique_keys=['scrapedurl', 'pagenum'], data=brokenpage, table_name=self.brokenpagetable)
scraperwiki.sqlite.execute(sqldelete)
scraperwiki.sqlite.commit()
pageref = scraperwiki.sqlite.select(sqlselect)
# Last, try some of the broken pages again, in case we got support for handling them in the mean time
try:
# First, check if the table exist
scraperwiki.sqlite.execute("select * from " + self.brokenpagetable)
newtrystamp = datetime.datetime.now()
sqlselect = "* from " + self.brokenpagetable + " where failstamp is NULL or failstamp < '" + str(newtrystamp) + "'" + " limit 1"
try:
pageref = scraperwiki.sqlite.select(sqlselect)
except scraperwiki.sqlite.SqliteError, e:
scraperwiki.sqlite.execute("ALTER TABLE " + self.brokenpagetable + " ADD COLUMN failstamp")
scraperwiki.sqlite.commit()
pageref = scraperwiki.sqlite.select(sqlselect)
pagelimit = 10
while pageref and 0 < pagelimit:
pagelimit = pagelimit - 1
scrapedurl = pageref[0]['scrapedurl']
pagenum = pageref[0]['pagenum']
pagecontent = pageref[0]['pagecontent']
# print "Found " + scrapedurl + " page " + str(pagenum) + " length " + str(len(pagecontent))
try:
sqldelete = "delete from " + self.brokenpagetable + " where scrapedurl = '" + scrapedurl + "' and pagenum = " + str(pagenum)
self.parse_page(scrapedurl, pagenum, pagecontent)
# print "Trying to: " + sqldelete
scraperwiki.sqlite.execute(sqldelete)
except ValueError, e:
brokenpage = {
'scrapedurl' : scrapedurl,
'pagenum' : pagenum,
'pagecontent' : pagecontent,
'failstamp' : newtrystamp,
}
print "Still unsupported page %d from %s" % (pagenum, scrapedurl)
brokenpages = brokenpages + 1
scraperwiki.sqlite.save(unique_keys=['scrapedurl', 'pagenum'], data=brokenpage, table_name=self.brokenpagetable)
scraperwiki.sqlite.commit()
pageref = scraperwiki.sqlite.select(sqlselect)
except:
True # Ignore missing brokenpages table
except scraperwiki.sqlite.SqliteError, e:
print str(e)
raise
if 0 < brokenpages:
raise ValueError("Found %d pages with unsupported format" % brokenpages)
def fieldlist():
import urllib2
import json
scrapers = [
'postliste-universitetet-i-oslo',
'postliste-lindesnes',
'postliste-kristiansund',
'postliste-stortinget',
'postliste-arendal',
'postliste-oep',
'postliste-ballangen',
'postliste-hadsel',
'postliste-storfjord',
'postliste-oslo-havn',
]
keys = {}
for scraper in scrapers:
url = 'https://api.scraperwiki.com/api/1.0/scraper/getinfo?format=jsondict&name=' + scraper + '&version=-1'
response = urllib2.urlopen(url)
html = response.read()
data = json.loads(html)
if 'swdata' in data[0]['datasummary']['tables']:
for key in data[0]['datasummary']['tables']['swdata']['keys']:
key = key.lower()
if key in keys:
keys[key].append(scraper)
else:
keys[key] = [scraper]
def lensort(a, b):
return cmp(len(keys[b]), len(keys[a]))
for key in sorted(keys.keys(), lensort):
print len(keys[key]), key, str(keys[key])
if __name__ == "scraper":
fieldlist()
# Author: https://classic.scraperwiki.com/profiles/pere/ - Petter Reinholdtsen
# Source: https://classic.scraperwiki.com/scrapers/postliste-python-lib
|
Kagee/nuug-postliste-scrapers
|
lib/postlistepythonlib.py
|
Python
|
gpl-3.0
| 26,254
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext as _
from taiga.base.api import validators, serializers
from taiga.base.exceptions import ValidationError
from taiga.projects.models import Membership
from taiga.projects.validators import ProjectExistsValidator
class AssignedToValidator:
def validate_assigned_to(self, attrs, source):
assigned_to = attrs[source]
project = (attrs.get("project", None) or
getattr(self.object, "project", None))
if assigned_to and project:
filters = {
"project_id": project.id,
"user_id": assigned_to.id
}
if not Membership.objects.filter(**filters).exists():
raise ValidationError(_("The user must be a project member."))
return attrs
class PromoteToUserStoryValidator(ProjectExistsValidator, validators.Validator):
project_id = serializers.IntegerField()
|
taigaio/taiga-back
|
taiga/projects/mixins/validators.py
|
Python
|
agpl-3.0
| 1,663
|
def reverse(array, i, j):
while i < j:
array[i], array[j] = array[j], array[i]
i += 1
j -= 1
def reverse_words(string):
arr = string.strip().split() # arr is list of words
n = len(arr)
reverse(arr, 0, n-1)
return " ".join(arr)
if __name__ == "__main__":
test = "I am keon kim and I like pizza"
print(test)
print(reverse_words(test))
|
keon/algorithms
|
algorithms/strings/reverse_words.py
|
Python
|
mit
| 396
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>, Philippe Heer
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import models, fields, api, _
from openerp.addons.message_center_compassion.mappings \
import base_mapping as mapping
class ChildLifecycleEvent(models.Model):
""" A child lifecycle event (BLE) """
_name = 'compassion.child.ble'
_description = 'Child Lifecycle Event'
_inherit = 'translatable.model'
_order = 'date desc, id desc'
child_id = fields.Many2one(
'compassion.child', 'Child', required=True, ondelete='cascade',
readonly=True)
global_id = fields.Char(readonly=True, required=True)
date = fields.Datetime(readonly=True)
type = fields.Selection([
('Planned Exit', 'Planned Exit'),
('Registration', 'Registration'),
('Reinstatement', 'Reinstatement'),
('Transfer', 'Transfer'),
('Transition', 'Transition'),
('Unplanned Exit', 'Unplanned Exit'),
], readonly=True)
# All reasons for all request types
request_reason = fields.Selection([
# Planned Exit
('Reached Maximum Age', _('{he} reached maximum age')),
('Reached the end of the relevant programs available at the church '
'partner', _('{he} reached the end of the relevant programs '
'available')),
('Reached max age or completion date but did not fulfill '
'completion plan',
'Reached max age or completion date but did not fulfill '
'completion plan'),
('Reached the end of relevant programs available at ICP',
'Reached the end of relevant programs available at ICP'),
# Reinstatement
('Beneficiary Exit was mistake', 'Exit was mistake'),
('Beneficiary Moved Back', 'Beneficiary Moved Back'),
('Family Needs Help Again', 'Family Needs Help Again'),
('No Longer Sponsored by Another Organization',
'No Longer Sponsored by Another Organization'),
('Other (enter reason below)', 'Other'),
('Other (Enter Reason)', 'Other (Enter Reason)'),
# Transfer
('Relocation: Caregiver\'s Work.', 'Relocation: Caregiver\'s Work.'),
('Relocation: Moved To Another Area',
'Relocation: Moved To Another Area'),
('Relocation: Vocational/Technical Or Higher Education.',
'Relocation: Vocational/Technical Or Higher Education.'),
('Programming Availability Is A Better Fit For Beneficiary',
'Programming Availability Is A Better Fit For Beneficiary'),
('Project Closure', 'Project Closure'),
('Project Downsizing', 'Project Downsizing'),
('Special Needs', 'Special Needs'),
('Beneficiary/Caregiver moving to another location',
'Beneficiary/Caregiver moving to another location'),
# Unplanned Exit
('Child / Caregiver does not comply with policies',
_('{he} does not comply with policies')),
('Child in system under two numbers (enter other number in the '
'Comments box below)', _('{he} is in system under two numbers')),
('Child places others at risk', _('{he} places others at risk')),
('Child sponsored by another organization',
_('{he} is sponsored by another organization')),
('Death of caregiver creates situation where child cannot continue',
_('{his} caregiver died')),
        ('Death of child', _('{he} passed away')),
('Family Circumstances Have Changed Positively So That Child No '
'Longer Needs Compassion\'s Assistance',
_('family circumstances have changed positively')),
('Family moved where a Compassion project with relevant programs is '
'not available', _('{his} family moved where a Compassion project '
'with relevant programs is not available')),
('Project or program closure', _('of the project closure')),
('Taken out of project by parents, or family no longer interested '
'in program', _('{he} was taken out of project by parents')),
('Unjustified absence from program activities for Greater Than 2 '
'months', _('of an unjustified absence for greater than 2 months')),
('Child ran away', 'Child ran away'),
('Crisis', 'Crisis'),
('Deceased', 'Deceased'),
('Fulfilled completion plan and reached completion date',
'Fulfilled completion plan and reached completion date'),
('Gone into military service', 'Gone into military service'),
('No longer interested in the Program', 'No longer interested in '
'the Program'),
('Project Capacity Issue', 'Project Capacity Issue'),
], readonly=True)
# Common fields
###############
# comments = fields.Char(readonly=True)
status = fields.Selection([
('Cancelled', 'Cancelled'),
('Closed', 'Closed'),
('In Progress', 'In Progress'),
('Open', 'Open'),
])
# Planned Exit fields
#####################
last_attended_project = fields.Date(readonly=True)
primary_school_finished = fields.Boolean(readonly=True)
# confesses_jesus_savior = fields.Boolean(readonly=True)
final_letter_sent = fields.Boolean(readonly=True)
sponsor_impact = fields.Char(readonly=True)
new_situation = fields.Char(readonly=True)
future_hopes = fields.Char(readonly=True)
family_impact = fields.Char(readonly=True)
# Transfer fields
#################
old_project_id = fields.Many2one('compassion.project', readonly=True)
transfer_arrival_date = fields.Date(readonly=True)
other_transfer_reason = fields.Char(readonly=True)
current_project = fields.Char(readonly=True)
new_project = fields.Char(readonly=True)
new_program = fields.Char(readonly=True)
previously_active_program = fields.Char(readonly=True)
# Transition fields
###################
transition_type = fields.Selection([
('Sponsorship-Home to Sponsorship-Center', 'Home to Center'),
('Survival to Sponsorship-Home', 'Survival to Home'),
('Traditional Survival to Sponsorship-Center',
'Traditional Survival to Center'),
], readonly=True)
# Unplanned Exit fields
#######################
child_death_date = fields.Date(readonly=True)
death_intervention_information = fields.Char(readonly=True)
child_death_category = fields.Selection([
('Abuse', 'Abuse'),
('Fatal Accident or Suicide', 'Fatal Accident or Suicide'),
('Gastro-Intestinal', 'Gastro-Intestinal'),
('Infection', 'Infection'),
('Maternal', 'Maternal'),
('Neonatal Disorders', 'Neonatal Disorders'),
('Non-Communicable Diseases', 'Non-Communicable Diseases'),
('Respiratory-Related', 'Respiratory-Related'),
('Unknown Cause', 'Unknown Cause'),
('Vaccine-Preventable Diseases', 'Vaccine-Preventable Diseases'),
('Vector-Borne', 'Vector-Borne'),
], readonly=True)
child_death_subcategory = fields.Selection([
('Abortion-related', 'Abortion-related'),
('Anemia', 'Anemia'),
('Asphyxia', 'Asphyxia'),
('Birth Asphyxia', 'Birth Asphyxia'),
('Bronchitis', 'Bronchitis'),
('Burns', 'Burns'),
('Cancer', 'Cancer'),
('Cardiovascular', 'Cardiovascular'),
('Chicken Pox', 'Chicken Pox'),
('Chikungunya', 'Chikungunya'),
('Cholera', 'Cholera'),
('Congenital Abnormalities', 'Congenital Abnormalities'),
('Dengue', 'Dengue'),
('Diabetes', 'Diabetes'),
('Diarrhea', 'Diarrhea'),
('Diphtheria', 'Diphtheria'),
('Drowning', 'Drowning'),
('Electrocution', 'Electrocution'),
('Epilepsy/Seizure Disorder', 'Epilepsy/Seizure Disorder'),
('Falls', 'Falls'),
('HIV/AIDS-related', 'HIV/AIDS-related'),
('Hepatitis', 'Hepatitis'),
('Influenza', 'Influenza'),
('Intra-partum-related Complications',
'Intra-partum-related Complications'),
('Japanese Encephalitis', 'Japanese Encephalitis'),
('Leukemia', 'Leukemia'),
('Malaria', 'Malaria'),
('Measles', 'Measles'),
('Meningitis', 'Meningitis'),
('Mumps', 'Mumps'),
('Natural Disaster', 'Natural Disaster'),
('Obstructed Labor', 'Obstructed Labor'),
('Other', 'Other'),
('Parasites', 'Parasites'),
('Pertussis', 'Pertussis'),
('Physical', 'Physical'),
('Pneumonia', 'Pneumonia'),
('Poisoning', 'Poisoning'),
('Polio', 'Polio'),
('Postpartum Complications (Hemorrhage, infection etc.)',
'Postpartum Complications (Hemorrhage, infection etc.)'),
('Pregnancy Complications (Preeclampsia, eclampsia, etc.)',
'Pregnancy Complications (Preeclampsia, eclampsia, etc.)'),
('Prematurity/ Low Birth Weight', 'Prematurity/ Low Birth Weight'),
('Renal Disease', 'Renal Disease'),
('Respiratory Tract Infection', 'Respiratory Tract Infection'),
('Rotavirus', 'Rotavirus'),
('Rubella', 'Rubella'),
('Sepsis/Infection', 'Sepsis/Infection'),
('Septicemia', 'Septicemia'),
('Skin', 'Skin'),
('Substance', 'Substance'),
('Sudden Infant Death', 'Sudden Infant Death'),
('Suicide', 'Suicide'),
('Tetanus', 'Tetanus'),
('Transportation Accident', 'Transportation Accident'),
('Tuberculosis', 'Tuberculosis'),
('Typhoid', 'Typhoid'),
('Typhus', 'Typhus'),
('Violence', 'Violence'),
('West Nile Virus', 'West Nile Virus'),
('Yellow Fever', 'Yellow Fever'),
], readonly=True)
_sql_constraints = [
('global_id', 'unique(global_id)',
'The lifecycle already exists in database.')
]
@api.model
def create(self, vals):
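        # Upsert keyed on global_id: if this lifecycle was already received,
        # update the existing record in place instead of creating a duplicate.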
lifecycle = self.search([
('global_id', '=', vals['global_id'])
])
if lifecycle:
lifecycle.write(vals)
else:
lifecycle = super(ChildLifecycleEvent, self).create(vals)
# Process lifecycle event
if 'Exit' in lifecycle.type:
lifecycle.child_id.depart()
elif lifecycle.type == 'Reinstatement':
lifecycle.child_id.reinstatement()
return lifecycle
@api.model
def process_commkit(self, commkit_data):
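        # Map the OnRamp JSON payload onto model fields; the payload is either
        # a list of lifecycle events or a single event.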
lifecycle_mapping = mapping.new_onramp_mapping(
self._name,
self.env,
'new_child_lifecyle')
lifecycle_ids = list()
for single_data in commkit_data.get('BeneficiaryLifecycleEventList',
[commkit_data]):
vals = lifecycle_mapping.get_vals_from_connect(single_data)
lifecycle = self.create(vals)
lifecycle_ids.append(lifecycle.id)
return lifecycle_ids
|
philippe89/compassion-modules
|
child_compassion/models/child_lifecycle_event.py
|
Python
|
agpl-3.0
| 11,343
|
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorforce.core.explorations import Exploration
class GaussianNoise(Exploration):
"""
Explores via gaussian noise.
"""
def __init__(
self,
sigma=0.3,
mu=0.0,
scope='gaussian_noise',
summary_labels=()
):
"""
Initializes distribution values for gaussian noise
"""
self.sigma = sigma
self.mu = float(mu) # need to add cast to float to avoid tf type-mismatch error in case mu=0.0
super(GaussianNoise, self).__init__(scope=scope, summary_labels=summary_labels)
def tf_explore(self, episode, timestep, action_spec):
return tf.random_normal(shape=action_spec['shape'], mean=self.mu, stddev=self.sigma)
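# A minimal usage sketch (illustrative assumption only; tensorforce normally
# instantiates explorations from the agent configuration rather than by hand):
#
#   exploration = GaussianNoise(sigma=0.3, mu=0.0)
#   noise = exploration.tf_explore(episode=0, timestep=0,
#                                  action_spec=dict(shape=(2,)))
#   noisy_action = deterministic_action + noise  # additive perturbation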
|
lefnire/tensorforce
|
tensorforce/core/explorations/gaussian_noise.py
|
Python
|
apache-2.0
| 1,432
|
# prosaicweb
# Copyright (C) 2016 nathaniel smith
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from . import views
from .models import Base, engine
from .views.auth import login, logout, register
from .app import app, bcrypt
routes = [
# TODO
# because html is dumb and forms can only use post/get, that's all we take
# here. However, within each view function, we check for a _method on a
# POST and treat that as the method. This should really be handled by a
    # middleware (see the sketch after the route table below).
('/', 'index', views.index, {}),
('/generate', 'generate', views.generate, {'methods': ['GET', 'POST']}),
('/corpora', 'corpora', views.corpora, {'methods': ['GET', 'POST',]}),
('/sources', 'sources', views.sources, {'methods': ['GET', 'POST',]}),
('/sources/<source_id>', 'source', views.source,
{'methods': ['GET', 'POST']}),
('/corpora/<corpus_id>', 'corpus', views.corpus,
{'methods': ['GET', 'POST']}),
('/phrases', 'phrases', views.phrases, {'methods': ['POST']}),
('/templates', 'templates', views.templates, {'methods': ['GET', 'POST']}),
('/templates/<template_id>', 'template', views.template,
{'methods': ['GET', 'POST']}),
('/auth/login', 'login', login, {'methods': ['POST']}),
('/auth/register', 'register', register, {'methods':['GET', 'POST']}),
('/auth/logout', 'logout', logout, {}),
]
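# A minimal sketch of the middleware suggested in the TODO above. It is
# deliberately not registered with the app; the name and approach here are
# illustrative assumptions, not part of prosaicweb.
def _apply_method_override():
    from flask import request
    override = request.form.get('_method')
    if request.method == 'POST' and override:
        # Rewrite the WSGI method so downstream routing sees the real verb.
        request.environ['REQUEST_METHOD'] = override.upper()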
for [route, name, fn, opts] in routes:
app.add_url_rule(route, name, fn, **opts)
def main() -> None:
if len(sys.argv) > 1 and sys.argv[1] == 'dbinit':
print('initializing prosaic and prosaicweb database state...')
Base.metadata.create_all(bind=engine)
exit(0)
app.run()
if __name__ == '__main__':
main()
|
nathanielksmith/prosaicweb
|
prosaicweb/__init__.py
|
Python
|
agpl-3.0
| 2,338
|
from django.shortcuts import render
import twitter_tools
import os
from django.http import HttpResponse
def query_by_ext(ext):
mimetype = {
'css': 'text/css',
'js': 'application/x-javascript',
'png': 'image/png',
'PNG': 'image/png',
'jpg': 'image/jpeg',
'jpeg': 'image/jpeg',
'JPG': 'image/jpeg',
'JPEG': 'image/jpeg',
'gif': 'image/gif',
'GIF': 'image/gif',
'xml': 'text/xml',
'swf': 'application/x-shockwave-flash',
'html': 'text/html',
}
return mimetype.get(ext, '')
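# e.g. query_by_ext('png') == 'image/png'; unknown extensions map to ''.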
def get_file(request, ext):
    path = request.path
    abspath = os.path.abspath('.') + path
    with open(abspath, 'rb') as f:
        stream = f.read()
    mimetype = query_by_ext(ext)
    return HttpResponse(stream, mimetype=mimetype)
def home(request):
return render(request, 'search.html')
def recommend(request):
if request.method == 'GET' and request.GET.get('user_name', ''):
user_name = request.GET['user_name']
friends = twitter_tools.get_friends(user_name)
if not friends:
error = 'Not Following Anyone?'
return render(request, 'error.html', {'error':error, 'user_name':user_name})
friends = [x['user_id'] for x in friends]
friends_in_database = twitter_tools.get_friend_in_database(friends)
if not friends_in_database:
error = 'No Followee In Database?'
return render(request, 'error.html', {'error':error, 'user_name':user_name})
recommended = twitter_tools.get_recommended(friends_in_database)
for f in friends_in_database:
print f
print '*'* 200
for f in recommended:
print f
friends_in_database = twitter_tools.get_users(friends_in_database)
recommended = twitter_tools.get_users(recommended)
return render(request, 'result.html', {'friends':friends_in_database, 'recommended': recommended})
    else:
        error = 'No User Name Provided?'
        return render(request, 'error.html', {'error': error, 'user_name': ''})
|
WeakGroup/twitter-rec
|
interface/website/website/views.py
|
Python
|
gpl-2.0
| 2,044
|
# -*- coding: utf-8 -*-
def setupVarious(context):
# Ordinarily, GenericSetup handlers check for the existence of XML files.
# Here, we are not parsing an XML file, but we use this text file as a
# flag to check that we actually meant for this import step to be run.
# The file is found in profiles/default.
if context.readDataFile('docpool.elan_various.txt') is None:
return
# Add additional setup code here
from docpool.config.general.elan import install
install(context.getSite())
|
OpenBfS/dokpool-plone
|
Plone/src/docpool.elan/docpool/elan/setuphandlers.py
|
Python
|
gpl-3.0
| 529
|
from PushbulletLogging import *
|
wrow/PushbulletLogging
|
__init__.py
|
Python
|
mit
| 32
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('browser', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='RLibrary',
fields=[
('resource_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='browser.Resource')),
('phys_id', models.IntegerField(null=True, blank=True)),
('author', models.CharField(max_length=30)),
('item_type', models.CharField(max_length=1, choices=[(b'0', b'book'), (b'1', b'dvd'), (b'2', b'cd')])),
('catagory', models.CharField(max_length=1, choices=[(b'0', b'a'), (b'1', b'b'), (b'2', b'c')])),
('availablity', models.CharField(max_length=1, choices=[(b'0', b'available'), (b'1', b'reserved'), (b'2', b'out')])),
],
options={
},
bases=('browser.resource',),
),
migrations.CreateModel(
name='SDemo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('age1to3', models.IntegerField(null=True, blank=True)),
('age3to18', models.IntegerField(null=True, blank=True)),
('age18plus', models.IntegerField(null=True, blank=True)),
('gender_m', models.IntegerField(null=True, blank=True)),
('gender_f', models.IntegerField(null=True, blank=True)),
('resource', models.ForeignKey(to='browser.Resource')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.AddField(
model_name='resource',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='resource',
name='title',
field=models.CharField(default=b'title', max_length=30),
preserve_default=True,
),
]
|
rwspicer/ARDA
|
arda_db/browser/migrations/0002_auto_20150320_0137.py
|
Python
|
mit
| 2,242
|
import codecs
import os
import re
from collections import OrderedDict
from common import SushiError, format_time, format_srt_time
def _parse_ass_time(string):
hours, minutes, seconds = map(float, string.split(':'))
return hours*3600+minutes*60+seconds
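# e.g. _parse_ass_time('1:23:45.67') == 1 * 3600 + 23 * 60 + 45.67 == 5025.67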
class ScriptEventBase(object):
def __init__(self, start, end):
super(ScriptEventBase, self).__init__()
self._shift = 0
self._diff = 1
self.start = start
self.end = end
self._linked_event = None
self._start_shift = 0
self._end_shift = 0
self.source_index = 0
@property
def shift(self):
return self._linked_event.shift if self.linked else self._shift
@property
def diff(self):
return self._linked_event.diff if self.linked else self._diff
@property
def duration(self):
return self.end - self.start
@property
def shifted_end(self):
return self.end + self.shift + self._end_shift
@property
def shifted_start(self):
return self.start + self.shift + self._start_shift
def apply_shift(self):
self.start = self.shifted_start
self.end = self.shifted_end
def set_shift(self, shift, audio_diff):
if self.linked:
raise Exception('Cannot set shift of a linked event. This is a bug')
self._shift = shift
self._diff = audio_diff
def adjust_additional_shifts(self, start_shift, end_shift):
if self.linked:
raise Exception('Cannot apply additional shifts to a linked event. This is a bug')
self._start_shift += start_shift
self._end_shift += end_shift
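    # Events may be linked into a chain so that a group of lines shares one
    # shift; resolve_link() copies the linked values and breaks the chain.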
def get_link_chain_end(self):
return self._linked_event.get_link_chain_end() if self.linked else self
def link_event(self, other):
if other.get_link_chain_end() is self:
raise Exception('Circular link detected. This is a bug')
self._linked_event = other
def resolve_link(self):
if not self.linked:
raise Exception('Cannot resolve unlinked events. This is a bug')
self._shift = self._linked_event.shift
self._diff = self._linked_event.diff
self._linked_event = None
@property
def linked(self):
return self._linked_event is not None
def adjust_shift(self, value):
if self.linked:
raise Exception('Cannot adjust time of linked events. This is a bug')
self._shift += value
def __repr__(self):
return unicode(self)
class ScriptBase(object):
def sort_by_time(self):
self.events.sort(key=lambda x: x.start)
def remember_ordering(self):
for idx, event in enumerate(self.events):
# 1-based for srt, ass doesn't use them for anything but sorting anyway
event.source_index = idx + 1
class SrtEvent(ScriptEventBase):
EVENT_REGEX = re.compile("""
(\d+?). # line-number
(\d{1,2}:\d{1,2}:\d{1,2},\d+)\s-->\s(\d{1,2}:\d{1,2}:\d{1,2},\d+). # timestamp
(.+?) # actual text
(?= # lookahead for the next line or end of the file
(?:\d+?. # line-number
\d{1,2}:\d{1,2}:\d{1,2},\d+\s-->\s\d{1,2}:\d{1,2}:\d{1,2},\d+) # timestamp
|$
)""", flags=re.VERBOSE | re.DOTALL)
def __init__(self, idx, start, end, text):
super(SrtEvent, self).__init__(start, end)
self.source_index = idx
self.text = text
self.style = None
self.is_comment = False
@classmethod
def from_string(cls, text):
match = cls.EVENT_REGEX.match(text)
start = cls.parse_time(match.group(2))
end = cls.parse_time(match.group(3))
return SrtEvent(int(match.group(1)), start, end, match.group(4).strip())
def __unicode__(self):
return u'{0}\n{1} --> {2}\n{3}'.format(self.source_index, self._format_time(self.start),
self._format_time(self.end), self.text)
@staticmethod
def parse_time(time_string):
return _parse_ass_time(time_string.replace(',', '.'))
@staticmethod
def _format_time(seconds):
return format_srt_time(seconds)
class SrtScript(ScriptBase):
def __init__(self, events):
super(SrtScript, self).__init__()
self.events = events
@classmethod
def from_file(cls, path):
try:
with codecs.open(path, encoding='utf-8-sig') as script:
text = script.read()
events_list = []
for match in SrtEvent.EVENT_REGEX.finditer(text):
event = SrtEvent(
idx=int(match.group(1)),
start=SrtEvent.parse_time(match.group(2)),
end=SrtEvent.parse_time(match.group(3)),
text=match.group(4).strip()
)
events_list.append(event)
return cls(events_list)
except IOError:
raise SushiError("Script {0} not found".format(path))
def save_to_file(self, path):
text = '\n\n'.join(map(unicode, self.events))
with codecs.open(path, encoding='utf-8', mode='w') as script:
script.write(text)
class AssEvent(ScriptEventBase):
def __init__(self, text):
self.source_index = 0
split = text.split(':', 1)
self.kind = split[0]
self.is_comment = self.kind.lower() == 'comment'
split = [x.strip() for x in split[1].split(',', 9)]
start = _parse_ass_time(split[1])
end = _parse_ass_time(split[2])
super(AssEvent, self).__init__(start, end)
self.layer = split[0]
self.style = split[3]
self.name = split[4]
self.margin_left = split[5]
self.margin_right = split[6]
self.margin_vertical = split[7]
self.effect = split[8]
self.text = split[9]
def __unicode__(self):
return u'{0}: {1},{2},{3},{4},{5},{6},{7},{8},{9},{10}'.format(self.kind, self.layer,
self._format_time(self.start),
self._format_time(self.end),
self.style, self.name,
self.margin_left, self.margin_right,
self.margin_vertical, self.effect,
self.text)
@staticmethod
def _format_time(seconds):
return format_time(seconds)
class AssScript(ScriptBase):
def __init__(self, script_info, styles, events, other):
super(AssScript, self).__init__()
self.script_info = script_info
self.styles = styles
self.events = events
self.other = other
self.remember_ordering()
@classmethod
def from_file(cls, path):
script_info, styles, events = [], [], []
other_sections = OrderedDict()
def parse_script_info_line(line):
if line.startswith(u'Format:'):
return
script_info.append(line)
def parse_styles_line(line):
if line.startswith(u'Format:'):
return
styles.append(line)
def parse_event_line(line):
if line.startswith(u'Format:'):
return
events.append(AssEvent(line))
def create_generic_parse(section_name):
other_sections[section_name] = []
return lambda x: other_sections[section_name].append(x)
parse_function = None
try:
with codecs.open(path, encoding='utf-8-sig') as script:
for line_idx, line in enumerate(script):
line = line.strip()
if not line:
continue
low = line.lower()
if low == u'[script info]':
parse_function = parse_script_info_line
elif low == u'[v4+ styles]':
parse_function = parse_styles_line
elif low == u'[events]':
parse_function = parse_event_line
elif re.match(r'\[.+?\]', low):
parse_function = create_generic_parse(line)
elif not parse_function:
raise SushiError("That's some invalid ASS script")
else:
try:
parse_function(line)
except Exception as e:
raise SushiError("That's some invalid ASS script: {0} [line {1}]".format(e.message, line_idx))
except IOError:
raise SushiError("Script {0} not found".format(path))
return cls(script_info, styles, events, other_sections)
def save_to_file(self, path):
# if os.path.exists(path):
# raise RuntimeError('File %s already exists' % path)
lines = []
if self.script_info:
lines.append(u'[Script Info]')
lines.extend(self.script_info)
lines.append('')
if self.styles:
lines.append(u'[V4+ Styles]')
lines.append(u'Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding')
lines.extend(self.styles)
lines.append('')
if self.events:
events = sorted(self.events, key=lambda x: x.source_index)
lines.append(u'[Events]')
lines.append(u'Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text')
lines.extend(map(unicode, events))
if self.other:
for section_name, section_lines in self.other.iteritems():
lines.append('')
lines.append(section_name)
lines.extend(section_lines)
with codecs.open(path, encoding='utf-8-sig', mode='w') as script:
script.write(unicode(os.linesep).join(lines))
|
alicx1/Sushi
|
subs.py
|
Python
|
mit
| 10,556
|
compl_iupacdict = {'A':'T',
'C':'G',
'G':'C',
'T':'A',
'M':'K',
'R':'Y',
'W':'W',
'S':'S',
'Y':'R',
'K':'M',
'V':'B',
'H':'D',
'D':'H',
'B':'V',
'X':'X',
'N':'N'}
def complement(seq, compl_iupacdict):
    compl_seq = ""
    for letter in seq:
        compl_seq = compl_seq + compl_iupacdict[letter]
    return compl_seq
def reverse(text):
    return text[::-1]
def revcomp(seq):
    revCompSeq = reverse(complement(seq, compl_iupacdict))
    return revCompSeq
#=========================================================================
def iupacList_2_regExList(motifList):
    for i, motif in enumerate(motifList):
        motifList[i] = [motif, iupac2regex(motif)]
def iupac2regex(motif):
iupacdict = {'A':'A',
'C':'C',
'G':'G',
'T':'T',
'M':'[AC]',
'R':'[AG]',
'W':'[AT]',
'S':'[CG]',
'Y':'[CT]',
'K':'[GT]',
'V':'[ACG]',
'H':'[ACT]',
'D':'[AGT]',
'B':'[CGT]',
'X':'[ACGT]',
'N':'[ACGT]'}
transl_motif = ""
for i in range(0,len(motif)):
letter = motif[i]
transl_motif = transl_motif + iupacdict[letter]
return transl_motif
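# Minimal sanity checks for the helpers above (illustrative additions, not
# part of the original module):
if __name__ == '__main__':
    assert revcomp('ACGT') == 'ACGT'  # ACGT is its own reverse complement
    assert iupac2regex('AYN') == 'A[CT][ACGT]'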
|
xguse/spartan
|
src/spartan/utils/seqs.py
|
Python
|
mit
| 1,664
|
# MIT License
# Copyright (c) 2016 Diogo Dutra
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from falconswagger.models.orm.redis import ModelRedisMeta, ModelRedisFactory
from falconswagger.exceptions import ModelBaseError
from unittest import mock
import pytest
import msgpack
class TestModelRedisFactory(object):
def test_build(self):
model = ModelRedisFactory.make('TestModel', 'test', ['id'], {})
assert model.__name__ == 'TestModel'
assert model.__key__ == 'test'
assert model.__schema__ == {}
@pytest.fixture
def model():
return ModelRedisFactory.make('TestModel', 'test', ['id'], {})
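# Each test below builds a fresh MagicMock session so the assertions can
# inspect exactly which redis hash commands (hmset/hdel/hmget/hkeys) were
# issued and with which packed payloads.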
class TestModelRedisMetaInsert(object):
def test_without_objects(self, model):
session = mock.MagicMock()
assert model.insert(session, []) == []
def test_with_objects_len_less_than_chunks(self, model):
session = mock.MagicMock()
expected_map = {
b'1': msgpack.dumps({'id': 1})
}
assert model.insert(session, [{'id': 1}]) == [{'id': 1}]
assert session.redis_bind.hmset.call_args_list == [mock.call('test', expected_map)]
def test_with_objects_len_greater_than_chunks(self, model):
session = mock.MagicMock()
expected_map1 = {
b'1': msgpack.dumps({'id': 1})
}
expected_map2 = {
b'2': msgpack.dumps({'id': 2})
}
model.CHUNKS = 1
assert model.insert(session, [{'id': 1}, {'id': 2}]) == [{'id': 1}, {'id': 2}]
assert session.redis_bind.hmset.call_args_list == [
mock.call('test', expected_map1),
mock.call('test', expected_map2)]
class TestModelRedisMetaUpdateWithoutIDs(object):
def test_without_objects_and_without_ids(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = []
assert model.update(session, []) == []
def test_hmset_with_objects_and_without_ids(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['1'.encode()]
expected_map = {
'1'.encode(): msgpack.dumps({'id': 1})
}
assert model.update(session, [{'id': 1}]) == [{'id': 1}]
assert session.redis_bind.hmset.call_args_list == [mock.call('test', expected_map)]
def test_hmset_with_objects_and_without_ids_and_with_invalid_keys(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['test']
assert model.update(session, [{'id': 1}]) == []
assert session.redis_bind.hmset.call_args_list == []
def test_hmset_with_objects_and_without_ids_and_with_one_invalid_key(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['test', '1'.encode(), 'test2']
expected_map = {
'1'.encode(): msgpack.dumps({'id': 1})
}
assert model.update(session, [{'id': 1}, {'id': 2}]) == [{'id': 1}]
assert session.redis_bind.hmset.call_args_list == [mock.call('test', expected_map)]
def test_hmset_with_objects_and_without_ids_with_set_map_len_greater_than_chunks(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['2'.encode(), '1'.encode()]
model.CHUNKS = 1
expected_map1 = {
'1'.encode(): msgpack.dumps({'id': 1})
}
expected_map2 = {
'2'.encode(): msgpack.dumps({'id': 2})
}
assert model.update(session, [{'id': 1}, {'id': 2}]) == [{'id': 1}, {'id': 2}]
assert (session.redis_bind.hmset.call_args_list == [
mock.call('test', expected_map1),
mock.call('test', expected_map2)
] or session.redis_bind.hmset.call_args_list == [
mock.call('test', expected_map2),
mock.call('test', expected_map1)
])
class TestModelRedisMetaUpdateWithIDs(object):
def test_without_objects_and_with_ids(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['2', '1']
assert model.update(session, [], {'id': 1}) == []
assert not session.redis_bind.hmset.called
def test_with_objects_and_with_ids_and_with_one_id_different_than_objects(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['1'.encode()]
expected_map = {
'1'.encode(): msgpack.dumps({'id': 1})
}
assert model.update(session, [{'id': 1}], [{'id': 1}, {'id': 2}]) == [{'id': 1}]
assert session.redis_bind.hmset.call_args_list == [mock.call('test', expected_map)]
def test_with_objects_and_with_ids_and_with_one_obj_different_than_ids(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['1'.encode()]
expected_map = {
'1'.encode(): msgpack.dumps({'id': 1})
}
assert model.update(session, [{'id': 1}, {'id': 2}], [{'id': 1}]) == [{'id': 1}]
assert session.redis_bind.hmset.call_args_list == [mock.call('test', expected_map)]
def test_with_objects_and_with_ids_and_with_ids_different_than_objects(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['1'.encode()]
assert model.update(session, [{'id': 2}], {'id': 1}) == []
assert not session.redis_bind.hmset.called
def test_with_objects_and_with_ids_and_with_objs_different_than_ids(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['2'.encode()]
assert model.update(session, [{'id': 2}], {'id': 1}) == []
assert not session.redis_bind.hmset.called
def test_hmset_with_objects_and_with_ids(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['1'.encode()]
expected_map = {
'1'.encode(): msgpack.dumps({'id': 1})
}
assert model.update(session, {'id': 1}, {'id': 1}) == [{'id': 1}]
assert session.redis_bind.hmset.call_args_list == [mock.call('test', expected_map)]
def test_hmset_with_objects_and_with_ids_and_with_invalid_keys(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['test']
assert model.update(session, [{'id': 1}], [{'id': 1}]) == []
assert session.redis_bind.hmset.call_args_list == []
def test_hmset_with_objects_and_with_ids_and_with_one_invalid_key(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['test', '1'.encode(), 'test2']
expected_map = {
'1'.encode(): msgpack.dumps({'id': 1})
}
assert model.update(session, [{'id': 1}, {'id': 2}], [{'id': 1}, {'id': 2}]) == [{'id': 1}]
assert session.redis_bind.hmset.call_args_list == [mock.call('test', expected_map)]
def test_hmset_with_objects_and_with_ids_len_greater_than_chunks(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['2'.encode(), '1'.encode()]
model.CHUNKS = 1
expected_map1 = {
'1'.encode(): msgpack.dumps({'id': 1})
}
expected_map2 = {
'2'.encode(): msgpack.dumps({'id': 2})
}
assert model.update(session, [{'id': 1}, {'id': 2}], [{'id': 1}, {'id': 2}]) == [
{'id': 1}, {'id': 2}
]
assert (session.redis_bind.hmset.call_args_list == [
mock.call('test', expected_map1),
mock.call('test', expected_map2)
] or session.redis_bind.hmset.call_args_list == [
mock.call('test', expected_map2),
mock.call('test', expected_map1)
])
class TestModelRedisMetaDelete(object):
def test_without_ids(self, model):
session = mock.MagicMock()
model.delete(session, [])
assert not session.redis_bind.hdel.called
def test_delete(self, model):
session = mock.MagicMock()
model.delete(session, {'id': 1})
assert session.redis_bind.hdel.call_args_list == [mock.call('test', b'1')]
class TestModelRedisMetaGetAll(object):
def test_get_all(self, model):
session = mock.MagicMock()
session.redis_bind.hmget.return_value = [msgpack.dumps({'id': 1})]
assert model.get(session) == []
def test_get_all_with_limit(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['1'.encode(), '2'.encode()]
model.get(session, limit=1)
assert session.redis_bind.hmget.call_args_list == [mock.call('test', b'1')]
def test_get_all_with_limit_and_offset(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['1'.encode(), '2'.encode(), '3'.encode()]
model.get(session, limit=2, offset=1)
assert session.redis_bind.hmget.call_args_list == [mock.call('test', b'2', b'3')]
def test_get_all_with_offset(self, model):
session = mock.MagicMock()
session.redis_bind.hkeys.return_value = ['1'.encode(), '2'.encode(), '3'.encode()]
model.get(session, offset=2)
assert session.redis_bind.hmget.call_args_list == [mock.call('test', b'3')]
class TestModelRedisMetaGetMany(object):
def test_get_many(self, model):
session = mock.MagicMock()
model.get(session, {'id': 1})
assert session.redis_bind.hmget.call_args_list == [mock.call('test', b'1')]
def test_get_many_with_limit(self, model):
session = mock.MagicMock()
model.get(session, [{'id': 1}, {'id': 2}], limit=1)
assert session.redis_bind.hmget.call_args_list == [mock.call('test', b'1')]
def test_get_many_with_limit_and_offset(self, model):
session = mock.MagicMock()
model.get(session, [{'id': 1}, {'id': 2}, {'id': 3}], limit=2, offset=1)
assert session.redis_bind.hmget.call_args_list == [mock.call('test', b'2', b'3')]
def test_get_many_with_offset(self, model):
session = mock.MagicMock()
model.get(session, [{'id': 1}, {'id': 2}, {'id': 3}], offset=2)
assert session.redis_bind.hmget.call_args_list == [mock.call('test', b'3')]
|
dutradda/falcon-swagger
|
tests/unit/base/models/test_redis.py
|
Python
|
mit
| 11,240
|
# coding=utf-8
import logging
from pyage.core import address
from pyage.core.agent.agent import unnamed_agents
from pyage.core.agent.aggregate import AggregateAgent
from pyage.core.emas import EmasService
from pyage.core.locator import GridLocator
from pyage.core.migration import ParentMigration
from pyage.core.stats.gnuplot import StepStatistics
from pyage.core.stop_condition import StepLimitStopCondition
from solution.crossover import AverageVectorCrossover
from solution.emas_initializer import emas_initializer
from solution.evaluation import StyblinskiTangEvaluation
from solution.mutation import UniformVectorMutation
vectors = [
[-5, -5, -5, -5, -5],
[-4, -4, -4, -4, -4],
[-3, -3, -3, -3, -3],
[-2, -2, -2, -2, -2],
[-1, -1, -1, -1, -1],
    [0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4],
[5, 5, 5, 5, 5]
]
vector_nr = len(vectors)
logger = logging.getLogger(__name__)
agents_count = 5
logger.debug("EMAS, %s agents", agents_count)
agents = unnamed_agents(agents_count, AggregateAgent)
stop_condition = lambda: StepLimitStopCondition(1000)
agg_size = 40
aggregated_agents = lambda: emas_initializer(
energy=40, lowerbound=-5, upperbound=5, size=agg_size
)
emas = EmasService
minimal_energy = lambda: 10
reproduction_minimum = lambda: 90
migration_minimum = lambda: 120
newborn_energy = lambda: 100
transferred_energy = lambda: 40
evaluation = lambda: StyblinskiTangEvaluation()
crossover = lambda: AverageVectorCrossover(size=vector_nr)
mutation = lambda: UniformVectorMutation(probability=0.1, radius=0.1)
address_provider = address.SequenceAddressProvider
migration = ParentMigration
locator = GridLocator
stats = lambda: StepStatistics(
'fitness_%s_pyage.txt' % __name__
)
|
Hoobie/pyage-styblinski-tang
|
conf/emasconf.py
|
Python
|
mit
| 1,791
|
# -*- coding: utf-8 -*-
import os
ENV_NAME = "staging"
DEBUG = os.environ.get("FLASK_DEBUG", "False") == "True"
|
rdev-hackaton/msze_www
|
msze_www/settings/staging.py
|
Python
|
gpl-3.0
| 111
|
from djangoappengine.settings_base import *
import os
# Activate django-dbindexer for the default database
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
AUTOLOAD_SITECONF = 'indexes'
SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.messages',
'djangotoolbox',
'autoload',
'dbindexer',
'osfinalproject',
'filetransfers',
# djangoappengine should come last, so it can override a few manage.py commands
'djangoappengine',
)
MIDDLEWARE_CLASSES = (
# This loads the index definitions, so it has to come first
'autoload.middleware.AutoloadMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.media',
)
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
STATIC_URL = '/static/'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
ROOT_URLCONF = 'urls'
LOGIN_REDIRECT_URL = '/osfinalproject/'
ALLOWED_HOSTS = ('.osfinalproject.appspot.com',)
PREPARE_UPLOAD_BACKEND = 'filetransfers.backends.default.prepare_upload'
SERVE_FILE_BACKEND = 'filetransfers.backends.default.serve_file'
PUBLIC_DOWNLOAD_URL_BACKEND = 'filetransfers.backends.default.public_download_url'
|
ormnv/os_final_project
|
settings.py
|
Python
|
bsd-3-clause
| 1,866
|
'''
Created on Dec 22, 2011
@author: ajju
'''
from common import HttpUtils
from common.DataObjects import VideoHostingInfo, VideoInfo, VIDEO_QUAL_SD
import base64
import binascii
import urllib
try:
import json
except ImportError:
import simplejson as json
def getVideoHostingInfo():
video_hosting_info = VideoHostingInfo()
video_hosting_info.set_video_hosting_image('http://www.videozer.com/images/logo.jpg')
video_hosting_info.set_video_hosting_name('Videozer')
return video_hosting_info
def retrieveVideoInfo(video_id):
video_info = VideoInfo()
video_info.set_video_hosting_info(getVideoHostingInfo())
video_info.set_video_id(video_id)
try:
video_info_link = 'http://www.videozer.com/player_control/settings.php?v=' + video_id + '&fv=v1.1.45'
jsonObj = json.load(urllib.urlopen(video_info_link))
key1 = jsonObj["cfg"]["environment"]["rkts"]
key2 = jsonObj["cfg"]["login"]["pepper"]
key3 = jsonObj["cfg"]["ads"]["lightbox2"]["time"]
values = binascii.unhexlify(decrypt(jsonObj["cfg"]["login"]["spen"], jsonObj["cfg"]["login"]["salt"], 950569)).split(';')
spn = HttpUtils.getUrlParams(values[0])
outk = HttpUtils.getUrlParams(values[1])
ikey = getikey(int(outk["ik"]))
urlKey = ''
for spnkey in spn:
spnval = spn[spnkey]
if spnval == '1':
cypher = jsonObj["cfg"]["info"]["sece2"]
urlKey = urlKey + spnkey + '=' + decrypt(cypher, key1, ikey, ln=256) + '&'
if spnval == '2':
cypher = jsonObj["cfg"]["ads"]["g_ads"]["url"]
urlKey = urlKey + spnkey + '=' + decrypt(cypher, key1, ikey) + '&'
if spnval == '3':
cypher = jsonObj["cfg"]["ads"]["g_ads"]["type"]
urlKey = urlKey + spnkey + '=' + decrypt(cypher, key1, ikey, 26, 25431, 56989, 93, 32589, 784152) + '&'
if spnval == '4':
cypher = jsonObj["cfg"]["ads"]["g_ads"]["time"]
urlKey = urlKey + spnkey + '=' + decrypt(cypher, key1, ikey, 82, 84669, 48779, 32, 65598, 115498) + '&'
if spnval == '5':
cypher = jsonObj["cfg"]["login"]["euno"]
urlKey = urlKey + spnkey + '=' + decrypt(cypher, key2, ikey, 10, 12254, 95369, 39, 21544, 545555) + '&'
if spnval == '6':
cypher = jsonObj["cfg"]["login"]["sugar"]
urlKey = urlKey + spnkey + '=' + decrypt(cypher, key3, ikey, 22, 66595, 17447, 52, 66852, 400595) + '&'
urlKey = urlKey + "start=0"
video_link = ""
for videoStrm in jsonObj["cfg"]["quality"]:
if videoStrm["d"]:
video_link = str(base64.b64decode(videoStrm["u"]))
if video_link == "":
video_info.set_video_stopped(False)
raise Exception("VIDEO_STOPPED")
video_link = video_link + '&' + urlKey
video_info.set_video_name(jsonObj["cfg"]["info"]["video"]["title"])
video_info.set_video_image(jsonObj["cfg"]["environment"]["thumbnail"])
video_info.set_video_stopped(False)
video_info.add_video_link(VIDEO_QUAL_SD, video_link)
except:
video_info.set_video_stopped(True)
return video_info
def getikey(i):
    return {1: 215678, 2: 516929, 3: 962043, 4: 461752, 5: 141994}.get(i, -1)
def hex2bin(hexStr):
binaryStr = ''
for c in hexStr:
binaryStr = binaryStr + bin(int(c, 16))[2:].zfill(4)
return binaryStr
def bin2hex(binStr):
hexStr = ''
for i in range(len(binStr) - 4, -1, -4):
oneBinStr = binStr[i:i + 4]
hexStr = hexStr + hex(int(oneBinStr.zfill(4), 2))[2:]
hexStr = hexStr[::-1]
return hexStr
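# e.g. hex2bin('a3') == '10100011' and bin2hex(hex2bin('a3')) == 'a3'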
def decrypt(cypher, key1, key2, keySetA_1=11, keySetA_2=77213, keySetA_3=81371, keySetB_1=17, keySetB_2=92717, keySetB_3=192811, ln=None):
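    # The cypher is a hex string: expand it to a list of bits, shuffle the
    # bits with a permutation drawn from two linear-congruential streams
    # seeded by key1/key2, XOR the first half with the tail of the stream,
    # then pack the bits back into hex.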
C = list(hex2bin(cypher))
if ln is None:
ln = len(C) * 2
B = int(ln * 1.5) * [None]
for i in range(0, int(ln * 1.5)):
key1 = (key1 * keySetA_1 + keySetA_2) % keySetA_3
key2 = (key2 * keySetB_1 + keySetB_2) % keySetB_3
B[i] = (key1 + key2) % int(ln * 0.5)
x = y = z = 0
for i in range(ln, -1 , -1):
x = B[i]
y = i % int(ln * 0.5)
z = C[x]
C[x] = C[y]
C[y] = z
for i in range(0, int(ln * 0.5), 1):
C[i] = str(int(C[i]) ^ int(B[i + ln]) & 1)
binStr = ''.join(C)
return bin2hex(binStr)
|
dknlght/dkodi
|
src/script.module.turtle/lib/snapvideo/Videozer.py
|
Python
|
gpl-2.0
| 4,708
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
adress = Table('adress', pre_meta,
Column('id', INTEGER, primary_key=True, nullable=False),
Column('user_id', INTEGER),
Column('adress', VARCHAR(length=50)),
)
adress = Table('adress', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', Integer),
Column('address', Text),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['adress'].columns['adress'].drop()
post_meta.tables['adress'].columns['address'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['adress'].columns['adress'].create()
post_meta.tables['adress'].columns['address'].drop()
|
serdimoa/vincenzoext
|
db_repository/versions/016_migration.py
|
Python
|
bsd-3-clause
| 1,060
|
# This file is part of Invenio.
# Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Access Control Config. """
__revision__ = \
"$Id$"
# pylint: disable=C0301
from invenio.config import CFG_SITE_NAME, CFG_SITE_URL, CFG_SITE_SECURE_URL, CFG_SITE_SUPPORT_EMAIL, CFG_SITE_RECORD, CFG_SITE_ADMIN_EMAIL
from invenio.base.i18n import _
from invenio.base.globals import cfg as config
# VALUES TO BE EXPORTED
# CURRENTLY USED BY THE FILES access_control_engine.py modules.access.control.py webaccessadmin_lib.py
# name of the role giving superadmin rights
SUPERADMINROLE = 'superadmin'
# name of the webaccess webadmin role
WEBACCESSADMINROLE = 'webaccessadmin'
# name of the action allowing roles to access the web administrator interface
WEBACCESSACTION = 'cfgwebaccess'
# name of the action allowing roles to view restricted collections
VIEWRESTRCOLL = 'viewrestrcoll'
# name of the action allowing roles to delegate the rights to other roles
# ex: libraryadmin to delegate libraryworker
DELEGATEADDUSERROLE = 'accdelegaterole'
# max number of users to display in the drop down selects
MAXSELECTUSERS = 25
# max number of users to display in a page (mainly for user area)
MAXPAGEUSERS = 25
# default role definition, source:
CFG_ACC_EMPTY_ROLE_DEFINITION_SRC = 'deny all'
# default role definition, compiled:
CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ = (False, ())
# default role definition, compiled and serialized:
CFG_ACC_EMPTY_ROLE_DEFINITION_SER = None
# List of tags containing (multiple) emails of users who should be authorized
# to access the corresponding record regardless of collection restrictions.
#if CFG_CERN_SITE:
# CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS = ['859__f', '270__m']
#else:
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS = ['8560_f']
#if CFG_CERN_SITE:
# CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS = ['506__m']
#else:
CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS = []
# Use external source for access control?
# CFG_EXTERNAL_AUTHENTICATION -- this is a dictionary with the enabled login method.
# The key is the name of the login method and the value is an instance
# of the login method (see /help/admin/webaccess-admin-guide#5). Set the value
# to None if you wish to use the local Invenio authentication method.
# CFG_EXTERNAL_AUTH_DEFAULT -- set this to the key in CFG_EXTERNAL_AUTHENTICATION
# that should be considered as default login method
# CFG_EXTERNAL_AUTH_USING_SSO -- set this to the login method name of an SSO
# login method, if any, otherwise set this to None.
# CFG_EXTERNAL_AUTH_LOGOUT_SSO -- if CFG_EXTERNAL_AUTH_USING_SSO was not None
# set this to the URL that should be contacted to perform an SSO logout
CFG_EXTERNAL_AUTH_DEFAULT = 'Local'
CFG_EXTERNAL_AUTH_USING_SSO = False
CFG_EXTERNAL_AUTH_LOGOUT_SSO = None
CFG_EXTERNAL_AUTHENTICATION = {
"Local": None,
# "Robot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=False),
# "ZRobot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=True)
# CFG_EXTERNAL_AUTH_USING_SSO : ea_sso.ExternalAuthSSO(enforce_external_nicknames=True),
}
# from invenio.legacy.external_authentication.robot import ExternalAuthRobot
# if CFG_CERN_SITE:
# from invenio.legacy.external_authentication import sso as ea_sso
# CFG_EXTERNAL_AUTH_USING_SSO = "CERN"
# CFG_EXTERNAL_AUTH_DEFAULT = CFG_EXTERNAL_AUTH_USING_SSO
# CFG_EXTERNAL_AUTH_LOGOUT_SSO = 'https://login.cern.ch/adfs/ls/?wa=wsignout1.0'
# CFG_EXTERNAL_AUTHENTICATION = {
# CFG_EXTERNAL_AUTH_USING_SSO : ea_sso.ExternalAuthSSO(),
# }
# elif CFG_INSPIRE_SITE:
# # INSPIRE specific robot configuration
# CFG_EXTERNAL_AUTH_DEFAULT = 'Local'
# CFG_EXTERNAL_AUTH_USING_SSO = False
# CFG_EXTERNAL_AUTH_LOGOUT_SSO = None
# CFG_EXTERNAL_AUTHENTICATION = {
# "Local": None,
# "Robot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=False, check_user_ip=2, external_id_attribute_name='personid'),
# "ZRobot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=True, check_user_ip=2, external_id_attribute_name='personid')
# }
# else:
# CFG_EXTERNAL_AUTH_DEFAULT = 'Local'
# CFG_EXTERNAL_AUTH_USING_SSO = False
# CFG_EXTERNAL_AUTH_LOGOUT_SSO = None
# CFG_EXTERNAL_AUTHENTICATION = {
# "Local": None,
# "Robot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=False),
# "ZRobot": ExternalAuthRobot(enforce_external_nicknames=True, use_zlib=True)
# }
# CFG_TEMP_EMAIL_ADDRESS
# Temporary email address for logging in with an OpenID/OAuth provider which
# doesn't supply email address
CFG_TEMP_EMAIL_ADDRESS = "%s@NOEMAIL"
# CFG_OPENID_PROVIDERS
# CFG_OAUTH1_PROVIDERS
# CFG_OAUTH2_PROVIDERS
# Choose which providers you want to use. Some providers don't supply an
# e-mail address; if you choose them, the users will be registered with a
# temporary email address like CFG_TEMP_EMAIL_ADDRESS % randomstring
#
# Order of the login buttons can be changed by CFG_EXTERNAL_LOGIN_BUTTON_ORDER
# in invenio.websession_config
CFG_OPENID_PROVIDERS = [
'google',
'yahoo',
'aol',
'wordpress',
'myvidoop',
'openid',
'verisign',
'myopenid',
'myspace',
'livejournal',
'blogger'
]
CFG_OAUTH1_PROVIDERS = [
'twitter',
'linkedin',
'flickr'
]
CFG_OAUTH2_PROVIDERS = [
'facebook',
'yammer',
'foursquare',
'googleoauth2',
'instagram',
'orcid'
]
# CFG_OPENID_CONFIGURATIONS
# identifier: (required) identifier url. {0} will be replaced by username (an
# input).
# trust_email: (optional, default: False) Some providers let their users
# change their emails on login page. If the provider doesn't let the user,
# set it True.
CFG_OPENID_CONFIGURATIONS = {
'openid': {
'identifier': '{0}'
},
'myvidoop': {
'identifier': '{0}.myvidoop.com'
},
'google': {
'identifier': 'https://www.google.com/accounts/o8/id',
'trust_email': True
},
'wordpress': {
'identifier': '{0}.wordpress.com'
},
'aol': {
'identifier': 'openid.aol.com/{0}',
'trust_email': True
},
'myopenid': {
'identifier': '{0}.myopenid.com'
},
'yahoo': {
'identifier': 'yahoo.com',
'trust_email': True
},
'verisign': {
'identifier': '{0}.pip.verisignlabs.com'
},
'myspace': {
'identifier': 'www.myspace.com/{0}'
},
'livejournal': {
'identifier': '{0}.livejournal.com'
},
'blogger': {
'identifier': '{0}'
}
}
# CFG_OAUTH1_CONFIGURATIONS
#
# !!IMPORTANT!!
# While creating an app in the provider site, the callback uri (redirect uri)
# must be in the form of :
# CFG_SITE_SECURE_URL/youraccount/login?login_method=oauth1&provider=PROVIDERNAME
#
# consumer_key: required
# Consumer key taken from provider.
#
# consumer_secret: required
# Consumer secret taken from provider.
#
# authorize_url: required
# The url to redirect the user for authorization
#
# authorize_parameters: optional
#    Additional parameters for authorize_url (e.g. scope)
#
# request_token_url: required
# The url to get request token
#
# access_token_url: required
# The url to exchange the request token with the access token
#
# request_url: optional
# The url to gather the user information
#
# request_parameters: optional
# Additional parameters for request_url
#
# email, nickname: optional
# id: required
#    Where these properties are located in the response returned from the
#    provider.
# example:
# if the response is:
# {
# 'user': {
# 'user_name': 'ABC',
# 'contact': [
# {
# 'email': 'abc@def.com'
# }
# ]
# },
# 'user_id': 'XXX',
# }
# then:
# email must be : ['user', 'contact', 0, 'email']
# id must be: ['user_id']
# nickname must be: ['user', 'user_name']
#
# debug: optional
# When debug key is set to 1, after login process, the json object
# returned from provider is displayed on the screen. It may be used
# for finding where the id, email or nickname is.
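# A minimal sketch of how such a path list could be walked against the parsed
# JSON response (illustrative only; not the resolver Invenio actually uses):
def _resolve_response_path(response, path):
    for step in path:  # each step is a dict key or a list index
        response = response[step]
    return response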
CFG_OAUTH1_CONFIGURATIONS = {
'twitter': {
'consumer_key' : '',
'consumer_secret' : '',
'request_token_url' : 'https://api.twitter.com/oauth/request_token',
'access_token_url' : 'https://api.twitter.com/oauth/access_token',
'authorize_url' : 'https://api.twitter.com/oauth/authorize',
'id': ['user_id'],
'nickname': ['screen_name']
},
'flickr': {
'consumer_key' : '',
'consumer_secret' : '',
'request_token_url' : 'http://www.flickr.com/services/oauth/request_token',
'access_token_url' : 'http://www.flickr.com/services/oauth/access_token',
'authorize_url' : 'http://www.flickr.com/services/oauth/authorize',
'authorize_parameters': {
'perms': 'read'
},
'nickname': ['username'],
'id': ['user_nsid']
},
'linkedin': {
'consumer_key' : '',
'consumer_secret' : '',
'request_token_url' : 'https://api.linkedin.com/uas/oauth/requestToken',
'access_token_url' : 'https://api.linkedin.com/uas/oauth/accessToken',
'authorize_url' : 'https://www.linkedin.com/uas/oauth/authorize',
'request_url': 'http://api.linkedin.com/v1/people/~:(id)',
'request_parameters': {
'format': 'json'
},
'id': ['id']
}
}
# CFG_OAUTH2_CONFIGURATIONS
#
# !!IMPORTANT!!
# While creating an app in the provider site, the callback uri (redirect uri)
# must be in the form of :
# CFG_SITE_SECURE_URL/youraccount/login?login_method=oauth2&provider=PROVIDERNAME
#
# consumer_key: required
# Consumer key taken from provider.
#
# consumer_secret: required
# Consumer secret taken from provider.
#
# authorize_url: required
# The url to redirect the user for authorization
#
# authorize_parameters:
# Additional parameters for authorize_url (like scope)
#
# access_token_url: required
# The url to get the access token.
#
# request_url: required
# The url to gather the user information.
# {access_token} will be replaced by access token
#
# email, nickname: optional
# id: required
#    Where these properties are located in the response returned from the
#    provider.
# !! See the example in CFG_OAUTH1_CONFIGURATIONS !!
#
# debug: optional
# When debug key is set to 1, after login process, the json object
# returned from provider is displayed on the screen. It may be used
# for finding where the id, email or nickname is.
CFG_OAUTH2_CONFIGURATIONS = {
'facebook': {
'consumer_key': '',
'consumer_secret': '',
'access_token_url': 'https://graph.facebook.com/oauth/access_token',
'authorize_url': 'https://www.facebook.com/dialog/oauth',
'authorize_parameters': {
'scope': 'email'
},
'request_url' : 'https://graph.facebook.com/me?access_token={access_token}',
'email': ['email'],
'id': ['id'],
'nickname': ['username']
},
'foursquare': {
'consumer_key': '',
'consumer_secret': '',
'access_token_url': 'https://foursquare.com/oauth2/access_token',
'authorize_url': 'https://foursquare.com/oauth2/authorize',
'request_url': 'https://api.foursquare.com/v2/users/self?oauth_token={access_token}',
'id': ['response', 'user', 'id'],
'email': ['response', 'user', 'contact' ,'email']
},
'yammer': {
'consumer_key': '',
'consumer_secret': '',
'access_token_url': 'https://www.yammer.com/oauth2/access_token.json',
'authorize_url': 'https://www.yammer.com/dialog/oauth',
'request_url': 'https://www.yammer.com/oauth2/access_token.json?access_token={access_token}',
'email':['user', 'contact', 'email_addresses', 0, 'address'],
'id': ['user', 'id'],
'nickname': ['user', 'name']
},
'googleoauth2': {
'consumer_key': '',
'consumer_secret': '',
'access_token_url': 'https://accounts.google.com/o/oauth2/token',
'authorize_url': 'https://accounts.google.com/o/oauth2/auth',
'authorize_parameters': {
'scope': 'https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email'
},
'request_url': 'https://www.googleapis.com/oauth2/v1/userinfo?access_token={access_token}',
'email':['email'],
'id': ['id']
},
'instagram': {
'consumer_key': '',
'consumer_secret': '',
'access_token_url': 'https://api.instagram.com/oauth/access_token',
'authorize_url': 'https://api.instagram.com/oauth/authorize/',
'authorize_parameters': {
'scope': 'basic'
},
'id': ['user', 'id'],
'nickname': ['user', 'username']
},
'orcid': {
'consumer_key': '',
'consumer_secret': '',
'authorize_url': 'http://sandbox-1.orcid.org/oauth/authorize',
'access_token_url': 'http://api.sandbox-1.orcid.org/oauth/token',
'request_url': 'http://api.sandbox-1.orcid.org/{id}/orcid-profile',
'authorize_parameters': {
'scope': '/orcid-profile/read-limited',
'response_type': 'code',
'access_type': 'offline',
},
'id': ['orcid'],
}
}
# Let's override OpenID/OAuth1/OAuth2 configuration from invenio(-local).conf
CFG_OPENID_PROVIDERS = config['CFG_OPENID_PROVIDERS']
CFG_OAUTH1_PROVIDERS = config['CFG_OAUTH1_PROVIDERS']
CFG_OAUTH2_PROVIDERS = config['CFG_OAUTH2_PROVIDERS']
if config['CFG_OPENID_CONFIGURATIONS']:
for provider, configuration in config['CFG_OPENID_CONFIGURATIONS'].items():
if provider in CFG_OPENID_CONFIGURATIONS:
CFG_OPENID_CONFIGURATIONS[provider].update(configuration)
else:
CFG_OPENID_CONFIGURATIONS[provider] = configuration
if config['CFG_OAUTH1_CONFIGURATIONS']:
for provider, configuration in config['CFG_OAUTH1_CONFIGURATIONS'].items():
if provider in CFG_OAUTH1_CONFIGURATIONS:
CFG_OAUTH1_CONFIGURATIONS[provider].update(configuration)
else:
CFG_OAUTH1_CONFIGURATIONS[provider] = configuration
if config['CFG_OAUTH2_CONFIGURATIONS']:
for provider, configuration in config['CFG_OAUTH2_CONFIGURATIONS'].items():
if provider in CFG_OAUTH2_CONFIGURATIONS:
CFG_OAUTH2_CONFIGURATIONS[provider].update(configuration)
else:
CFG_OAUTH2_CONFIGURATIONS[provider] = configuration
# If OpenID authentication is enabled, add 'openid' to login methods
CFG_OPENID_AUTHENTICATION = bool(CFG_OPENID_PROVIDERS)
if CFG_OPENID_AUTHENTICATION:
from invenio.legacy.external_authentication.openid import ExternalOpenID
CFG_EXTERNAL_AUTHENTICATION['openid'] = ExternalOpenID(enforce_external_nicknames=True)
# If OAuth1 authentication is enabled, add 'oauth1' to login methods.
CFG_OAUTH1_AUTHENTICATION = bool(CFG_OAUTH1_PROVIDERS)
if CFG_OAUTH1_AUTHENTICATION:
from invenio.legacy.external_authentication.oauth1 import ExternalOAuth1
CFG_EXTERNAL_AUTHENTICATION['oauth1'] = ExternalOAuth1(enforce_external_nicknames=True)
# If OAuth2 authentication is enabled, add 'oauth2' to login methods.
CFG_OAUTH2_AUTHENTICATION = bool(CFG_OAUTH2_PROVIDERS)
if CFG_OAUTH2_AUTHENTICATION:
from invenio.legacy.external_authentication.oauth2 import ExternalOAuth2
CFG_EXTERNAL_AUTHENTICATION['oauth2'] = ExternalOAuth2(enforce_external_nicknames=True)
# If using SSO, this is the number of seconds after which the keep-alive
# SSO handler is pinged again to provide fresh SSO information.
CFG_EXTERNAL_AUTH_SSO_REFRESH = 600
# default data for the add_default_settings function
# Note: by default the definition is set to deny any. This won't be a problem
# because userid directly connected with roles will still be allowed.
# roles
# name description definition
DEF_ROLES = ((SUPERADMINROLE, 'superuser with all rights', 'deny any'),
(WEBACCESSADMINROLE, 'WebAccess administrator', 'deny any'),
('anyuser', 'Any user', 'allow any'),
('basketusers', 'Users who can use baskets', 'allow any'),
('loanusers', 'Users who can use loans', 'allow any'),
('groupusers', 'Users who can use groups', 'allow any'),
('alertusers', 'Users who can use alerts', 'allow any'),
('messageusers', 'Users who can use messages', 'allow any'),
('holdingsusers', 'Users who can view holdings', 'allow any'),
('statisticsusers', 'Users who can view statistics', 'allow any'),
('claimpaperusers', 'Users who can perform changes to their own paper attributions without the need for an operator\'s approval', 'allow any'),
('claimpaperoperators', 'Users who can perform changes to _all_ paper attributions without the need for an operator\'s approval', 'deny any'),
('paperclaimviewers', 'Users who can view "claim my paper" facilities.', 'allow all'),
('paperattributionviewers', 'Users who can view "attribute this paper" facilities', 'allow all'),
('paperattributionlinkviewers', 'Users who can see attribution links in the search', 'allow all'),
             ('authorlistusers', 'Users who can use Authorlist tools', 'deny all'),
('holdingpenusers', 'Users who can view Holding Pen', 'deny all'),
)
# users
# list of e-mail addresses
DEF_USERS = []
# actions
# name desc allowedkeywords optional
DEF_ACTIONS = (
('cfgwebsearch', 'configure WebSearch', '', 'no'),
('cfgbibformat', 'configure BibFormat', '', 'no'),
('cfgbibknowledge', 'configure BibKnowledge', '', 'no'),
('cfgwebsubmit', 'configure WebSubmit', '', 'no'),
('cfgbibrank', 'configure BibRank', '', 'no'),
('cfgwebcomment', 'configure WebComment', '', 'no'),
    ('cfgweblinkback', 'configure WebLinkback', '', 'no'),
('cfgoaiharvest', 'configure OAI Harvest', '', 'no'),
('cfgoairepository', 'configure OAI Repository', '', 'no'),
('cfgbibindex', 'configure BibIndex', '', 'no'),
('cfgbibexport', 'configure BibExport', '', 'no'),
('cfgrobotkeys', 'configure Robot keys', 'login_method,robot', 'yes'),
('cfgbibsort', 'configure BibSort', '', 'no'),
('runbibindex', 'run BibIndex', '', 'no'),
('runbibupload', 'run BibUpload', '', 'no'),
('runwebcoll', 'run webcoll', 'collection', 'yes'),
('runbibformat', 'run BibFormat', 'format', 'yes'),
('runbibclassify', 'run BibClassify', 'taxonomy', 'yes'),
('runbibtaskex', 'run BibTaskEx example', '', 'no'),
('runbibrank', 'run BibRank', '', 'no'),
('runoaiharvest', 'run oaiharvest task', '', 'no'),
('runoairepository', 'run oairepositoryupdater task', '', 'no'),
('runbibedit', 'run Record Editor', 'collection', 'yes'),
('runbibeditmulti', 'run Multi-Record Editor', '', 'no'),
('runbibdocfile', 'run Document File Manager', '', 'no'),
('runbibmerge', 'run Record Merger', '', 'no'),
('runbibswordclient', 'run BibSword client', '', 'no'),
    ('runwebstatadmin', 'run WebStatAdmin', '', 'no'),
('runinveniogc', 'run InvenioGC', '', 'no'),
('runbibexport', 'run BibExport', '', 'no'),
('runauthorlist', 'run Authorlist tools', '', 'no'),
('referee', 'referee document type doctype/category categ', 'doctype,categ', 'yes'),
('submit', 'use webSubmit', 'doctype,act,categ', 'yes'),
('viewrestrdoc', 'view restricted document', 'status', 'no'),
('viewrestrcomment', 'view restricted comment', 'status', 'no'),
(WEBACCESSACTION, 'configure WebAccess', '', 'no'),
(DELEGATEADDUSERROLE, 'delegate subroles inside WebAccess', 'role', 'no'),
(VIEWRESTRCOLL, 'view restricted collection', 'collection', 'no'),
('cfgwebjournal', 'configure WebJournal', 'name,with_editor_rights', 'no'),
('viewcomment', 'view comments', 'collection', 'no'),
('viewlinkbacks', 'view linkbacks', 'collection', 'no'),
('sendcomment', 'send comments', 'collection', 'no'),
('attachcommentfile', 'attach files to comments', 'collection', 'no'),
('attachsubmissionfile', 'upload files to drop box during submission', '', 'no'),
('usebaskets', 'use baskets', '', 'no'),
('useloans', 'use loans', '', 'no'),
('usegroups', 'use groups', '', 'no'),
('usealerts', 'use alerts', '', 'no'),
('usemessages', 'use messages', '', 'no'),
('viewholdings', 'view holdings', 'collection', 'yes'),
('viewstatistics', 'view statistics', 'collection', 'yes'),
('runbibcirculation', 'run BibCirculation', '', 'no'),
('moderatecomments', 'moderate comments', 'collection', 'no'),
('moderatelinkbacks', 'moderate linkbacks', 'collection', 'no'),
('runbatchuploader', 'run batchuploader', 'collection', 'yes'),
('runbibtasklet', 'run BibTaskLet', '', 'no'),
('claimpaper_view_pid_universe', 'View the Claim Paper interface', '', 'no'),
    ('claimpaper_claim_own_papers', 'Claim papers to his own person ID', '', 'no'),
('claimpaper_claim_others_papers', 'Claim papers for others', '', 'no'),
    ('claimpaper_change_own_data', 'Change data associated with his own person ID', '', 'no'),
('claimpaper_change_others_data', 'Change data of any person ID', '', 'no'),
('cfgbibsched', 'configure BibSched', '', 'no'),
('runinfomanager', 'run Info Space Manager', '', 'no')
)
from invenio.ext.principal.wrappers import Action
for action in DEF_ACTIONS:
type(action[0], (Action, ), {
'__doc__': action[1],
'allowedkeywords': action[2].split(','),
'optional': action[3] == "yes"
})
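# NOTE (editor's annotation): each tuple above becomes a dynamic subclass
# of Action via type(name, bases, dict); the return value is discarded, so
# registration of the new class is assumed to happen as a side effect in
# Action's metaclass.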
# Default authorizations
# role action arguments
DEF_AUTHS = (('basketusers', 'usebaskets', {}),
('loanusers', 'useloans', {}),
('groupusers', 'usegroups', {}),
('alertusers', 'usealerts', {}),
('messageusers', 'usemessages', {}),
('holdingsusers', 'viewholdings', {}),
('statisticsusers', 'viewstatistics', {}),
('authorlistusers', 'runauthorlist', {}),
('claimpaperusers', 'claimpaper_view_pid_universe', {}),
('claimpaperoperators', 'claimpaper_view_pid_universe', {}),
('claimpaperusers', 'claimpaper_claim_own_papers', {}),
('claimpaperoperators', 'claimpaper_claim_own_papers', {}),
('claimpaperoperators', 'claimpaper_claim_others_papers', {}),
('claimpaperusers', 'claimpaper_change_own_data', {}),
('claimpaperoperators', 'claimpaper_change_own_data', {}),
('claimpaperoperators', 'claimpaper_change_others_data', {}),
('holdingpenusers', 'viewholdingpen', {}),
)
# Activities (i.e. actions) for which an administrative web interface exists.
CFG_ACC_ACTIVITIES_URLS = {
'runbibedit' : (_("Run Record Editor"), "%s/%s/edit/?ln=%%s" % (CFG_SITE_URL, CFG_SITE_RECORD)),
'runbibeditmulti' : (_("Run Multi-Record Editor"), "%s/%s/multiedit/?ln=%%s" % (CFG_SITE_URL, CFG_SITE_RECORD)),
'runbibdocfile' : (_("Run Document File Manager"), "%s/%s/managedocfiles?ln=%%s" % (CFG_SITE_URL, CFG_SITE_RECORD)),
'runbibmerge' : (_("Run Record Merger"), "%s/%s/merge/?ln=%%s" % (CFG_SITE_URL, CFG_SITE_RECORD)),
'runbibswordclient' : (_("Run BibSword client"), "%s/bibsword/?ln=%%s" % CFG_SITE_URL),
'cfgbibknowledge' : (_("Configure BibKnowledge"), "%s/kb?ln=%%s" % CFG_SITE_URL),
'cfgbibformat' : (_("Configure BibFormat"), "%s/admin/bibformat/bibformatadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgoaiharvest' : (_("Configure OAI Harvest"), "%s/admin/oaiharvest/oaiharvestadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgoairepository' : (_("Configure OAI Repository"), "%s/admin/oairepository/oairepositoryadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibindex' : (_("Configure BibIndex"), "%s/admin/bibindex/bibindexadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibrank' : (_("Configure BibRank"), "%s/admin/bibrank/bibrankadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebaccess' : (_("Configure WebAccess"), "%s/admin/webaccess/webaccessadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebcomment' : (_("Configure WebComment"), "%s/admin/webcomment/webcommentadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgweblinkback' : (_("Configure WebLinkback"), "%s/admin/weblinkback/weblinkbackadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsearch' : (_("Configure WebSearch"), "%s/admin/websearch/websearchadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsubmit' : (_("Configure WebSubmit"), "%s/admin/websubmit/websubmitadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebjournal' : (_("Configure WebJournal"), "%s/admin/webjournal/webjournaladmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibsort' : (_("Configure BibSort"), "%s/admin/bibsort/bibsortadmin.py?ln=%%s" % CFG_SITE_URL),
'runbibcirculation' : (_("Run BibCirculation"), "%s/admin/bibcirculation/bibcirculationadmin.py?ln=%%s" % CFG_SITE_URL),
'runbatchuploader' : (_("Run Batch Uploader"), "%s/batchuploader/metadata?ln=%%s" % CFG_SITE_URL),
'runinfomanager' : (_("Run Info Space Manager"), "%s/info/manage?ln=%%s" % CFG_SITE_URL),
'claimpaper_claim_others_papers' : (_("Run Person/Author Manager"), "%s/author/search?ln=%%s" % CFG_SITE_URL)
}
CFG_WEBACCESS_MSGS = {
0: 'Try to <a href="%s/youraccount/login?referer=%%s">login</a> with another account.' % (CFG_SITE_SECURE_URL),
1: '<br />If you think this is not correct, please contact: <a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL),
2: '<br />If you have any questions, please write to <a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL),
3: 'Guest users are not allowed, please <a href="%s/youraccount/login">login</a>.' % CFG_SITE_SECURE_URL,
4: 'The site is temporarily closed for maintenance. Please come back soon.',
5: 'Authorization failure',
6: '%s temporarily closed' % CFG_SITE_NAME,
7: 'This functionality is temporarily closed due to server maintenance. Please use only the search engine in the meantime.',
8: 'Functionality temporarily closed',
9: '<br />If you think this is not correct, please contact: <a href="mailto:%s">%s</a>',
10: '<br />You might also want to check <a href="%s">%s</a>',
}
CFG_WEBACCESS_WARNING_MSGS = {
0: 'Authorization granted',
1: 'You are not authorized to perform this action.',
2: 'You are not authorized to perform any action.',
3: 'The action %s does not exist.',
4: 'Unexpected error occurred.',
5: 'Missing mandatory keyword argument(s) for this action.',
6: 'Guest accounts are not authorized to perform this action.',
7: 'Not enough arguments, user ID and action name required.',
8: 'Incorrect keyword argument(s) for this action.',
9: """Account '%s' is not yet activated.""",
10: """You were not authorized by the authentication method '%s'.""",
11: """The selected login method '%s' is not the default method for this account, please try another one.""",
12: """Selected login method '%s' does not exist.""",
13: """Could not register '%s' account.""",
14: """Could not login using '%s', because this user is unknown.""",
15: """Could not login using your '%s' account, because you have introduced a wrong password.""",
16: """External authentication troubles using '%s' (maybe temporary network problems).""",
17: """You have not yet confirmed the email address for the '%s' authentication method.""",
18: """The administrator has not yet activated your account for the '%s' authentication method.""",
19: """The site is having troubles in sending you an email for confirming your email address. The error has been logged and will be taken care of as soon as possible.""",
20: """No roles are authorized to perform action %s with the given parameters.""",
21: """Verification cancelled""",
22: """Verification failed. Please try again or use another provider to login""",
23: """Verification failed. It is probably because the configuration isn't set properly. Please contact with the <a href="mailto:%s">administator</a>""" % CFG_SITE_ADMIN_EMAIL
}
|
crepererum/invenio
|
invenio/modules/access/local_config.py
|
Python
|
gpl-2.0
| 30,236
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Drop-in replacement for django.contrib.messages which handles Horizon's
messaging needs (e.g. AJAX communication, etc.).
"""
from django.contrib import messages as _messages
from django.contrib.messages import constants
from django.utils.encoding import force_unicode
def add_message(request, level, message, extra_tags='', fail_silently=False):
"""Attempts to add a message to the request using the 'messages' app."""
if request.is_ajax():
tag = constants.DEFAULT_TAGS[level]
request.horizon['async_messages'].append([tag,
force_unicode(message)])
else:
return _messages.add_message(request, level, message,
extra_tags, fail_silently)
def debug(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``DEBUG`` level."""
add_message(request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``INFO`` level."""
add_message(request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``SUCCESS`` level."""
add_message(request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``WARNING`` level."""
add_message(request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``ERROR`` level."""
add_message(request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently)
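# Usage sketch (editor's addition; 'request' is a normal Django request
# object passed into a view):
#
#   from iswift.swiftapi import messages
#   messages.success(request, "Container created.")
#
# For AJAX requests the message is queued on
# request.horizon['async_messages'] instead of going through
# django.contrib.messages.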
|
gotostack/iSwift
|
iswift/swiftapi/messages.py
|
Python
|
apache-2.0
| 2,533
|
from sentry.plugins.bases.notify import NotificationConfigurationForm
class CommentConfForm(NotificationConfigurationForm):
"""Configuration form.
For now this form just inherits from
NotificationConfigurationForm.
"""
|
andialbrecht/sentry-comments
|
sentry_comments/forms.py
|
Python
|
bsd-3-clause
| 238
|
import os
import sys
import shutil
import filecmp
import bots.botsglobal as botsglobal
import bots.botslib as botslib
def cleanoutputdir():
    ''' Delete the standard output directory 'botssys/outfile' (as indicated in bots.ini). '''
botssys = botsglobal.ini.get('directories','botssys')
shutil.rmtree(os.path.join(botssys,'outfile'),ignore_errors=True) #remove whole output directory
def getreportlastrun():
''' Return the results of the last run as a dict.'''
for row in botslib.query(u'''SELECT *
FROM report
ORDER BY idta DESC
'''):
return dict(row)
raise Exception('no report')
def comparedicts(result_expect,result_run):
error = ''
for key,value in result_expect.items():
if key not in result_run:
error += 'Could not find key "%s" in results of run?\n'%(key)
elif value != result_run[key]:
error += 'Comparing key "%s": expect "%s" but got "%s"\n'%(key,value,result_run[key])
if error:
        print 'errors: %s'%(error)
def comparerunresults(result_expect):
''' result_expect is a dict that contains the expected results of a run.
These expected results are compared with the actual results.
Usage eg:
CompareRunResults({'status':0,'lastreceived':6,'lasterror':0,'lastdone':6,'lastok':0,'lastopen':0,'send':4,'processerrors':0,'filesize':6638})
'''
result_run = getreportlastrun()
comparedicts(result_expect,result_run)
#**************************************************************************
#**************************************************************************
#**************************************************************************
def pretest(routestorun):
cleanoutputdir()
#cleanpreviousruns: if ta-filereport-report have clean mark as acceptancetest!
def posttest(routestorun):
#Compare run results
comparerunresults({'status':0,'lastreceived':6,'lasterror':0,'lastdone':6,'lastok':0,'lastopen':0,'send':4,'processerrors':0,'filesize':6638})
#Compare outgoing files.
    #Run once first and copy the results from 'botssys/outfile' into 'botssys/infile' (so there is a directory 'botssys/infile/outfile'....)
    #then run again; the files in both directories will be compared.
botssys = botsglobal.ini.get('directories','botssys')
outdir = os.path.join(botssys,'outfile')
compdir = os.path.join(botssys,'infile/outfile')
cmpobj = filecmp.dircmp(outdir, compdir)
cmpobj.report_full_closure()
|
Micronaet/micronaet-bots
|
edifact2fixed_orders-desadv-invoic-aperak/usersys/routescripts/bots_acceptancetest.py
|
Python
|
gpl-3.0
| 2,574
|
import os
from analyzer.report import Grade
class LicenseAnalyzer(Grade):
"""An analyzer for checking license
has_license: Whether if license file exists or not
"""
LICENSE_PATTERN = ('license', 'license.md', 'license.rst', 'license.txt')
weight = 0.01
def __init__(self):
self.has_license = False
def calculate_score(self):
"""Calculate the analyzer score"""
self.score = 100 if self.has_license else 0
def run(self, path):
"""Check if license file exists
path: Cloned repository path
"""
for _, _, filenames in os.walk(path):
for filename in filenames:
if filename.lower() in self.LICENSE_PATTERN:
self.has_license = True
return
def to_document(self):
"""Make document dict of instance to store to db"""
return {
'license': {
'has_license': self.has_license
}
}
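# Usage sketch (editor's addition; assumes the Grade base class exposes the
# 'score' attribute set by calculate_score()):
if __name__ == '__main__':
    import sys
    analyzer = LicenseAnalyzer()
    analyzer.run(sys.argv[1] if len(sys.argv) > 1 else '.')
    analyzer.calculate_score()
    print(analyzer.score)  # 100 if a license file was found, else 0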
|
mingrammer/pyreportcard
|
analyzer/license.py
|
Python
|
mit
| 999
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from PySide6 import QtCore, QtWidgets
class MyWidget(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.text = QtWidgets.QLabel("Hello World", alignment=QtCore.Qt.AlignCenter)
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addWidget(self.text)
if __name__ == "__main__":
app = QtWidgets.QApplication([])
widget = MyWidget()
widget.resize(800, 600)
widget.show()
sys.exit(app.exec())
|
jeremiedecock/snippets
|
python/pyside/pyside6/hello.py
|
Python
|
mit
| 526
|
#!/usr/bin/python
"""
UDP Service Scanner version 0.1 by dev_zzo
This work has largely been inspired by:
https://github.com/portcullislabs/udp-proto-scanner
As is, this is more like a prober than a scanner;
it operates using predefined probes for each known protocol.
"""
import argparse
import socket
import struct
import time
__scan_spec = (
# port, service name, probe
(53, 'DNSStatusRequest',
"\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
(53, 'DNSVersionBindReq',
"\x00\x06\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07\x76\x65\x72\x73\x69\x6f\x6e\x04\x62\x69\x6e\x64\x00\x00\x10\x00\x03"),
(69, 'tftp', "\x00\x01/etc/passwd\x00netascii\x00"),
(111, 'rpc',
"\x03\x9b\x65\x42\x00\x00\x00\x00\x00\x00\x00\x02\x00\x0f\x42\x43\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
(111, 'RPCCheck',
"\x72\xFE\x1D\x13\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x86\xA0\x00\x01\x97\x7C\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
(123, 'ntp',
"\xcb\x00\x04\xfa\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xbf\xbe\x70\x99\xcd\xb3\x40\x00"),
(123, 'NTPRequest',
"\xe3\x00\x04\xfa\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc5\x4f\x23\x4b\x71\xb1\x52\xf3"),
(137, 'NBTStat', "\x80\xf0\x00\x10\x00\x01\x00\x00\x00\x00\x00\x00\x20\x43\x4b\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x00\x00\x21\x00\x01"),
# SNMP v1 GetRequest PDU, with community=public
(161, 'SNMPv1GetRequest-public',
"\x30\x82\x00\x2f\x02\x01\x00\x04\x06\x70\x75\x62\x6c\x69\x63\xa0\x82\x00\x20\x02\x04\x4c\x33\xa7\x56\x02\x01\x00\x02\x01\x00\x30\x82\x00\x10\x30\x82\x00\x0c\x06\x08\x2b\x06\x01\x02\x01\x01\x05\x00\x05\x00"),
# SNMP v3 GetRequest PDU, no auth, no priv, contextEngineID=0, contextName=0
# Unlikely to work...
(161, 'SNMPv3GetRequest',
"\x30\x3a\x02\x01\x03\x30\x0f\x02\x02\x4a\x69\x02\x03\x00\xff\xe3\x04\x01\x04\x02\x01\x03\x04\x10\x30\x0e\x04\x00\x02\x01\x00\x02\x01\x00\x04\x00\x04\x00\x04\x00\x30\x12\x04\x00\x04\x00\xa0\x0c\x02\x02\x37\xf0\x02\x01\x00\x02\x01\x00\x30\x00"),
(177, 'xdmcp', "\x00\x01\x00\x02\x00\x01\x00\x00"),
(500, 'ike', "\x5b\x5e\x64\xc0\x3e\x99\xb5\x11\x00\x00\x00\x00\x00\x00\x00\x00\x01\x10\x02\x00\x00\x00\x00\x00\x00\x00\x01\x50\x00\x00\x01\x34\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x01\x28\x01\x01\x00\x08\x03\x00\x00\x24\x01\x01"),
(523, 'db2', "DB2GETADDR\x00SQL08020"),
(1434, 'ms-sql', "\x02"),
(1434, 'ms-sql-slam', "\x0A"),
(1604, 'citrix', "\x1e\x00\x01\x30\x02\xfd\xa8\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
(5405, 'net-support', "\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
(6502, 'netop', "\xd6\x81\x81\x52\x00\x00\x00\xf3\x87\x4e\x01\x02\x32\x00\xa8\xc0\x00\x00\x01\x13\xc1\xd9\x04\xdd\x03\x7d\x00\x00\x0d\x00\x54\x48\x43\x54\x48\x43\x54\x48\x43\x54\x48\x43\x54\x48\x43\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x02\x32\x00\xa8\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
)
def ip2long(ipaddr):
return long(struct.unpack('!L', socket.inet_aton(ipaddr))[0])
def long2ip(ipaddr):
return socket.inet_ntoa(struct.pack('!L', ipaddr))
def __dump_bytes(data):
return ' '.join([('%02X' % ord(x)) for x in data])
def __dump_chars(data):
return ''.join([(x if 0x20 <= ord(x) < 0x80 else '.') for x in data])
def dump(data):
i = 0
lines = []
while i < len(data):
line = data[i:(i + 16)]
p1 = __dump_bytes(line[:8])
p2 = __dump_bytes(line[8:]) if len(line) > 8 else ''
lines.append('%08X %-24s %-24s %s' % (i, p1, p2, __dump_chars(line)))
i += 16
return "\n".join(lines)
def parse_targets(targets):
"Parse the target specs provided by the user"
results = []
for target_spec in targets:
if '/' in target_spec:
# a.b.c.d/m ?
net_addr, net_mask = target_spec.split('/')
net_addr = ip2long(net_addr)
net_mask = int(net_mask)
dev_mask = (1 << (32 - net_mask)) - 1
net_addr = net_addr & ~dev_mask
            # First address is the network address, last is broadcast
for i in xrange(1, dev_mask):
addr = long2ip(net_addr + i)
results.append(addr)
elif '-' in target_spec:
# a.b.c.d-e.f.g.h ?
start_addr, end_addr = target_spec.split('-')
addr = ip2long(start_addr)
end_addr = ip2long(end_addr)
            while addr <= end_addr:
                results.append(long2ip(addr))
                addr += 1
else:
            ip2long(target_spec)  # raises socket.error if the address is malformed
results.append(target_spec)
return results
def scan_main(args):
"Main scanning routine"
global __scan_spec
targets = parse_targets(args.targets)
responses = {}
print("Starting scan.")
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('', 57022))
try:
for port, name, probe in __scan_spec:
print("Running probe '%s'..." % name)
s.settimeout(None)
for target in targets:
s.sendto(probe, (target, port))
s.settimeout(0.0)
# print("Waiting for replies...")
time.sleep(args.delay)
while True:
try:
response, addr = s.recvfrom(16384)
# print("Response from %s:%d" % addr)
try:
target_responses = responses[addr]
except KeyError:
target_responses = responses[addr] = {}
target_responses[name] = response
except socket.error as e:
# http://stackoverflow.com/a/2578794/1654774
# ICMP Port Unreachable can't be handled properly. :-(
if e.args[0] in (11, 10035):
break
                    if e.args[0] not in (10054,):
raise
finally:
s.close()
print("Scan completed.")
for addr, target_responses in responses.iteritems():
print('')
print('=' * 76)
print("Report for %s:" % addr[0])
print('=' * 76)
for name, response in target_responses.iteritems():
print('')
print("Probe: %s, port: %d" % (name, addr[1]))
print(dump(response))
def __main():
print('\nUDP Service Scanner version 0.1\n')
parser = argparse.ArgumentParser(description='UDP Service Scanner')
parser.add_argument('targets', metavar='target', nargs='+',
help='IP address or range (ip/mask, ip-ip)')
parser.add_argument('--delay',
type=float,
default=1.0,
help='Time to wait (seconds) before moving on to the next probe')
args = parser.parse_args()
scan_main(args)
if __name__ == '__main__':
__main()
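# Example invocation (editor's note; flags as defined by the argparse
# configuration above):
#   ./udp-probe.py 192.168.0.0/24 10.0.0.1-10.0.0.32 --delay 2.0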
|
dev-zzo/pwn-tools
|
scanners/udp-probe.py
|
Python
|
unlicense
| 7,615
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import sys
from collections import namedtuple
from xml.dom import minidom
# usage: list_content.py <jhbuild_prefix> <bundle.app>
# takes a jhbuild prefix and an app bundle and lists all the
# files and from which jhbuild package they come from
def main(argv):
assert len(argv) == 3
jhbuild = os.path.join(argv[1], "_jhbuild")
bundle_base = os.path.join(argv[2], "Contents", "Resources")
info = os.path.join(jhbuild, "info")
Entry = namedtuple("Entry", ["package", "version", "files"])
entries = {}
for key in os.listdir(info):
path = os.path.join(info, key)
xmldoc = minidom.parse(path)
item = xmldoc.getElementsByTagName('entry')[0]
package = item.attributes['package'].value
version = item.attributes['version'].value
entry = Entry(package, version, set())
entries[key] = entry
def norm_py(path):
# reduce all paths to their source variant so we can connect
# different variants between the installed state and the
# final one in the bundle (since we compile and delete
# the sources..)
if path.endswith((".pyc", ".pyo")):
return path[:-1]
return path
manifests = os.path.join(jhbuild, "manifests")
for key in os.listdir(manifests):
path = os.path.join(manifests, key)
with open(path, "rb") as h:
for file_ in h.read().splitlines():
entries[key].files.add(norm_py(file_))
found = set()
for root, dirs, files in os.walk(bundle_base):
for f in files:
path = os.path.relpath(os.path.join(root, f), bundle_base)
found.add(norm_py(path))
for entry in sorted(entries.values(), key=lambda e: e.package):
here = set([p for p in entry.files if p in found])
if here:
print entry.package, entry.version
found -= here
for p in sorted(here):
print " ", p
if found:
print "__UNKNOWN_SOURCE__"
for p in sorted(found):
print " ", p
if __name__ == '__main__':
main(sys.argv)
|
exaile/python-gtk3-gst-sdk
|
osx_bundle/misc/list_content.py
|
Python
|
gpl-2.0
| 2,435
|
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
distribution = LogUniform(1.0, 2.5)
size = 10000
sample = distribution.getSample(size)
factory = LogUniformFactory()
estimatedDistribution = factory.build(sample)
print("Distribution =", distribution)
print("Estimated distribution=", estimatedDistribution)
estimatedDistribution = factory.build()
print("Default distribution=", estimatedDistribution)
estimatedDistribution = factory.build(
distribution.getParameter())
print("Distribution from parameters=", estimatedDistribution)
estimatedLogUniform = factory.buildAsLogUniform(sample)
print("LogUniform =", distribution)
print("Estimated logUniform=", estimatedLogUniform)
estimatedLogUniform = factory.buildAsLogUniform()
print("Default logUniform=", estimatedLogUniform)
estimatedLogUniform = factory.buildAsLogUniform(
distribution.getParameter())
print("LogUniform from parameters=", estimatedLogUniform)
sample = [[1.0]] * size
estimatedDistribution = factory.build(sample)
print("Estimated distribution=", repr(estimatedDistribution))
except:
import sys
print("t_LogUniformFactory_std.py", sys.exc_info()[0], sys.exc_info()[1])
|
aurelieladier/openturns
|
python/test/t_LogUniformFactory_std.py
|
Python
|
lgpl-3.0
| 1,348
|
# -*- Mode: Python; test-case-name: -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
import time
from twisted.internet import reactor
from flumotion.common import log
# Minimum size to take in account when calculating mean file read
MIN_REQUEST_SIZE = 64 * 1024 + 1
# Statistics update period
STATS_UPDATE_PERIOD = 10
class RequestStatistics(object):
def __init__(self, serverStats):
self._stats = serverStats
self.bytesSent = 0L
self._stats._onRequestStart(self)
def onDataSent(self, size):
self.bytesSent += size
self._stats._onRequestDataSent(self, size)
def onCompleted(self, size):
self._stats._onRequestComplete(self, size)
class ServerStatistics(object):
_updater = None
_callId = None
def __init__(self):
now = time.time()
self.startTime = now
self.currentRequestCount = 0
self.totalRequestCount = 0
self.requestCountPeak = 0
self.requestCountPeakTime = now
self.finishedRequestCount = 0
self.totalBytesSent = 0L
# Updated by a call to the update method
self.meanRequestCount = 0
self.currentRequestRate = 0
self.requestRatePeak = 0
self.requestRatePeakTime = now
self.meanRequestRate = 0.0
self.currentBitrate = 0
self.meanBitrate = 0
self.bitratePeak = 0
self.bitratePeakTime = now
self._fileReadRatios = 0.0
self._lastUpdateTime = now
self._lastRequestCount = 0
self._lastBytesSent = 0L
def startUpdates(self, updater):
self._updater = updater
self._set("bitrate-peak-time", self.bitratePeakTime)
self._set("request-rate-peak-time", self.requestRatePeakTime)
self._set("request-count-peak-time", self.requestCountPeakTime)
if self._callId is None:
self._callId = reactor.callLater(STATS_UPDATE_PERIOD, self._update)
def stopUpdates(self):
self._updater = None
if self._callId is not None:
self._callId.cancel()
self._callId = None
def getMeanFileReadRatio(self):
if self.finishedRequestCount > 0:
return self._fileReadRatios / self.finishedRequestCount
return 0.0
meanFileReadRatio = property(getMeanFileReadRatio)
def _update(self):
now = time.time()
updateDelta = now - self._lastUpdateTime
# Update average concurrent request
meanReqCount = self._updateAverage(self._lastUpdateTime, now,
self.meanRequestCount,
self.currentRequestCount)
# Calculate Request rate
countDiff = self.totalRequestCount - self._lastRequestCount
newReqRate = float(countDiff) / updateDelta
# Calculate average request rate
meanReqRate = self._updateAverage(self._lastUpdateTime, now,
self.currentRequestRate, newReqRate)
# Calculate current bitrate
bytesDiff = (self.totalBytesSent - self._lastBytesSent) * 8
newBitrate = bytesDiff / updateDelta
# calculate average bitrate
meanBitrate = self._updateAverage(self._lastUpdateTime, now,
self.currentBitrate, newBitrate)
# Update Values
self.meanRequestCount = meanReqCount
self.currentRequestRate = newReqRate
self.meanRequestRate = meanReqRate
self.currentBitrate = newBitrate
self.meanBitrate = meanBitrate
# Update the statistics keys with the new values
self._set("mean-request-count", meanReqCount)
self._set("current-request-rate", newReqRate)
self._set("mean-request-rate", meanReqRate)
self._set("current-bitrate", newBitrate)
self._set("mean-bitrate", meanBitrate)
# Update request rate peak
if newReqRate > self.requestRatePeak:
self.requestRatePeak = newReqRate
self.requestRatePeakTime = now
# update statistic keys
self._set("request-rate-peak", newReqRate)
self._set("request-rate-peak-time", now)
# Update bitrate peak
if newBitrate > self.bitratePeak:
self.bitratePeak = newBitrate
self.bitratePeakTime = now
# update statistic keys
self._set("bitrate-peak", newBitrate)
self._set("bitrate-peak-time", now)
# Update bytes read statistic key too
self._set("total-bytes-sent", self.totalBytesSent)
self._lastRequestCount = self.totalRequestCount
self._lastBytesSent = self.totalBytesSent
self._lastUpdateTime = now
# Log the stats
self._logStatsLine()
self._callId = reactor.callLater(STATS_UPDATE_PERIOD, self._update)
def _set(self, key, value):
if self._updater is not None:
self._updater.update(key, value)
def _onRequestStart(self, stats):
# Update counters
self.currentRequestCount += 1
self.totalRequestCount += 1
self._set("current-request-count", self.currentRequestCount)
self._set("total-request-count", self.totalRequestCount)
# Update concurrent request peak
if self.currentRequestCount > self.requestCountPeak:
now = time.time()
self.requestCountPeak = self.currentRequestCount
self.requestCountPeakTime = now
self._set("request-count-peak", self.currentRequestCount)
self._set("request-count-peak-time", now)
def _onRequestDataSent(self, stats, size):
self.totalBytesSent += size
def _onRequestComplete(self, stats, size):
self.currentRequestCount -= 1
self.finishedRequestCount += 1
self._set("current-request-count", self.currentRequestCount)
if (size > 0) and (stats.bytesSent > MIN_REQUEST_SIZE):
self._fileReadRatios += float(stats.bytesSent) / size
self._set("mean-file-read-ratio", self.meanFileReadRatio)
def _updateAverage(self, lastTime, newTime, lastValue, newValue):
lastDelta = lastTime - self.startTime
newDelta = newTime - lastTime
if lastDelta > 0:
delta = lastDelta + newDelta
before = (lastValue * lastDelta) / delta
after = (newValue * newDelta) / delta
return before + after
return lastValue
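    # Editor's note: _updateAverage() above computes a time-weighted running
    # mean: the old value is weighted by the time elapsed since startTime
    # (lastDelta) and the new value by the latest interval (newDelta), e.g.
    # lastValue=10 over 30s plus newValue=2 over 10s gives
    # (10*30 + 2*10) / 40 = 8.0; while lastDelta is 0 it returns lastValue.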
def _logStatsLine(self):
"""
Statistic fields names:
TRC: Total Request Count
CRC: Current Request Count
CRR: Current Request Rate
MRR: Mean Request Rate
FRR: File Read Ratio
MBR: Mean Bitrate
CBR: Current Bitrate
"""
log.debug("stats-http-server",
"TRC: %s; CRC: %d; CRR: %.2f; MRR: %.2f; "
"FRR: %.4f; MBR: %d; CBR: %d",
self.totalRequestCount, self.currentRequestCount,
self.currentRequestRate, self.meanRequestRate,
self.meanFileReadRatio, self.meanBitrate,
self.currentBitrate)
|
flumotion-mirror/flumotion
|
flumotion/component/misc/httpserver/serverstats.py
|
Python
|
lgpl-2.1
| 7,784
|
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Documents"),
"icon": "icon-star",
"items": [
{
"type": "doctype",
"name": "Customer",
"description": _("Customer database."),
},
{
"type": "doctype",
"name": "Quotation",
"description": _("Quotes to Leads or Customers."),
},
{
"type": "doctype",
"name": "Sales Order",
"description": _("Confirmed orders from Customers."),
},
{
"type": "doctype",
"name": "Contact",
"description": _("All Contacts."),
},
{
"type": "doctype",
"name": "Address",
"description": _("All Addresses."),
},
{
"type": "doctype",
"name": "Item",
"description": _("All Products or Services."),
},
]
},
{
"label": _("Tools"),
"icon": "icon-wrench",
"items": [
{
"type": "doctype",
"name": "SMS Center",
"description":_("Send mass SMS to your contacts"),
},
{
"type": "doctype",
"name": "SMS Log",
"description":_("Logs for maintaining sms delivery status"),
},
{
"type": "doctype",
"name": "Newsletter",
"description": _("Newsletters to contacts, leads."),
},
]
},
{
"label": _("Setup"),
"icon": "icon-cog",
"items": [
{
"type": "doctype",
"name": "Selling Settings",
"description": _("Default settings for selling transactions.")
},
{
"type": "doctype",
"name": "Campaign",
"description": _("Sales campaigns."),
},
{
"type": "page",
"label": _("Customer Group"),
"name": "Sales Browser",
"icon": "icon-sitemap",
"link": "Sales Browser/Customer Group",
"description": _("Manage Customer Group Tree."),
"doctype": "Customer Group",
},
{
"type": "page",
"label": _("Territory"),
"name": "Sales Browser",
"icon": "icon-sitemap",
"link": "Sales Browser/Territory",
"description": _("Manage Territory Tree."),
"doctype": "Territory",
},
{
"type": "doctype",
"name": "Sales Partner",
"description": _("Manage Sales Partners."),
},
{
"type": "page",
"label": _("Sales Person"),
"name": "Sales Browser",
"icon": "icon-sitemap",
"link": "Sales Browser/Sales Person",
"description": _("Manage Sales Person Tree."),
"doctype": "Sales Person",
},
{
"type": "page",
"name": "Sales Browser",
"icon": "icon-sitemap",
"label": _("Item Group Tree"),
"link": "Sales Browser/Item Group",
"description": _("Tree of Item Groups."),
"doctype": "Item Group",
},
{
"type": "doctype",
"name":"Terms and Conditions",
"label": _("Terms and Conditions Template"),
"description": _("Template of terms or contract.")
},
{
"type": "doctype",
"name": "Sales Taxes and Charges Template",
"description": _("Tax template for selling transactions.")
},
{
"type": "doctype",
"name": "Shipping Rule",
"description": _("Rules for adding shipping costs.")
},
{
"type": "doctype",
"name": "Price List",
"description": _("Price List master.")
},
{
"type": "doctype",
"name": "Item Price",
"description": _("Multiple Item prices."),
"route": "Report/Item Price"
},
{
"type": "doctype",
"name": "Pricing Rule",
"description": _("Rules for applying pricing and discount.")
},
{
"type": "doctype",
"name": "Product Bundle",
"description": _("Bundle items at time of sale."),
},
{
"type": "doctype",
"name": "Email Account",
"description": _("Setup incoming server for sales email id. (e.g. sales@example.com)")
},
{
"type": "doctype",
"name": "Industry Type",
"description": _("Track Leads by Industry Type.")
},
{
"type": "doctype",
"name": "SMS Settings",
"description": _("Setup SMS gateway settings")
},
]
},
{
"label": _("Main Reports"),
"icon": "icon-table",
"items": [
{
"type": "page",
"name": "sales-analytics",
"label": _("Sales Analytics"),
"icon": "icon-bar-chart",
},
{
"type": "page",
"name": "sales-funnel",
"label": _("Sales Funnel"),
"icon": "icon-bar-chart",
},
{
"type": "report",
"is_query_report": True,
"name": "Customer Acquisition and Loyalty",
"doctype": "Customer",
"icon": "icon-bar-chart",
},
]
},
{
"label": _("Standard Reports"),
"icon": "icon-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Lead Details",
"doctype": "Lead"
},
{
"type": "report",
"is_query_report": True,
"name": "Customer Addresses And Contacts",
"doctype": "Contact"
},
{
"type": "report",
"is_query_report": True,
"name": "Ordered Items To Be Delivered",
"doctype": "Sales Order"
},
{
"type": "report",
"is_query_report": True,
"name": "Sales Person-wise Transaction Summary",
"doctype": "Sales Order"
},
{
"type": "report",
"is_query_report": True,
"name": "Item-wise Sales History",
"doctype": "Item"
},
{
"type": "report",
"is_query_report": True,
"name": "Territory Target Variance (Item Group-Wise)",
"route": "query-report/Territory Target Variance Item Group-Wise",
"doctype": "Territory"
},
{
"type": "report",
"is_query_report": True,
"name": "Sales Person Target Variance (Item Group-Wise)",
"route": "query-report/Sales Person Target Variance Item Group-Wise",
"doctype": "Sales Person",
},
{
"type": "report",
"is_query_report": True,
"name": "BOM Search",
"doctype": "BOM"
},
{
"type": "report",
"is_query_report": True,
"name": "Customers Not Buying Since Long Time",
"doctype": "Sales Order"
},
{
"type": "report",
"is_query_report": True,
"name": "Quotation Trends",
"doctype": "Quotation"
},
{
"type": "report",
"is_query_report": True,
"name": "Sales Order Trends",
"doctype": "Sales Order"
},
{
"type": "report",
"is_query_report": True,
"name": "Available Stock for Packing Items",
"doctype": "Item",
},
{
"type": "report",
"is_query_report": True,
"name": "Pending SO Items For Purchase Request",
"doctype": "Sales Order"
},
{
"type": "report",
"is_query_report": True,
"name": "Customer Credit Balance",
"doctype": "Customer"
},
]
},
{
"label": _("Help"),
"items": [
{
"type": "help",
"label": _("Customer and Supplier"),
"youtube_id": "anoGi_RpQ20"
},
{
"type": "help",
"label": _("Sales Order to Payment"),
"youtube_id": "7AMq4lqkN4A"
},
{
"type": "help",
"label": _("Point-of-Sale"),
"youtube_id": "4WkelWkbP_c"
},
]
},
]
|
mahabuber/erpnext
|
erpnext/config/selling.py
|
Python
|
agpl-3.0
| 7,142
|
import os, sys
from os.path import isdir, join
dir = sys.argv[1] if len(sys.argv) > 1 else '.'
print 'digraph {'
for i in os.listdir(dir):
if not isdir(join(dir, i)):
i = i.rsplit('.', 1)[0]
parts = i.split('=')
print ' -> '.join(parts[:2])
# TODO: add label (optional section 3)
print '}'
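# Usage sketch (editor's note): file names in the target directory are
# expected to encode edges as 'a=b[=label][.ext]'; pipe the output to
# Graphviz, e.g.:
#   python graph.py some_dir | dot -Tpng -o graph.png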
|
stevenrobertson/cuburn
|
scripts/graph.py
|
Python
|
gpl-2.0
| 316
|
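# Editor's note: this file is a fragment from a salt-states template. The
# 'log' and 'bot' objects and get_plugin_by_name() are assumed to be
# provided by the surrounding slackbot runtime, and the {{ ... }} tokens
# are Jinja placeholders filled in at deploy time.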
log.info("Restoring core configs.")
bot["configs"] = {'Webserver': {'SSL': {'port': 3142, 'certificate': '', 'enabled': False, 'host': '0.0.0.0', 'key': ''}, 'PORT': 3141, 'HOST': '0.0.0.0'}, 'TwilioLookup': {'TWILIO_ACCOUNT_SID': '{{ twilio_account_sid }}', 'TWILIO_AUTH_TOKEN': '{{ twilio_auth_token }}'}}
log.info("Installing plugins.")
for repo in bot["repos"]:
errors = bot.install(repo)
for error in errors:
log.error(error)
log.info("Restoring plugins data.")
pobj = get_plugin_by_name("Plugins").plugin_object
pobj.init_storage()
pobj["repos"] = {}
pobj.close_storage()
|
RobSpectre/salt-states
|
slackbot/backup.py
|
Python
|
mit
| 590
|
import pymysql
pymysql.install_as_MySQLdb()
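# Editor's note: install_as_MySQLdb() registers PyMySQL under the module
# name 'MySQLdb', so code that does 'import MySQLdb' (e.g. Django's MySQL
# backend) transparently uses this pure-Python driver instead.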
|
HarrisonHDU/myerp
|
erp/__init__.py
|
Python
|
mit
| 46
|
from __future__ import absolute_import
from sentry.testutils import AcceptanceTestCase
class AuthTest(AcceptanceTestCase):
def enter_auth(self, username, password):
# disable captcha as it makes these tests flakey (and requires waiting
# on external resources)
with self.settings(RECAPTCHA_PUBLIC_KEY=None):
self.browser.get('/auth/login/')
self.browser.find_element_by_id('id_username').send_keys(username)
self.browser.find_element_by_id('id_password').send_keys(password)
self.browser.find_element_by_xpath("//button[contains(text(), 'Login')]").click()
def test_renders(self):
self.browser.get('/auth/login/')
self.browser.snapshot(name='login')
def test_no_credentials(self):
self.enter_auth('', '')
self.browser.snapshot(name='login fields required')
def test_invalid_credentials(self):
self.enter_auth('bad-username', 'bad-username')
self.browser.snapshot(name='login fields invalid')
def test_success(self):
email = 'dummy@example.com'
password = 'dummy'
user = self.create_user(email=email)
user.set_password(password)
user.save()
self.enter_auth(email, password)
self.browser.snapshot(name='login success')
|
mitsuhiko/sentry
|
tests/acceptance/test_auth.py
|
Python
|
bsd-3-clause
| 1,321
|
# -*- coding: UTF-8 -*-
# COPYRIGHT (c) 2016 Cristóbal Ganter
#
# GNU AFFERO GENERAL PUBLIC LICENSE
# Version 3, 19 November 2007
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from functools import partialmethod
from weakref import finalize
from tornado.gen import coroutine
from src import messages as msg
from src.db import message_broker as mb
from src.pub_sub import MalformedMessageError, \
UnrecognizedOwnerError
_path = 'src.swclass'
class WSClass(object):
"""Attaches its methods to a controller.MSGHandler.
.. todo::
* Explain this class better XD.
"""
_path = '.'.join((_path, 'WSClass'))
def __init__(self, handler):
_path = msg.join_path(self._path, '__init__')
self.handler = handler
self.pub_subs = {
'w': self.handler.ws_pub_sub,
'd': mb,
'l': self.handler.local_pub_sub,
}
for attr_name in dir(self):
attribute = getattr(self, attr_name)
if hasattr(attribute, 'msg_types'):
for _type, channels in attribute.msg_types:
msg.code_debug(
_path,
'Adding action: %r ...' % attribute
)
self.register_action_in(
msg_type=_type, action=attribute,
channels=channels)
finalize(
self, msg.code_debug, self._path,
'Deleting WSClass {0} from {0.handler} '
'...'.format(self)
)
@property
def channels(self):
return self.pub_subs.keys()
def redirect_to(self, channel, message, content=False):
"""Redirect ``message`` through ``channel``.
        If ``content`` is ``True``, then only the object
        corresponding to the ``'content'`` key of
        ``message`` is sent.
:param str channel:
The channel through which ``message`` will be
sent.
:param dict message:
The message to be sent.
:param bool content:
If ``True``, just the object corresponding to
the ``'content'`` key of ``message`` will be
sent.
If ``False``, the whole message will be sent.
:raises MalformedMessageError:
If ``content`` is ``True``, but ``message``
doesn't have the ``'content'`` key.
:raises BadChannelArgumentError:
If ``channel`` is not one of ``self.pub_subs``
keys.
:raises NotDictError:
If ``message`` is not a dictionary.
:raises NoMessageTypeError:
If the message or it's content doesn't have the
``'type'`` key.
:raises NoActionForMsgTypeError:
If ``send_function`` of the ``PubSub`` object
wasn't specified during object creation and
there's no registered action for this message
type.
"""
try:
m = message['content'] if content else message
self.pub_subs[channel].send_message(m)
except KeyError as ke:
if 'content' not in message:
mme = MalformedMessageError(
"If content=True, then message must "
"have the 'content' key."
)
raise mme from ke
elif channel not in self.pub_subs:
raise \
BadChannelArgumentError(self.channels) \
from ke
else:
raise
redirect_content_to = partialmethod(redirect_to,
content=True)
def register_action_in(self, msg_type, action,
channels):
"""Register ``action`` in a set of channels.
:param str msg_type:
The message type to which ``action`` will be
subscribed.
:param callable action:
The action to be registered in ``channels``.
:param set channels:
Set of strings, which identify all the channels
to which ``action`` will be registered.
:raises BadChannelArgumentError:
If any channel is not one of ``self.pub_subs``
keys.
"""
try:
for channel in channels:
ps = self.pub_subs[channel]
ps.register(msg_type, action, self)
except KeyError as ke:
if not all(c in self.pub_subs
for c in channels):
raise \
BadChannelArgumentError(self.channels) \
from ke
else:
raise
def unregister(self):
for ps in self.pub_subs.values():
try:
ps.remove_owner(self)
except UnrecognizedOwnerError:
pass
@coroutine
def end(self):
self.unregister()
class subscribe(object):
"""Append the ``msg_types`` attribute to a method.
Each parameter should have one of the following forms:
``type``, ``(type, channel)`` or
``(type, {channel, ...})``. Where ``type`` is a string
containing the message_type to which you want the method
to be subscribed and ``channel`` is one of this strings:
``'w'``, ``'d'``, ``'l'``. The channel strings mean:
Websocket, Database and Local.
If there are only 2 string parameters and the second is
one character long then this parameters are interpreted
as ``subscribe(type, channel)``.
This class should be used as a decorator.
:raises TypeError:
If any element of ``msg_types`` is not a tuple or a
string.
:raises ValueError:
If any tuple in ``msg_types`` has a length different
than 2.
"""
_path = '.'.join((_path, 'subscribe'))
def __init__(self, *msg_types,
channels={'w', 'd', 'l'}):
if len(msg_types) == 2 and \
isinstance(msg_types[0], str) and \
isinstance(msg_types[1], str) and \
len(msg_types[1]) == 1:
msg_types = ((msg_types[0], msg_types[1]),)
for t in msg_types:
if not isinstance(t, (tuple, str)):
raise TypeError(
'msg_types has an element that is not '
'a tuple or a string.'
)
if isinstance(t, tuple) and len(t) != 2:
raise ValueError(
'msg_types has a tuple that has a '
'length different than 2.'
)
self.msg_types = [(t, channels)
for t in msg_types
if isinstance(t, str)]
self.msg_types.extend(
(t[0], {t[1]})
if isinstance(t[1], str)
else t
for t in msg_types
if isinstance(t, tuple)
)
def __call__(self, method):
_path = '.'.join((self._path, '__call__'))
msg.code_debug(
_path,
'Subscribing method {!r} to {!r} message types '
'...'.format(method, self.msg_types)
)
method.msg_types = self.msg_types
return method
class BadChannelArgumentError(ValueError):
    def __init__(self, channels, *args):
super().__init__(
'The channel argument must be one of the '
'following strings: {}.'.format(channels),
*args
)
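# Usage sketch (editor's addition; 'example.echo' is a hypothetical message
# type and EchoWS is not part of this module):
#
#   class EchoWS(WSClass):
#       @subscribe('example.echo', 'w')
#       def echo(self, message):
#           # send the received message back through the websocket channel
#           self.redirect_to('w', message)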
|
TelematicaUSM/EduRT
|
src/wsclass.py
|
Python
|
agpl-3.0
| 8,134
|
from PyQt4.QtGui import *
from electrum_myr.plugins import BasePlugin, hook
from electrum_myr.i18n import _
class Plugin(BasePlugin):
def fullname(self):
return 'Virtual Keyboard'
def description(self):
return '%s\n%s' % (_("Add an optional virtual keyboard to the password dialog."), _("Warning: do not use this if it makes you pick a weaker password."))
@hook
def init_qt(self, gui):
self.gui = gui
self.vkb = None
self.vkb_index = 0
@hook
def password_dialog(self, pw, grid, pos):
vkb_button = QPushButton(_("+"))
vkb_button.setFixedWidth(20)
vkb_button.clicked.connect(lambda: self.toggle_vkb(grid, pw))
grid.addWidget(vkb_button, pos, 2)
self.kb_pos = 2
def toggle_vkb(self, grid, pw):
if self.vkb: grid.removeItem(self.vkb)
self.vkb = self.virtual_keyboard(self.vkb_index, pw)
grid.addLayout(self.vkb, self.kb_pos, 0, 1, 3)
self.vkb_index += 1
def virtual_keyboard(self, i, pw):
import random
i = i%3
if i == 0:
chars = 'abcdefghijklmnopqrstuvwxyz '
elif i == 1:
chars = 'ABCDEFGHIJKLMNOPQRTSUVWXYZ '
elif i == 2:
chars = '1234567890!?.,;:/%&()[]{}+-'
n = len(chars)
        # build a random permutation of the character indices
        s = list(range(n))
        random.shuffle(s)
def add_target(t):
return lambda: pw.setText(str( pw.text() ) + t)
vbox = QVBoxLayout()
grid = QGridLayout()
grid.setSpacing(2)
for i in range(n):
l_button = QPushButton(chars[s[i]])
l_button.setFixedWidth(25)
l_button.setFixedHeight(25)
l_button.clicked.connect(add_target(chars[s[i]]) )
grid.addWidget(l_button, i/6, i%6)
vbox.addLayout(grid)
return vbox
|
wozz/electrum-myr
|
plugins/virtualkeyboard.py
|
Python
|
gpl-3.0
| 2,092
|
"""\
Core Linear Algebra Tools
-------------------------
Linear algebra basics:
- norm Vector or matrix norm
- inv Inverse of a square matrix
- solve Solve a linear system of equations
- det Determinant of a square matrix
- lstsq Solve linear least-squares problem
- pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
value decomposition
- matrix_power Integer power of a square matrix
Eigenvalues and decompositions:
- eig Eigenvalues and vectors of a square matrix
- eigh Eigenvalues and eigenvectors of a Hermitian matrix
- eigvals Eigenvalues of a square matrix
- eigvalsh Eigenvalues of a Hermitian matrix
- qr QR decomposition of a matrix
- svd Singular value decomposition of a matrix
- cholesky Cholesky decomposition of a matrix
Tensor operations:
- tensorsolve Solve a linear tensor equation
- tensorinv Calculate an inverse of a tensor
Exceptions:
- LinAlgError Indicates a failed linear algebra operation
"""
from __future__ import division, absolute_import, print_function
depends = ['core']
|
LumPenPacK/NetworkExtractionFromImages
|
win_build/nefi2_win_amd64_msvc_2015/site-packages/numpy/linalg/info.py
|
Python
|
bsd-2-clause
| 1,198
|
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Marcel Caraciolo <marcel@pingmind.com>
# License: BSD
import numpy as np
import inspect
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
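# Editor's note: inspect.getargspec() was removed in Python 3.11; on modern
# Python the equivalent capability check would be:
#   'order' in inspect.signature(np.copy).parameters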
|
python-recsys/crab
|
crab/utils/fixes.py
|
Python
|
bsd-3-clause
| 612
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper for GetManagedObjects() result.
"""
import xml.etree.ElementTree as ET
from dbus_client_gen import managed_object_class
from dbus_client_gen import mo_query_builder
from ._data import SPECS
pools = mo_query_builder(ET.fromstring(SPECS['org.storage.stratis1.pool']))
filesystems = mo_query_builder(ET.fromstring(SPECS['org.storage.stratis1.filesystem']))
blockdevs = mo_query_builder(ET.fromstring(SPECS['org.storage.stratis1.blockdev']))
MOPool = managed_object_class(
"MOPool",
ET.fromstring(SPECS['org.storage.stratis1.pool'])
)
MOBlockDev = managed_object_class(
"MOBlockDev",
ET.fromstring(SPECS['org.storage.stratis1.blockdev'])
)
|
stratis-storage/stratisd-client-dbus
|
src/stratisd_client_dbus/_managedobjects.py
|
Python
|
apache-2.0
| 1,243
|
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
These pipeline stages are used in creating the StandardPipelineRenderer,
the default renderer for standard widget backends.
[createbg] => [overlays] => [iccprof] => [flipswap] => [rotate] => [output]
"""
import time
import numpy as np
from ginga import trcalc
from .base import Stage, StageError
class CreateBg(Stage):
_stagename = 'viewer-createbg'
def __init__(self, viewer):
super(CreateBg, self).__init__()
self.viewer = viewer
self.dtype = np.uint8
def run(self, prev_stage):
if prev_stage is not None:
raise StageError("'{}' in wrong location".format(self._stagename))
if self._bypass:
self.pipeline.send(res_np=None)
return
state = self.pipeline.get('state')
win_wd, win_ht = state.win_dim
# calc minimum size of pixel image we will generate
# necessary to fit the window in the desired size
# Make a square from the scaled cutout, with room to rotate
slop = 20
side = int(np.sqrt(win_wd**2 + win_ht**2) + slop)
wd = ht = side
# Find center of new array
ncx, ncy = wd // 2, ht // 2
depth = len(state.order)
# make backing image with the background color
r, g, b = self.viewer.get_bg()
res_np = trcalc.make_filled_array((ht, wd, depth), self.dtype,
state.order, r, g, b, 1.0)
self.pipeline.set(org_dim=(wd, ht), org_off=(ncx, ncy))
self.pipeline.send(res_np=res_np)
class ICCProf(Stage):
"""Convert the given RGB data from the input ICC profile
to the output ICC profile.
"""
_stagename = 'viewer-icc-profiler'
def __init__(self, viewer):
super(ICCProf, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
data = self.pipeline.get_data(prev_stage)
self.verify_2d(data)
from ginga.util import rgb_cms
working_profile = rgb_cms.working_profile
t_ = self.viewer.get_settings()
output_profile = t_.get('icc_output_profile', None)
if self._bypass or None in [working_profile, output_profile]:
self.pipeline.set(icc_output_profile=working_profile)
self.pipeline.send(res_np=data)
return
# color profiling will not work with other types
data = data.astype(np.uint8)
alpha = None
ht, wd, dp = data.shape
if dp > 3:
# color profile conversion does not handle an alpha layer
alpha = data[:, :, 3]
data = data[:, :, 0:3]
# get rest of necessary conversion parameters
to_intent = t_.get('icc_output_intent', 'perceptual')
proofprof_name = t_.get('icc_proof_profile', None)
proof_intent = t_.get('icc_proof_intent', 'perceptual')
use_black_pt = t_.get('icc_black_point_compensation', False)
try:
data = rgb_cms.convert_profile_fromto(data,
working_profile,
output_profile,
to_intent=to_intent,
proof_name=proofprof_name,
proof_intent=proof_intent,
use_black_pt=use_black_pt,
logger=self.logger)
self.logger.debug("Converted from '%s' to '%s' profile" % (
working_profile, output_profile))
except Exception as e:
self.logger.warning("Error converting output from working profile: %s" % (str(e)))
# TODO: maybe should have a traceback here
self.logger.info("Output left unprofiled")
if alpha is not None:
data = trcalc.add_alpha(data, alpha)
self.pipeline.set(icc_output_profile=output_profile)
self.pipeline.send(res_np=data)
class FlipSwap(Stage):
_stagename = 'viewer-flipswap'
def __init__(self, viewer):
super(FlipSwap, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
data = self.pipeline.get_data(prev_stage)
self.verify_2d(data)
xoff, yoff = self.pipeline.get('org_off')
if not self._bypass:
flip_x, flip_y, swap_xy = self.viewer.get_transforms()
ht, wd = data.shape[:2]
# Do transforms as necessary
data = trcalc.transform(data, flip_x=flip_x, flip_y=flip_y,
swap_xy=swap_xy)
if flip_y:
yoff = ht - yoff
if flip_x:
xoff = wd - xoff
if swap_xy:
xoff, yoff = yoff, xoff
self.pipeline.set(off=(xoff, yoff))
self.pipeline.send(res_np=data)
class Rotate(Stage):
_stagename = 'viewer-rotate'
def __init__(self, viewer):
super(Rotate, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
data = self.pipeline.get_data(prev_stage)
self.verify_2d(data)
if not self._bypass:
rot_deg = self.viewer.get_rotation()
if not np.isclose(rot_deg, 0.0):
data = np.copy(data)
#data = np.ascontiguousarray(data)
data = trcalc.rotate_clip(data, -rot_deg, out=data,
logger=self.logger)
# apply other transforms
if self.viewer._invert_y:
# Flip Y for natural Y-axis inversion between FITS coords
# and screen coords
data = np.flipud(data)
# dimensions may have changed in transformations
ht, wd = data.shape[:2]
xoff, yoff = self.pipeline.get('off')
state = self.pipeline.get('state')
ctr_x, ctr_y = state.ctr
dst_x, dst_y = ctr_x - xoff, ctr_y - (ht - yoff)
self.pipeline.set(dst=(dst_x, dst_y))
self.logger.debug("ctr=%d,%d off=%d,%d dst=%d,%d cutout=%dx%d" % (
ctr_x, ctr_y, xoff, yoff, dst_x, dst_y, wd, ht))
win_wd, win_ht = state.win_dim
self.logger.debug("win=%d,%d coverage=%d,%d" % (
win_wd, win_ht, dst_x + wd, dst_y + ht))
self.pipeline.send(res_np=data)
class Output(Stage):
_stagename = 'viewer-output'
def __init__(self, viewer):
super(Output, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
data = self.pipeline.get_data(prev_stage)
## assert (len(data.shape) == 3 and data.dtype == np.uint8 and
## data.shape[2] in [3, 4]), \
## StageError("Expecting a RGB[A] image in final stage")
self.verify_2d(data)
state = self.pipeline.get('state')
out_order = state.order
if not self._bypass:
ht, wd = data.shape[:2]
state = self.pipeline.get('state')
win_wd, win_ht = state.win_dim
if wd < win_wd or ht < win_ht:
raise StageError("pipeline output doesn't cover window")
# now cut out the size that we need
dst_x, dst_y = self.pipeline.get('dst')
if dst_x > 0 or dst_y > 0:
raise StageError("pipeline calculated dst is not correct")
x1, y1 = abs(dst_x), abs(dst_y)
data = data[y1:y1 + win_ht, x1:x1 + win_wd]
# reorder image for renderer's desired format
dst_order = self.viewer.renderer.get_rgb_order()
data = trcalc.reorder_image(dst_order, data, state.order)
data = np.ascontiguousarray(data)
out_order = dst_order
self.pipeline.set(out_order=out_order)
self.pipeline.send(res_np=data)
class Overlays(Stage):
_stagename = 'viewer-image-overlays'
def __init__(self, viewer):
super(Overlays, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
bgarr = self.pipeline.get_data(prev_stage)
self.verify_2d(bgarr)
dstarr = np.copy(bgarr)
self._rgbarr2 = dstarr
self.pipeline.set(dstarr=dstarr)
whence = self.pipeline.get('whence')
p_canvas = self.viewer.get_private_canvas()
self._overlay_images(p_canvas, whence=whence)
self.pipeline.send(res_np=dstarr)
def _overlay_images(self, canvas, whence=0.0):
if not hasattr(canvas, 'objects'):
return
for obj in canvas.get_objects():
if hasattr(obj, 'prepare_image'):
obj.prepare_image(self.viewer, whence)
elif obj.is_compound() and (obj != canvas):
self._overlay_images(obj, whence=whence)
def _common_draw(self, cvs_img, cache, whence):
# internal common drawing phase for all images
image = cvs_img.image
if image is None:
return
dstarr = self._rgbarr2
if (whence <= 0.0) or (cache.cutout is None) or (not cvs_img.optimize):
# get extent of our data coverage in the window
# TODO: get rid of padding by fixing get_draw_rect() which
# doesn't quite get the coverage right at high magnifications
pad = 1.0
pts = np.asarray(self.viewer.get_draw_rect()).T
xmin = int(np.min(pts[0])) - pad
ymin = int(np.min(pts[1])) - pad
xmax = int(np.ceil(np.max(pts[0]))) + pad
ymax = int(np.ceil(np.max(pts[1]))) + pad
# get destination location in data_coords
dst_x, dst_y = cvs_img.crdmap.to_data((cvs_img.x, cvs_img.y))
a1, b1, a2, b2 = 0, 0, cvs_img.image.width - 1, cvs_img.image.height - 1
# calculate the cutout that we can make and scale to merge
# onto the final image--by only cutting out what is necessary
# this speeds scaling greatly at zoomed in sizes
((dst_x, dst_y), (a1, b1), (a2, b2)) = \
trcalc.calc_image_merge_clip((xmin, ymin), (xmax, ymax),
(dst_x, dst_y),
(a1, b1), (a2, b2))
# is image completely off the screen?
if (a2 - a1 <= 0) or (b2 - b1 <= 0):
# no overlay needed
cache.cutout = None
return
# cutout and scale the piece appropriately by the viewer scale
scale_x, scale_y = self.viewer.get_scale_xy()
# scale additionally by our scale
_scale_x, _scale_y = (scale_x * cvs_img.scale_x,
scale_y * cvs_img.scale_y)
interp = cvs_img.interpolation
if interp is None:
t_ = self.viewer.get_settings()
interp = t_.get('interpolation', 'basic')
# previous choice might not be available if preferences
# were saved when opencv was being used (and not used now);
# if so, silently default to "basic"
if interp not in trcalc.interpolation_methods:
interp = 'basic'
res = image.get_scaled_cutout2((a1, b1), (a2, b2),
(_scale_x, _scale_y),
method=interp)
data = res.data
if cvs_img.flipy:
data = np.flipud(data)
cache.cutout = data
# calculate our offset from the pan position
pan_x, pan_y = self.viewer.get_pan()
pan_off = self.viewer.data_off
pan_x, pan_y = pan_x + pan_off, pan_y + pan_off
off_x, off_y = dst_x - pan_x, dst_y - pan_y
# scale offset
off_x *= scale_x
off_y *= scale_y
# dst position in the pre-transformed array should be calculated
# from the center of the array plus offsets
ht, wd, dp = dstarr.shape
cvs_x = int(np.round(wd / 2.0 + off_x))
cvs_y = int(np.round(ht / 2.0 + off_y))
cache.cvs_pos = (cvs_x, cvs_y)
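        # Worked example (illustrative numbers): for a destination array
        # with wd=1000, ht=800 and scaled offsets (off_x, off_y) =
        # (-120.0, 35.0), the cutout is placed at
        # (round(500 - 120), round(400 + 35)) = (380, 435).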
def _prepare_image(self, cvs_img, cache, whence):
if whence > 2.3 and cache.rgbarr is not None:
return
dstarr = self._rgbarr2
t1 = t2 = time.time()
self._common_draw(cvs_img, cache, whence)
if cache.cutout is None:
return
cache.rgbarr = cache.cutout
t2 = time.time()
state = self.pipeline.get('state')
dst_order = state.order
image_order = cvs_img.image.get_order()
# composite the image into the destination array at the
# calculated position
trcalc.overlay_image(dstarr, cache.cvs_pos, cache.rgbarr,
dst_order=dst_order, src_order=image_order,
alpha=cvs_img.alpha, fill=True, flipy=False)
cache.drawn = True
t3 = time.time()
self.logger.debug("draw: t2=%.4f t3=%.4f total=%.4f" % (
t2 - t1, t3 - t2, t3 - t1))
def _prepare_norm_image(self, cvs_img, cache, whence):
if whence > 2.3 and cache.rgbarr is not None:
return
dstarr = self._rgbarr2
t1 = t2 = t3 = t4 = time.time()
self._common_draw(cvs_img, cache, whence)
if cache.cutout is None:
return
t2 = time.time()
if cvs_img.rgbmap is not None:
rgbmap = cvs_img.rgbmap
else:
rgbmap = self.viewer.get_rgbmap()
image_order = cvs_img.image.get_order()
if (whence <= 0.0) or (not cvs_img.optimize):
# if image has an alpha channel, then strip it off and save
# it until it is recombined later with the colorized output
# this saves us having to deal with an alpha band in the
# cuts leveling and RGB mapping routines
img_arr = cache.cutout
if 'A' not in image_order:
cache.alpha = None
else:
# normalize alpha array to the final output range
mn, mx = trcalc.get_minmax_dtype(img_arr.dtype)
a_idx = image_order.index('A')
cache.alpha = (img_arr[..., a_idx] / mx *
rgbmap.maxc).astype(rgbmap.dtype)
cache.cutout = img_arr[..., 0:a_idx]
if (whence <= 1.0) or (cache.prergb is None) or (not cvs_img.optimize):
# apply visual changes prior to color mapping (cut levels, etc)
vmax = rgbmap.get_hash_size() - 1
newdata = self._apply_visuals(cvs_img, cache.cutout, 0, vmax)
# result becomes an index array fed to the RGB mapper
if not np.issubdtype(newdata.dtype, np.dtype('uint')):
newdata = newdata.astype(np.uint)
idx = newdata
self.logger.debug("shape of index is %s" % (str(idx.shape)))
cache.prergb = idx
t3 = time.time()
state = self.pipeline.get('state')
dst_order = state.order
if (whence <= 2.0) or (cache.rgbarr is None) or (not cvs_img.optimize):
# get RGB mapped array
rgbobj = rgbmap.get_rgbarray(cache.prergb, order=dst_order,
image_order=image_order)
cache.rgbarr = rgbobj.get_array(dst_order)
if cache.alpha is not None and 'A' in dst_order:
a_idx = dst_order.index('A')
cache.rgbarr[..., a_idx] = cache.alpha
t4 = time.time()
# composite the image into the destination array at the
# calculated position
trcalc.overlay_image(dstarr, cache.cvs_pos, cache.rgbarr,
dst_order=dst_order, src_order=dst_order,
alpha=cvs_img.alpha, fill=True, flipy=False)
cache.drawn = True
t5 = time.time()
self.logger.debug("draw: t2=%.4f t3=%.4f t4=%.4f t5=%.4f total=%.4f" % (
t2 - t1, t3 - t2, t4 - t3, t5 - t4, t5 - t1))
def _apply_visuals(self, cvs_img, data, vmin, vmax):
if cvs_img.autocuts is not None:
autocuts = cvs_img.autocuts
else:
autocuts = self.viewer.autocuts
# Apply cut levels
if cvs_img.cuts is not None:
loval, hival = cvs_img.cuts
else:
loval, hival = self.viewer.t_['cuts']
newdata = autocuts.cut_levels(data, loval, hival,
vmin=vmin, vmax=vmax)
return newdata
##########################
class Overlays2(Stage):
_stagename = 'viewer-image-overlays'
def __init__(self, viewer):
super(Overlays2, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
bgarr = self.pipeline.get_data(prev_stage)
self.verify_2d(bgarr)
dstarr = np.copy(bgarr)
self.pipeline.set(dstarr=dstarr)
whence = self.pipeline.get('whence')
p_canvas = self.viewer.get_private_canvas()
self._overlay_images(p_canvas, whence=whence)
self.pipeline.send(res_np=dstarr)
def _overlay_images(self, canvas, whence=0.0):
if not hasattr(canvas, 'objects'):
return
for obj in canvas.get_objects():
if hasattr(obj, 'prepare_image'):
obj.prepare_image(self.viewer, whence)
elif obj.is_compound() and (obj != canvas):
self._overlay_images(obj, whence=whence)
def _prepare_image(self, cvs_img, cache, whence):
from ginga.util import pipeline
pipe = cache.get('minipipe', None)
if pipe is None:
stages = [Clip(self.viewer),
Merge(self.viewer)]
pipe = pipeline.Pipeline(self.logger, stages)
pipe.name = 'image-overlays'
cache.minipipe = pipe
state = self.pipeline.get('state')
pipe.set(whence=whence, cvs_img=cvs_img, state=state,
dstarr=self.pipeline.get('dstarr'))
if whence <= 0:
pipe.run_from(pipe[0])
return
if not cache.visible:
return
pipe.run_from(pipe[1])
def _prepare_norm_image(self, cvs_img, cache, whence):
from ginga.util import pipeline
pipe = cache.get('minipipe', None)
if pipe is None:
stages = [Clip(self.viewer),
Cuts(self.viewer),
RGBMap(self.viewer),
Merge(self.viewer)]
pipe = pipeline.Pipeline(self.logger, stages)
pipe.name = 'image-overlays'
cache.minipipe = pipe
state = self.pipeline.get('state')
pipe.set(whence=whence, cvs_img=cvs_img, state=state,
dstarr=self.pipeline.get('dstarr'))
if whence <= 0:
pipe.run_from(pipe[0])
return
if not cache.visible:
return
elif whence <= 1:
pipe.run_from(pipe[1])
elif whence <= 2:
pipe.run_from(pipe[2])
else:
pipe.run_from(pipe[3])
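# Summary of the whence dispatch above (restates the code, adds no new
# logic): whence <= 0 reruns the mini-pipeline from Clip (geometry, pan or
# scale changed); whence <= 1 from Cuts (cut levels changed); whence <= 2
# from RGBMap (color mapping changed); anything later redoes only Merge.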
class Clip(Stage):
_stagename = 'viewer-clip'
def __init__(self, viewer):
super(Clip, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
#assert prev_stage is None, StageError("'viewclip' in wrong location")
cvs_img = self.pipeline.get('cvs_img')
cache = cvs_img.get_cache(self.viewer)
image = cvs_img.get_image()
if image is None:
self.pipeline.send(res_np=None)
return
data_np = image.get_data()
self.verify_2d(data_np)
# get extent of our data coverage in the window
# TODO: get rid of padding by fixing get_draw_rect() which
# doesn't quite get the coverage right at high magnifications
pad = 1.0
pts = np.asarray(self.viewer.get_draw_rect()).T
xmin = int(np.min(pts[0])) - pad
ymin = int(np.min(pts[1])) - pad
xmax = int(np.ceil(np.max(pts[0]))) + pad
ymax = int(np.ceil(np.max(pts[1]))) + pad
# get destination location in data_coords
img = cvs_img
dst_x, dst_y = img.crdmap.to_data((img.x, img.y))
ht, wd = data_np.shape[:2]
# TODO: think we need to apply scaling factors to wd/ht
# BEFORE we calculate merge clip
a1, b1, a2, b2 = 0, 0, wd - 1, ht - 1
# calculate the cutout that we can make and scale to merge
# onto the final image--by only cutting out what is necessary
# this speeds scaling greatly at zoomed in sizes
((dst_x, dst_y), (a1, b1), (a2, b2)) = \
trcalc.calc_image_merge_clip((xmin, ymin), (xmax, ymax),
(dst_x, dst_y),
(a1, b1), (a2, b2))
# is image completely off the screen?
if (a2 - a1 <= 0) or (b2 - b1 <= 0):
# no overlay needed
self.pipeline.send(res_np=None)
cache.visible = False
self.pipeline.stop()
return
cache.visible = True
# cutout and scale the piece appropriately by the viewer scale
scale_x, scale_y = self.viewer.get_scale_xy()
# scale additionally by scale specified in canvas image
_scale_x, _scale_y = (scale_x * img.scale_x,
scale_y * img.scale_y)
interp = img.interpolation
if interp is None:
t_ = self.viewer.get_settings()
interp = t_.get('interpolation', 'basic')
if interp not in trcalc.interpolation_methods:
interp = 'basic'
data, scales = trcalc.get_scaled_cutout_basic(data_np, a1, b1, a2, b2,
_scale_x, _scale_y,
interpolation=interp,
logger=self.logger)
if img.flipy:
data = np.flipud(data)
# calculate our offset from the pan position
pan_x, pan_y = self.viewer.get_pan()
pan_off = self.viewer.data_off
pan_x, pan_y = pan_x + pan_off, pan_y + pan_off
off_x, off_y = dst_x - pan_x, dst_y - pan_y
# scale offset
off_x *= scale_x
off_y *= scale_y
self.pipeline.set(offset=(off_x, off_y))
## if cvs_img.rgbmap is not None:
## rgbmap = cvs_img.rgbmap
## else:
rgbmap = self.viewer.get_rgbmap()
state = self.pipeline.get('state')
image_order = image.get_order()
## if image_order != state.order:
## # reorder image channels for pipeline
## data = trcalc.reorder_image(state.order, data, image_order)
if 'A' not in image_order:
alpha = None
else:
# if image has an alpha channel, then strip it off and save
# it until it is recombined later with the colorized output
# this saves us having to deal with an alpha band in the
# cuts leveling and RGB mapping routines
# normalize alpha array to the final output range
mn, mx = trcalc.get_minmax_dtype(data.dtype)
a_idx = image_order.index('A')
alpha = (data[..., a_idx] / mx *
rgbmap.maxc).astype(rgbmap.dtype)
data = data[..., 0:a_idx]
ht, wd, dp = data.shape
if dp == 1:
data = data.reshape((ht, wd))
self.pipeline.set(alpha=alpha)
self.pipeline.send(res_np=data)
class Merge(Stage):
_stagename = 'viewer-merge-overlay'
def __init__(self, viewer):
super(Merge, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
rgbarr = self.pipeline.get_data(prev_stage)
if rgbarr is None:
# nothing to merge
return
self.verify_2d(rgbarr)
cvs_img = self.pipeline.get('cvs_img')
off_x, off_y = self.pipeline.get('offset')
dstarr = self.pipeline.get('dstarr')
state = self.pipeline.get('state')
# dst position in the pre-transformed array should be calculated
# from the center of the array plus offsets
ht, wd, dp = dstarr.shape
cvs_x = int(np.round(wd / 2.0 + off_x))
cvs_y = int(np.round(ht / 2.0 + off_y))
cvs_pos = (cvs_x, cvs_y)
dst_order = state.order
image_order = state.order
## alpha = self.pipeline.get('alpha')
## if alpha is not None:
## rgbarr[..., -1] = alpha
# composite the image into the destination array at the
# calculated position
trcalc.overlay_image(dstarr, cvs_pos, rgbarr,
dst_order=dst_order, src_order=image_order,
                             # NOTE: these are actually not used because
                             # rgbarr already contains an alpha channel
alpha=cvs_img.alpha, fill=True,
flipy=False) # cvs_img.flipy
cache = cvs_img.get_cache(self.viewer)
cache.drawn = True
#self.pipeline.send(res_np=None)
class Cuts(Stage):
_stagename = 'viewer-cut-levels'
def __init__(self, viewer):
super(Cuts, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
data = self.pipeline.get_data(prev_stage)
if data is None:
self.pipeline.send(res_np=None)
return
self.verify_2d(data)
cvs_img = self.pipeline.get('cvs_img')
if cvs_img.rgbmap is not None:
rgbmap = cvs_img.rgbmap
else:
rgbmap = self.viewer.get_rgbmap()
vmin = 0
vmax = rgbmap.get_hash_size() - 1
if cvs_img.autocuts is not None:
autocuts = cvs_img.autocuts
else:
autocuts = self.viewer.autocuts
# Apply cut levels
if cvs_img.cuts is not None:
loval, hival = cvs_img.cuts
else:
loval, hival = self.viewer.t_['cuts']
res_np = autocuts.cut_levels(data, loval, hival,
vmin=vmin, vmax=vmax)
# NOTE: optimization to prevent multiple coercions in
# RGBMap
if not np.issubdtype(res_np.dtype, np.uint):
res_np = res_np.astype(np.uint)
self.pipeline.send(res_np=res_np)
class RGBMap(Stage):
_stagename = 'viewer-rgb-mapper'
def __init__(self, viewer):
super(RGBMap, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
data = self.pipeline.get_data(prev_stage)
if data is None:
self.pipeline.send(res_np=None)
return
self.verify_2d(data)
cvs_img = self.pipeline.get('cvs_img')
state = self.pipeline.get('state')
if cvs_img.rgbmap is not None:
rgbmap = cvs_img.rgbmap
else:
rgbmap = self.viewer.get_rgbmap()
# See NOTE in Cuts
## if not np.issubdtype(data.dtype, np.uint):
## data = data.astype(np.uint)
# get RGB mapped array
image_order = trcalc.guess_order(data.shape)
rgbobj = rgbmap.get_rgbarray(data, order=state.order,
image_order=image_order)
res_np = rgbobj.get_array(state.order)
alpha = self.pipeline.get('alpha')
if alpha is not None:
res_np[..., -1] = alpha
self.pipeline.send(res_np=res_np)
|
pllim/ginga
|
ginga/util/stages/render.py
|
Python
|
bsd-3-clause
| 27,918
|
"""
===============
Gaussian fitter
===============
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
Created 3/17/08
Original version available at http://code.google.com/p/agpy/source/browse/trunk/agpy/gaussfitter.py
(the version below uses a Class instead of independent functions)
"""
from __future__ import print_function
import numpy
from numpy.ma import median
from numpy import pi
from pyspeckit.mpfit import mpfit
import matplotlib.cbook as mpcb
from . import mpfit_messages
from . import model
from astropy.extern.six.moves import xrange
class gaussian_fitter(model.SpectralModel):
"""
A rather complicated Gaussian fitter class. Inherits from, but overrides
most components of, :mod:`model.SpectralModel`
"""
def __init__(self):
self.npars = 3
self.npeaks = 1
self.onepeakgaussfit = self._fourparfitter(self.onepeakgaussian)
def __call__(self,*args,**kwargs):
return self.multigaussfit(*args,**kwargs)
def onepeakgaussian(self, x,H,A,dx,w):
"""
Returns a 1-dimensional gaussian of form
H+A*numpy.exp(-(x-dx)**2/(2*w**2))
[height,amplitude,center,width]
"""
x = numpy.array(x) # make sure xarr is no longer a spectroscopic axis
return H+A*numpy.exp(-(x-dx)**2/(2*w**2))
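        # Illustrative check (hypothetical values): with H=0, A=2, dx=0, w=1
        # the peak value at x=0 is 2.0; at x=w it falls to
        # 2*exp(-0.5) ~= 1.213.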
def multipeakgaussian(self, x, pars):
"""
Returns flux at position x due to contributions from multiple Gaussians.
"""
x = numpy.array(x) # make sure xarr is no longer a spectroscopic axis
        pars = numpy.reshape(pars, (len(pars) // 3, 3))
result = 0
for fit in pars: result += self.onepeakgaussian(x, 0, fit[0], fit[1], fit[2])
return result
def slope(self, x):
"""
Return slope at position x for multicomponent Gaussian fit. Need this in measurements class for
finding the FWHM of multicomponent lines whose centroids are not identical.
"""
        pars = numpy.reshape(self.mpp, (len(self.mpp) // 3, 3))
result = 0
for fit in pars:
result += self.onepeakgaussian(x, 0, fit[0], fit[1], fit[2]) * (-2. * (x - fit[1]) / 2. / fit[2]**2)
return result
def n_gaussian(self, pars=None,a=None,dx=None,sigma=None):
"""
Returns a function that sums over N gaussians, where N is the length of
a,dx,sigma *OR* N = len(pars) / 3
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
pars - a list with len(pars) = 3n, assuming a,dx,sigma repeated
dx - offset (velocity center) values
sigma - line widths
a - amplitudes
"""
if len(pars) % 3 == 0:
a = [pars[ii] for ii in xrange(0,len(pars),3)]
dx = [pars[ii] for ii in xrange(1,len(pars),3)]
sigma = [pars[ii] for ii in xrange(2,len(pars),3)]
elif not(len(dx) == len(sigma) == len(a)):
raise ValueError("Wrong array lengths! dx: %i sigma: %i a: %i" % (len(dx),len(sigma),len(a)))
def g(x):
v = numpy.zeros(len(x))
            for ii in range(len(pars) // 3):
v += a[ii] * numpy.exp( - ( x - dx[ii] )**2 / (2.0*sigma[ii]**2) )
return v
return g
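        # Example (sketch, hypothetical parameters): pars=[1, 0, 1, 0.5, 3, 2]
        # describes two components with a=[1, 0.5], dx=[0, 3], sigma=[1, 2];
        # n_gaussian(pars=pars)(x) evaluates their sum over an array x.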
def multigaussfit(self, xax, data, npeaks=1, err=None, params=[1,0,1],
fixed=[False,False,False], limitedmin=[False,False,True],
limitedmax=[False,False,False], minpars=[0,0,0], maxpars=[0,0,0],
quiet=True, shh=True, veryverbose=False, negamp=None,
tied = ['', '', ''], parinfo=None, debug=False, **kwargs):
"""
An improvement on onepeakgaussfit. Lets you fit multiple gaussians.
Inputs:
xax - x axis
data - y axis
npeaks - How many gaussians to fit? Default 1 (this could supersede onepeakgaussfit)
err - error corresponding to data
These parameters need to have length = 3*npeaks. If npeaks > 1 and length = 3, they will
be replicated npeaks times, otherwise they will be reset to defaults:
params - Fit parameters: [amplitude, offset, width] * npeaks
If len(params) % 3 == 0, npeaks will be set to len(params) / 3
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
tied - link parameters together
quiet - should MPFIT output each iteration?
shh - output final parameters?
kwargs are passed to mpfit
Returns:
Fit parameters
Model
Fit errors
chi2
"""
        if len(params) != npeaks and (len(params) // 3) > npeaks:
            self.npeaks = len(params) // 3
else:
self.npeaks = npeaks
if isinstance(params,numpy.ndarray): params=params.tolist()
# make sure all various things are the right length; if they're not, fix them using the defaults
        # multigaussfit should process negamp directly
        # if kwargs.has_key('negamp') is False: kwargs['negamp'] = None
pardict = {"params":params,"fixed":fixed,"limitedmin":limitedmin,"limitedmax":limitedmax,"minpars":minpars,"maxpars":maxpars,"tied":tied}
for parlistname in pardict:
parlist = pardict[parlistname]
if len(parlist) != 3*self.npeaks:
# if you leave the defaults, or enter something that can be multiplied by 3 to get to the
                # right number of gaussians, it will just replicate
if veryverbose: print("Correcting length of parameter %s" % parlistname)
if len(parlist) == 3:
parlist *= self.npeaks
elif parlistname=="params":
parlist[:] = [1,0,1] * self.npeaks
elif parlistname=="fixed":
parlist[:] = [False,False,False] * self.npeaks
elif parlistname=="limitedmax":
if negamp is None: parlist[:] = [False,False,False] * self.npeaks
elif negamp is False: parlist[:] = [False,False,False] * self.npeaks
else: parlist[:] = [True,False,False] * self.npeaks
elif parlistname=="limitedmin":
if negamp is None: parlist[:] = [False,False,True] * self.npeaks # Lines can't have negative width!
elif negamp is False: parlist[:] = [True,False,True] * self.npeaks
else: parlist[:] = [False,False,True] * self.npeaks
elif parlistname=="minpars" or parlistname=="maxpars":
parlist[:] = [0,0,0] * self.npeaks
elif parlistname=="tied":
parlist[:] = ['','',''] * self.npeaks
# mpfit doesn't recognize negamp, so get rid of it now that we're done setting limitedmin/max and min/maxpars
#if kwargs.has_key('negamp'): kwargs.pop('negamp')
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-self.n_gaussian(pars=p)(x))]
else:
def f(p,fjac=None): return [0,(y-self.n_gaussian(pars=p)(x))/err]
return f
if xax is None:
xax = numpy.arange(len(data))
parnames = {0:"AMPLITUDE",1:"SHIFT",2:"WIDTH"}
if parinfo is None:
parinfo = [ {'n':ii, 'value':params[ii],
'limits':[minpars[ii],maxpars[ii]],
'limited':[limitedmin[ii],limitedmax[ii]], 'fixed':fixed[ii],
                'parname':parnames[ii%3]+str(ii//3), 'error':ii, 'tied':tied[ii]}
for ii in xrange(len(params)) ]
if veryverbose:
print("GUESSES: ")
print("\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo]))
if debug:
for p in parinfo: print(p)
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet,**kwargs)
mpp = mp.params
if mp.perror is not None: mpperr = mp.perror
else: mpperr = mpp*0
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
if not shh:
print("Fit status: ",mp.status)
print("Fit error message: ",mp.errmsg)
print("Fit message: ",mpfit_messages[mp.status])
print("Final fit values: ")
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
print(parinfo[i]['parname'],p," +/- ",mpperr[i])
print("Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp))
self.mp = mp
self.mpp = mpp
self.mpperr = mpperr
self.model = self.n_gaussian(pars=mpp)(xax)
return mpp,self.n_gaussian(pars=mpp)(xax),mpperr,chi2
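        # Usage sketch (synthetic data; the noise level and initial guesses
        # are illustrative assumptions, not from the original module):
        #   gf = gaussian_fitter()
        #   x = numpy.linspace(-5, 5, 200)
        #   y = 3*numpy.exp(-x**2/2.0) + numpy.random.normal(0, 0.1, x.size)
        #   mpp, model, mpperr, chi2 = gf.multigaussfit(x, y, npeaks=1,
        #                                               params=[3, 0, 1])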
def annotations(self):
label_list = [(
"$A(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[0+jj*self.npars],self.mpperr[0+jj*self.npars]),
"$x(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[1+jj*self.npars],self.mpperr[1+jj*self.npars]),
"$\\sigma(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[2+jj*self.npars],self.mpperr[2+jj*self.npars])
) for jj in range(self.npeaks)]
labels = tuple(mpcb.flatten(label_list))
return labels
def components(self,xarr,modelpars):
modelcomponents = [ self.onepeakgaussian(xarr,
0.0,modelpars[3*i],modelpars[3*i+1],modelpars[3*i+2]) for i in range(self.npeaks)]
return modelcomponents
def integral(self, modelpars):
"""
Return the integral of the individual components (ignoring height)
"""
        return self.model.sum()
        # NOTE: the summed model above is used for compatibility with other
        # models; the analytic computation below is the "proper" way to do
        # it but is unreachable as written.
integ = 0
if len(modelpars) % 3 == 0:
            for amp,cen,width in numpy.reshape(modelpars,[len(modelpars)//3,3]):
integ += amp*width*numpy.sqrt(2.0*numpy.pi)
return integ
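        # For reference, a single component has analytic area
        # amp*width*sqrt(2*pi); e.g. amp=2, width=1 integrates to
        # 2*sqrt(2*pi) ~= 5.013 (illustrative numbers).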
n_modelfunc = n_gaussian
|
vlas-sokolov/pyspeckit
|
pyspeckit/spectrum/models/gaussfitter.py
|
Python
|
mit
| 10,316
|