text stringlengths 81 112k |
|---|
save stock_xdxr
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
def QA_SU_save_stock_xdxr(engine, client=DATABASE):
    """save stock_xdxr

    Resolve the save engine by name and delegate the xdxr
    (ex-dividend / ex-rights) saving job to it.

    Arguments:
        engine {[type]} -- engine name, resolved via select_save_engine
    Keyword Arguments:
        client {[type]} -- mongo client (default: {DATABASE})
    """
    save_engine = select_save_engine(engine)
    save_engine.QA_SU_save_stock_xdxr(client=client)
save stock_block
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
def QA_SU_save_stock_block(engine, client=DATABASE):
    """save stock_block

    Resolve the save engine by name and delegate the stock-block
    saving job to it.

    Arguments:
        engine {[type]} -- engine name, resolved via select_save_engine
    Keyword Arguments:
        client {[type]} -- mongo client (default: {DATABASE})
    """
    save_engine = select_save_engine(engine)
    save_engine.QA_SU_save_stock_block(client=client)
select save_engine , tushare ts Tushare 使用 Tushare 免费数据接口, tdx 使用通达信数据接口
:param engine: 字符串Str
:param paralleled: 是否并行处理;默认为False
:return: sts means save_tushare_py or stdx means save_tdx_py
def select_save_engine(engine, paralleled=False):
    '''
    Select a save-engine module by name.

    tushare / ts / Tushare -> tushare free data API (sts)
    tdx                    -> TDX (通达信) data API (stdx / stdx_parallelism)
    gm / goldenminer       -> GoldMiner API (sgm)
    jq / joinquant         -> JoinQuant API (sjq)

    :param engine: engine name (str)
    :param paralleled: use the parallel TDX saver (default: False)
    :return: the selected save module, or None if the name is unknown
    '''
    if engine in ['tushare', 'ts', 'Tushare']:
        return sts
    elif engine in ['tdx']:
        # TDX has a parallel variant for faster bulk saving
        return stdx_parallelism if paralleled else stdx
    elif engine in ['gm', 'goldenminer']:
        return sgm
    elif engine in ['jq', 'joinquant']:
        return sjq
    else:
        # BUG FIX: the engine name used to be passed as a second positional
        # argument to print(), leaving the %s placeholder unformatted;
        # use %-formatting so the message actually contains the value.
        print('QA Error QASU.main.py call select_save_engine with parameter %s is None of tushare, ts, Tushare, or tdx' % engine)
获取股票日线'
Returns:
[type] -- [description]
感谢@几何大佬的提示
https://docs.mongodb.com/manual/tutorial/project-fields-from-query-results/#return-the-specified-fields-and-the-id-field-only
def QA_fetch_stock_day(code, start, end, format='numpy', frequence='day', collections=DATABASE.stock_day):
    """获取股票日线 (fetch stock daily bars from mongodb)

    Arguments:
        code: stock code (str) or list of codes
        start, end: date-like; only the first 10 chars (YYYY-MM-DD) are used
        format: 'P'/'p'/'pandas'/'pd', 'json'/'dict', 'n'/'N'/'numpy'
                or 'list'/'l'/'L'
        frequence: kept for interface compatibility (daily data only here)
        collections: mongo collection to query

    Returns:
        data in the requested format, or None on empty result / bad format /
        invalid end date

    感谢@几何大佬的提示
    https://docs.mongodb.com/manual/tutorial/project-fields-from-query-results/#return-the-specified-fields-and-the-id-field-only
    """
    start = str(start)[0:10]
    end = str(end)[0:10]
    # code checking: normalise single code / list of codes
    code = QA_util_code_tolist(code)
    if QA_util_date_valid(end):
        cursor = collections.find({
            'code': {'$in': code}, "date_stamp": {
                "$lte": QA_util_date_stamp(end),
                "$gte": QA_util_date_stamp(start)}}, {"_id": 0}, batch_size=10000)
        res = pd.DataFrame([item for item in cursor])
        try:
            res = res.assign(volume=res.vol, date=pd.to_datetime(
                res.date)).drop_duplicates((['date', 'code'])).query('volume>1').set_index('date', drop=False)
            # BUG FIX: DataFrame.ix was deprecated and removed from pandas;
            # use .loc for the label-based column selection instead
            res = res.loc[:, ['code', 'open', 'high', 'low',
                              'close', 'volume', 'amount', 'date']]
        except Exception:
            # an empty result set has no 'vol'/'date' columns and lands here
            res = None
        if format in ['P', 'p', 'pandas', 'pd']:
            return res
        elif format in ['json', 'dict']:
            return QA_util_to_json_from_pandas(res)
        # other output formats
        elif format in ['n', 'N', 'numpy']:
            return numpy.asarray(res)
        elif format in ['list', 'l', 'L']:
            return numpy.asarray(res).tolist()
        else:
            print("QA Error QA_fetch_stock_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
            return None
    else:
        QA_util_log_info(
            'QA Error QA_fetch_stock_day data parameter start=%s end=%s is not right' % (start, end))
获取股票分钟线
def QA_fetch_stock_min(code, start, end, format='numpy', frequence='1min', collections=DATABASE.stock_min):
    """获取股票分钟线 (fetch stock minute bars from mongodb)

    Arguments:
        code: stock code (str) or list of codes
        start, end: datetime-like bounds (inclusive)
        format: 'P'/'p'/'pandas'/'pd', 'json'/'dict', 'n'/'N'/'numpy'
                or 'list'/'l'/'L'
        frequence: 1min/5min/15min/30min/60min (short aliases 1m..60m accepted)
        collections: mongo collection to query

    Returns:
        data in the requested format, or None on empty result / bad format
    """
    # normalise the frequence aliases ('5m' -> '5min', ...)
    if frequence in ['1min', '1m']:
        frequence = '1min'
    elif frequence in ['5min', '5m']:
        frequence = '5min'
    elif frequence in ['15min', '15m']:
        frequence = '15min'
    elif frequence in ['30min', '30m']:
        frequence = '30min'
    elif frequence in ['60min', '60m']:
        frequence = '60min'
    else:
        print("QA Error QA_fetch_stock_min parameter frequence=%s is none of 1min 1m 5min 5m 15min 15m 30min 30m 60min 60m" % frequence)
    # code checking
    code = QA_util_code_tolist(code)
    cursor = collections.find({
        'code': {'$in': code}, "time_stamp": {
            "$gte": QA_util_time_stamp(start),
            "$lte": QA_util_time_stamp(end)
        }, 'type': frequence
    }, {"_id": 0}, batch_size=10000)
    res = pd.DataFrame([item for item in cursor])
    try:
        res = res.assign(volume=res.vol, datetime=pd.to_datetime(
            res.datetime)).query('volume>1').drop_duplicates(['datetime', 'code']).set_index('datetime', drop=False)
    except Exception:
        # an empty result set has no 'vol'/'datetime' columns and lands here
        res = None
    if format in ['P', 'p', 'pandas', 'pd']:
        return res
    elif format in ['json', 'dict']:
        return QA_util_to_json_from_pandas(res)
    # other output formats
    elif format in ['n', 'N', 'numpy']:
        return numpy.asarray(res)
    elif format in ['list', 'l', 'L']:
        return numpy.asarray(res).tolist()
    else:
        print("QA Error QA_fetch_stock_min format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
        return None
获取股票列表
def QA_fetch_stock_list(collections=DATABASE.stock_list):
    '''获取股票列表 (full stock list as a DataFrame indexed by code)'''
    docs = list(collections.find())
    frame = pd.DataFrame(docs).drop('_id', axis=1, inplace=False)
    return frame.set_index('code', drop=False)
获取ETF列表
def QA_fetch_etf_list(collections=DATABASE.etf_list):
    '''获取ETF列表 (ETF list as a DataFrame indexed by code)'''
    docs = list(collections.find())
    frame = pd.DataFrame(docs).drop('_id', axis=1, inplace=False)
    return frame.set_index('code', drop=False)
获取指数列表
def QA_fetch_index_list(collections=DATABASE.index_list):
    '''获取指数列表 (index list as a DataFrame indexed by code)'''
    docs = list(collections.find())
    frame = pd.DataFrame(docs).drop('_id', axis=1, inplace=False)
    return frame.set_index('code', drop=False)
获取股票基本信息 , 已经退市的股票列表
def QA_fetch_stock_terminated(collections=DATABASE.stock_terminated):
    '''获取股票基本信息 , 已经退市的股票列表 (delisted-stock list)'''
    # 🛠todo 转变成 dataframe 类型数据
    docs = list(collections.find())
    frame = pd.DataFrame(docs).drop('_id', axis=1, inplace=False)
    return frame.set_index('code', drop=False)
purpose:
tushare 股票列表数据库
code,代码
name,名称
industry,所属行业
area,地区
pe,市盈率
outstanding,流通股本(亿)
totals,总股本(亿)
totalAssets,总资产(万)
liquidAssets,流动资产
fixedAssets,固定资产
reserved,公积金
reservedPerShare,每股公积金
esp,每股收益
bvps,每股净资
pb,市净率
timeToMarket,上市日期
undp,未分利润
perundp, 每股未分配
rev,收入同比(%)
profit,利润同比(%)
gpr,毛利率(%)
npr,净利润率(%)
holders,股东人数
add by tauruswang,
:param collections: stock_info_tushare 集合
:return:
def QA_fetch_stock_basic_info_tushare(collections=DATABASE.stock_info_tushare):
    '''
    purpose: fetch the basic stock information saved from tushare.

    Fields stored per document:
        code 代码, name 名称, industry 所属行业, area 地区, pe 市盈率,
        outstanding 流通股本(亿), totals 总股本(亿), totalAssets 总资产(万),
        liquidAssets 流动资产, fixedAssets 固定资产, reserved 公积金,
        reservedPerShare 每股公积金, esp 每股收益, bvps 每股净资, pb 市净率,
        timeToMarket 上市日期, undp 未分利润, perundp 每股未分配,
        rev 收入同比(%), profit 利润同比(%), gpr 毛利率(%), npr 净利润率(%),
        holders 股东人数

    add by tauruswang,
    :param collections: stock_info_tushare collection
    :return: list of raw mongo documents
    '''
    # 🛠todo 转变成 dataframe 类型数据
    return [doc for doc in collections.find()]
获取全市场的某一日的数据
def QA_fetch_stock_full(date, format='numpy', collections=DATABASE.stock_day):
    '''获取全市场的某一日的数据 (whole-market snapshot for a single day)'''
    day = str(date)[0:10]
    if QA_util_date_valid(day) is True:
        rows = [
            [str(item['code']), float(item['open']), float(item['high']),
             float(item['low']), float(item['close']), float(item['vol']),
             item['date']]
            for item in collections.find(
                {"date_stamp": QA_util_date_stamp(day)}, batch_size=10000)
        ]
        # output formats
        if format in ['n', 'N', 'numpy']:
            result = numpy.asarray(rows)
        elif format in ['list', 'l', 'L']:
            result = rows
        elif format in ['P', 'p', 'pandas', 'pd']:
            result = DataFrame(rows, columns=[
                'code', 'open', 'high', 'low', 'close', 'volume', 'date'])
            result['date'] = pd.to_datetime(result['date'])
            result = result.set_index('date', drop=False)
        else:
            # unknown format: warn, fall through with the raw row list
            print("QA Error QA_fetch_stock_full format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
            result = rows
        return result
    else:
        QA_util_log_info(
            'QA Error QA_fetch_stock_full data parameter date=%s not right' % date)
获取股票分钟线
def QA_fetch_index_min(
        code,
        start, end,
        format='numpy',
        frequence='1min',
        collections=DATABASE.index_min):
    '''获取指数分钟线 (fetch index minute bars from mongodb)

    NOTE: the docstring previously said "股票分钟线" (stock minute bars) —
    a copy-paste from QA_fetch_stock_min; this reads the index_min collection.

    :param code: index code (str) or list of codes
    :param start, end: datetime-like bounds (inclusive)
    :param format: numpy/list/pandas/dict output selector
    :param frequence: 1min/5min/15min/30min/60min (short aliases accepted)
    :param collections: mongo collection (default: DATABASE.index_min)
    :return: data in the requested format, or None if nothing matched
    '''
    # normalise the frequence aliases, consistent with QA_fetch_stock_min
    if frequence in ['1min', '1m']:
        frequence = '1min'
    elif frequence in ['5min', '5m']:
        frequence = '5min'
    elif frequence in ['15min', '15m']:
        frequence = '15min'
    elif frequence in ['30min', '30m']:
        frequence = '30min'
    elif frequence in ['60min', '60m']:
        frequence = '60min'
    else:
        # consistency fix: report unknown frequences like QA_fetch_stock_min
        print("QA Error QA_fetch_index_min parameter frequence=%s is none of 1min 1m 5min 5m 15min 15m 30min 30m 60min 60m" % frequence)
    code = QA_util_code_tolist(code)
    cursor = collections.find({
        'code': {'$in': code}, "time_stamp": {
            "$gte": QA_util_time_stamp(start),
            "$lte": QA_util_time_stamp(end)
        }, 'type': frequence
    }, {"_id": 0}, batch_size=10000)
    if format in ['dict', 'json']:
        # raw mongo documents; no DataFrame construction needed
        return [data for data in cursor]
    __data = pd.DataFrame([item for item in cursor])
    if len(__data) == 0:
        # robustness fix: an empty frame has no 'datetime' column and used
        # to raise KeyError here; return None like the sibling fetchers
        return None
    __data = __data.assign(datetime=pd.to_datetime(__data['datetime']))
    __data = __data.set_index('datetime', drop=False)
    if format in ['numpy', 'np', 'n']:
        return numpy.asarray(__data)
    elif format in ['list', 'l', 'L']:
        return numpy.asarray(__data).tolist()
    elif format in ['P', 'p', 'pandas', 'pd']:
        return __data
获取股票分钟线
def QA_fetch_future_min(
        code,
        start, end,
        format='numpy',
        frequence='1min',
        collections=DATABASE.future_min):
    '''获取期货分钟线 (fetch future minute bars from mongodb)

    NOTE: the docstring previously said "股票分钟线" (stock minute bars) —
    a copy-paste; this reads the future_min collection.

    :param code: future code (str) or list of codes (not zero-filled)
    :param start, end: datetime-like bounds (inclusive)
    :param format: numpy/list/pandas/dict output selector
    :param frequence: 1min/5min/15min/30min/60min (short aliases accepted)
    :param collections: mongo collection (default: DATABASE.future_min)
    :return: data in the requested format, or None if nothing matched
    '''
    # normalise the frequence aliases, consistent with QA_fetch_stock_min
    if frequence in ['1min', '1m']:
        frequence = '1min'
    elif frequence in ['5min', '5m']:
        frequence = '5min'
    elif frequence in ['15min', '15m']:
        frequence = '15min'
    elif frequence in ['30min', '30m']:
        frequence = '30min'
    elif frequence in ['60min', '60m']:
        frequence = '60min'
    else:
        # consistency fix: report unknown frequences like QA_fetch_stock_min
        print("QA Error QA_fetch_future_min parameter frequence=%s is none of 1min 1m 5min 5m 15min 15m 30min 30m 60min 60m" % frequence)
    __data = []
    code = QA_util_code_tolist(code, auto_fill=False)
    cursor = collections.find({
        'code': {'$in': code}, "time_stamp": {
            "$gte": QA_util_time_stamp(start),
            "$lte": QA_util_time_stamp(end)
        }, 'type': frequence
    }, batch_size=10000)
    if format in ['dict', 'json']:
        # raw mongo documents; no DataFrame construction needed
        return [data for data in cursor]
    for item in cursor:
        __data.append([str(item['code']), float(item['open']), float(item['high']), float(
            item['low']), float(item['close']), float(item['position']), float(item['price']), float(item['trade']),
            item['datetime'], item['tradetime'], item['time_stamp'], item['date'], item['type']])
    if len(__data) == 0:
        # robustness fix: an empty frame has no 'datetime' column and used
        # to raise KeyError below; return None like the sibling fetchers
        return None
    __data = DataFrame(__data, columns=[
        'code', 'open', 'high', 'low', 'close', 'position', 'price', 'trade', 'datetime', 'tradetime', 'time_stamp', 'date', 'type'])
    __data['datetime'] = pd.to_datetime(__data['datetime'])
    __data = __data.set_index('datetime', drop=False)
    if format in ['numpy', 'np', 'n']:
        return numpy.asarray(__data)
    elif format in ['list', 'l', 'L']:
        return numpy.asarray(__data).tolist()
    elif format in ['P', 'p', 'pandas', 'pd']:
        return __data
获取期货列表
def QA_fetch_future_list(collections=DATABASE.future_list):
    '''获取期货列表 (future list as a DataFrame indexed by code)'''
    docs = list(collections.find())
    frame = pd.DataFrame(docs).drop('_id', axis=1, inplace=False)
    return frame.set_index('code', drop=False)
仅供存储的ctp tick使用
Arguments:
code {[type]} -- [description]
Keyword Arguments:
format {str} -- [description] (default: {'pd'})
collections {[type]} -- [description] (default: {DATABASE.ctp_tick})
Returns:
[type] -- [description]
def QA_fetch_ctp_tick(code, start, end, frequence, format='pd', collections=DATABASE.ctp_tick):
    """仅供存储的ctp tick使用 (read back stored CTP tick data only)

    Arguments:
        code {[type]} -- instrument id (str) or list of ids
        start, end -- time bounds, converted via QA_util_time_stamp
        frequence -- stored 'type' field value to match
    Keyword Arguments:
        format {str} -- kept for interface compatibility; a DataFrame is
                        always returned (default: {'pd'})
        collections {[type]} -- mongo collection (default: {DATABASE.ctp_tick})
    Returns:
        [type] -- pd.DataFrame of level-1 quote fields, indexed by datetime
    """
    code = QA_util_code_tolist(code, auto_fill=False)
    cursor = collections.find({
        'InstrumentID': {'$in': code}, "time_stamp": {
            "$gte": QA_util_time_stamp(start),
            "$lte": QA_util_time_stamp(end)
        }, 'type': frequence
    }, {"_id": 0}, batch_size=10000)
    # 1.7976931348623157e+308 is DBL_MAX -- presumably CTP's sentinel for
    # "no value" (TODO confirm); map it and empty strings to NaN, then drop
    # columns that are entirely NaN
    hq = pd.DataFrame([data for data in cursor]).replace(1.7976931348623157e+308,
                                                         numpy.nan).replace('', numpy.nan).dropna(axis=1)
    # keep only the level-1 quote columns used downstream
    p1 = hq.loc[:, ['ActionDay', 'AskPrice1', 'AskVolume1', 'AveragePrice', 'BidPrice1',
                    'BidVolume1', 'HighestPrice', 'InstrumentID', 'LastPrice',
                    'OpenInterest', 'TradingDay', 'UpdateMillisec',
                    'UpdateTime', 'Volume']]
    # rebuild a full timestamp: ActionDay (int yyyymmdd -> str) + UpdateTime
    # + fractional seconds ('%.6f' of UpdateMillisec/1e6 with the leading
    # '0' stripped, so the result is '.xxxxxx')
    p1 = p1.assign(datetime=p1.ActionDay.apply(QA_util_date_int2str)+' '+p1.UpdateTime + (p1.UpdateMillisec/1000000).apply(lambda x: str('%.6f' % x)[1:]),
                   code=p1.InstrumentID)
    p1.datetime = pd.to_datetime(p1.datetime)
    return p1.set_index(p1.datetime)
获取股票除权信息/数据库
def QA_fetch_stock_xdxr(code, format='pd', collections=DATABASE.stock_xdxr):
    '''获取股票除权信息/数据库 (ex-dividend / ex-rights records from mongodb)'''
    code = QA_util_code_tolist(code)
    docs = [item for item in collections.find(
        {'code': {'$in': code}}, batch_size=10000)]
    frame = pd.DataFrame(docs).drop(['_id'], axis=1)
    frame['date'] = pd.to_datetime(frame['date'])
    return frame.set_index('date', drop=False)
获取全部实时5档行情的存储结果
def QA_fetch_quotations(date=None, db=DATABASE):
    '''获取全部实时5档行情的存储结果 (stored realtime level-5 quotes for one day)

    BUG FIX: the default used to be ``date=datetime.date.today()``, which is
    evaluated once at import time, so a long-running process always queried a
    stale collection. ``None`` now means "today", resolved per call —
    backward-compatible for callers passing no argument.

    :param date: the day whose 'realtime_<date>' collection to read
                 (default: today)
    :param db: mongo client (default: {DATABASE})
    :return: pd.DataFrame indexed by (datetime, code), sorted
    :raises: re-raises any exception from the query
    '''
    if date is None:
        date = datetime.date.today()
    try:
        collections = db.get_collection(
            'realtime_{}'.format(date))
        data = pd.DataFrame([item for item in collections.find(
            {}, {"_id": 0}, batch_size=10000)])
        return data.assign(date=pd.to_datetime(data.datetime.apply(lambda x: str(x)[0:10]))).assign(datetime=pd.to_datetime(data.datetime)).set_index(['datetime', 'code'], drop=False).sort_index()
    except Exception as e:
        raise e
get the account
Arguments:
query_mes {[type]} -- [description]
Keyword Arguments:
collection {[type]} -- [description] (default: {DATABASE})
Returns:
[type] -- [description]
def QA_fetch_account(message=None, db=DATABASE):
    """get the account

    Arguments:
        message {[type]} -- mongo query filter; None/empty matches everything
    Keyword Arguments:
        db {[type]} -- mongo client (default: {DATABASE})
    Returns:
        [type] -- list of account documents (without '_id')
    """
    # avoid a mutable default argument; None means "match all"
    message = {} if message is None else message
    # BUG FIX: honour the db parameter instead of always using DATABASE
    collection = db.account
    return [res for res in collection.find(message, {"_id": 0})]
get the risk message
Arguments:
query_mes {[type]} -- [description]
Keyword Arguments:
collection {[type]} -- [description] (default: {DATABASE})
Returns:
[type] -- [description]
def QA_fetch_risk(message=None, params={"_id": 0, 'assets': 0, 'timeindex': 0, 'totaltimeindex': 0, 'benchmark_assets': 0, 'month_profit': 0}, db=DATABASE):
    """get the risk message

    Arguments:
        message {[type]} -- mongo query filter; None/empty matches everything
    Keyword Arguments:
        params {dict} -- mongo projection; by default the bulky time-series
                         fields are excluded (read-only, never mutated)
        db {[type]} -- mongo client (default: {DATABASE})
    Returns:
        [type] -- list of risk documents
    """
    # avoid a mutable default argument for the filter; None means "match all"
    message = {} if message is None else message
    # BUG FIX: honour the db parameter instead of always using DATABASE
    collection = db.risk
    return [res for res in collection.find(message, params)]
get the user
Arguments:
user_cookie : str the unique cookie_id for a user
Keyword Arguments:
db: database for query
Returns:
list --- [ACCOUNT]
def QA_fetch_user(user_cookie, db=DATABASE):
    """
    get the user

    Arguments:
        user_cookie : str the unique cookie_id for a user
    Keyword Arguments:
        db: database for query (default: {DATABASE})
    Returns:
        list --- [ACCOUNT] account documents belonging to that user
    """
    # BUG FIX: honour the db parameter instead of always using DATABASE
    collection = db.account
    return [res for res in collection.find({'user_cookie': user_cookie}, {"_id": 0})]
get the account
Arguments:
query_mes {[type]} -- [description]
Keyword Arguments:
collection {[type]} -- [description] (default: {DATABASE})
Returns:
[type] -- [description]
def QA_fetch_strategy(message=None, db=DATABASE):
    """get the strategy

    (docstring previously said "get the account" — copy-paste; this reads
    the strategy collection)

    Arguments:
        message {[type]} -- mongo query filter; None/empty matches everything
    Keyword Arguments:
        db {[type]} -- mongo client (default: {DATABASE})
    Returns:
        [type] -- list of strategy documents (without '_id')
    """
    # avoid a mutable default argument; None means "match all"
    message = {} if message is None else message
    # BUG FIX: honour the db parameter instead of always using DATABASE
    collection = db.strategy
    return [res for res in collection.find(message, {"_id": 0})]
获取某一天龙虎榜数据
def QA_fetch_lhb(date, db=DATABASE):
    '''获取某一天龙虎榜数据 (dragon-tiger board data for one day)'''
    try:
        coll = db.lhb
        docs = [item for item in coll.find({'date': date}, {"_id": 0})]
        return pd.DataFrame(docs).set_index('code', drop=False).sort_index()
    except Exception as e:
        raise e
获取专业财务报表
Arguments:
code {[type]} -- [description]
report_date {[type]} -- [description]
Keyword Arguments:
ltype {str} -- [description] (default: {'EN'})
db {[type]} -- [description] (default: {DATABASE})
Raises:
e -- [description]
Returns:
pd.DataFrame -- [description]
def QA_fetch_financial_report(code, report_date, ltype='EN', db=DATABASE):
    """获取专业财务报表 (fetch professional financial reports)

    Arguments:
        code {[type]} -- stock code (str) or list of codes; None means all
        report_date {[type]} -- report date as str/int or a list of them;
                                None means all dates
    Keyword Arguments:
        ltype {str} -- column language: 'CH'/'CN' for Chinese names,
                       'EN' for English names (default: {'EN'})
        db {[type]} -- mongo client (default: {DATABASE})
    Raises:
        e -- re-raises any exception from the query
    Returns:
        pd.DataFrame -- indexed by (report_date, code), or None if no rows
    """
    if isinstance(code, str):
        code = [code]
    if isinstance(report_date, str):
        report_date = [QA_util_date_str2int(report_date)]
    elif isinstance(report_date, int):
        report_date = [report_date]
    elif isinstance(report_date, list):
        report_date = [QA_util_date_str2int(item) for item in report_date]
    collection = db.financial
    # financial_dict keys are '<3-digit number><chinese name>'; split them
    # into the numeric column id and the Chinese label. Values are English.
    num_columns = [item[:3] for item in list(financial_dict.keys())]
    CH_columns = [item[3:] for item in list(financial_dict.keys())]
    EN_columns = list(financial_dict.values())
    try:
        if code is not None and report_date is not None:
            data = [item for item in collection.find(
                {'code': {'$in': code}, 'report_date': {'$in': report_date}}, {"_id": 0}, batch_size=10000)]
        elif code is None and report_date is not None:
            data = [item for item in collection.find(
                {'report_date': {'$in': report_date}}, {"_id": 0}, batch_size=10000)]
        elif code is not None and report_date is None:
            data = [item for item in collection.find(
                {'code': {'$in': code}}, {"_id": 0}, batch_size=10000)]
        else:
            data = [item for item in collection.find({}, {"_id": 0})]
        if len(data) > 0:
            res_pd = pd.DataFrame(data)
            if ltype in ['CH', 'CN']:
                cndict = dict(zip(num_columns, CH_columns))
                # extra columns 283-286 plus code/report_date map to themselves
                # (the try/except that used to wrap these plain assignments
                # was dead code: dict assignment cannot raise here)
                cndict['283'] = '283'
                cndict['284'] = '284'
                cndict['285'] = '285'
                cndict['286'] = '286'
                cndict['code'] = 'code'
                cndict['report_date'] = 'report_date'
                res_pd.columns = res_pd.columns.map(lambda x: cndict[x])
            elif ltype == 'EN':
                # BUG FIX: was `ltype is 'EN'` — identity comparison against a
                # string literal is implementation-dependent; use equality.
                endict = dict(zip(num_columns, EN_columns))
                endict['283'] = '283'
                endict['284'] = '284'
                endict['285'] = '285'
                endict['286'] = '286'
                endict['code'] = 'code'
                endict['report_date'] = 'report_date'
                res_pd.columns = res_pd.columns.map(lambda x: endict[x])
            if res_pd.report_date.dtype == numpy.int64:
                # stored as yyyymmdd ints: convert via the int->str helper
                res_pd.report_date = pd.to_datetime(
                    res_pd.report_date.apply(QA_util_date_int2str))
            else:
                res_pd.report_date = pd.to_datetime(res_pd.report_date)
            # -4.039810335e+34 is the source's sentinel for missing values
            return res_pd.replace(-4.039810335e+34, numpy.nan).set_index(['report_date', 'code'], drop=False)
        else:
            return None
    except Exception as e:
        raise e
获取股票日线
def QA_fetch_stock_divyield(code, start, end=None, format='pd', collections=DATABASE.stock_divyield):
    '''获取股票分红送配数据 (fetch dividend/allotment records)

    NOTE: the docstring previously said "获取股票日线" (daily bars) — a
    copy-paste; this reads the stock_divyield collection.

    :param code: stock code (str) or list of codes
    :param start, end: declaration-date bounds (compared as stored values)
    :param format: pandas/json/numpy/list output selector
    :param collections: mongo collection (default: DATABASE.stock_divyield)
    :return: data in the requested format, or None on empty result /
             bad format / invalid end date
    '''
    # code checking
    code = QA_util_code_tolist(code)
    if QA_util_date_valid(end):
        cursor = collections.find({
            'a_stockcode': {'$in': code}, "dir_dcl_date": {
                "$lte": end,
                "$gte": start}}, {"_id": 0}, batch_size=10000)
        res = pd.DataFrame([item for item in cursor])
        try:
            res = res.drop_duplicates(
                (['dir_dcl_date', 'a_stockcode']))
            # BUG FIX: DataFrame.ix was deprecated and removed from pandas;
            # use .loc for the label-based column selection instead
            res = res.loc[:, ['a_stockcode', 'a_stocksname', 'div_info', 'div_type_code', 'bonus_shr',
                              'cash_bt', 'cap_shr', 'epsp', 'ps_cr', 'ps_up', 'reg_date', 'dir_dcl_date',
                              'a_stockcode1', 'ex_divi_date', 'prg']]
        except Exception:
            # an empty result set has none of these columns and lands here
            res = None
        if format in ['P', 'p', 'pandas', 'pd']:
            return res
        elif format in ['json', 'dict']:
            return QA_util_to_json_from_pandas(res)
        # other output formats
        elif format in ['n', 'N', 'numpy']:
            return numpy.asarray(res)
        elif format in ['list', 'l', 'L']:
            return numpy.asarray(res).tolist()
        else:
            print("QA Error QA_fetch_stock_divyield format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
            return None
    else:
        QA_util_log_info(
            'QA Error QA_fetch_stock_divyield data parameter start=%s end=%s is not right' % (start, end))
save stock_day
保存日线数据
:param client:
:param ui_log: 给GUI qt 界面使用
:param ui_progress: 给GUI qt 界面使用
:param ui_progress_int_value: 给GUI qt 界面使用
def QA_SU_save_stock_day(client=DATABASE, ui_log=None, ui_progress=None):
    '''
    save stock_day
    Save daily bar data for every stock, incrementally per code.

    :param client: mongo client
    :param ui_log: used by the Qt GUI for logging
    :param ui_progress: used by the Qt GUI for the progress bar
    :param ui_progress_int_value: used by the Qt GUI for the progress bar
    '''
    stock_list = QA_fetch_get_stock_list().code.unique().tolist()
    coll_stock_day = client.stock_day
    # compound index so the per-code incremental lookups below stay fast
    coll_stock_day.create_index(
        [("code",
          pymongo.ASCENDING),
         ("date_stamp",
          pymongo.ASCENDING)]
    )
    err = []

    def __saving_work(code, coll_stock_day):
        # Download and insert daily bars for one code; failures are
        # recorded in err and the loop continues.
        try:
            QA_util_log_info(
                '##JOB01 Now Saving STOCK_DAY==== {}'.format(str(code)),
                ui_log
            )
            # first check whether the database already has rows for this code
            ref = coll_stock_day.find({'code': str(code)[0:6]})
            end_date = str(now_time())[0:10]
            # data already present: incremental update. This guard exists
            # because a newly listed stock has no rows yet, and negative
            # indexing into the cursor would fail otherwise.
            if ref.count() > 0:
                # resume from the date of the last stored bar
                start_date = ref[ref.count() - 1]['date']
                QA_util_log_info(
                    'UPDATE_STOCK_DAY \n Trying updating {} from {} to {}'
                    .format(code,
                            start_date,
                            end_date),
                    ui_log
                )
                if start_date != end_date:
                    coll_stock_day.insert_many(
                        QA_util_to_json_from_pandas(
                            QA_fetch_get_stock_day(
                                str(code),
                                QA_util_get_next_day(start_date),
                                end_date,
                                '00'
                            )
                        )
                    )
            # no rows for this code yet: download everything from 1990-01-01
            else:
                start_date = '1990-01-01'
                QA_util_log_info(
                    'UPDATE_STOCK_DAY \n Trying updating {} from {} to {}'
                    .format(code,
                            start_date,
                            end_date),
                    ui_log
                )
                if start_date != end_date:
                    coll_stock_day.insert_many(
                        QA_util_to_json_from_pandas(
                            QA_fetch_get_stock_day(
                                str(code),
                                start_date,
                                end_date,
                                '00'
                            )
                        )
                    )
        except Exception as error0:
            print(error0)
            err.append(str(code))

    for item in range(len(stock_list)):
        QA_util_log_info('The {} of Total {}'.format(item, len(stock_list)))
        # NOTE(review): ui_log is interpolated into the progress string here,
        # which looks like it was meant as a separate argument — confirm
        strProgressToLog = 'DOWNLOAD PROGRESS {} {}'.format(
            str(float(item / len(stock_list) * 100))[0:4] + '%',
            ui_log
        )
        intProgressToLog = int(float(item / len(stock_list) * 100))
        QA_util_log_info(
            strProgressToLog,
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intProgressToLog
        )
        __saving_work(stock_list[item], coll_stock_day)
    if len(err) < 1:
        QA_util_log_info('SUCCESS save stock day ^_^', ui_log)
    else:
        QA_util_log_info('ERROR CODE \n ', ui_log)
        QA_util_log_info(err, ui_log)
save stock_week
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
def QA_SU_save_stock_week(client=DATABASE, ui_log=None, ui_progress=None):
    """save stock_week

    Save weekly bar data for every stock, incrementally per code.

    Keyword Arguments:
        client {[type]} -- mongo client (default: {DATABASE})
        ui_log / ui_progress -- used by the Qt GUI
    """
    stock_list = QA_fetch_get_stock_list().code.unique().tolist()
    coll_stock_week = client.stock_week
    # compound index so the per-code incremental lookups below stay fast
    coll_stock_week.create_index(
        [("code",
          pymongo.ASCENDING),
         ("date_stamp",
          pymongo.ASCENDING)]
    )
    err = []

    def __saving_work(code, coll_stock_week):
        # Download and insert weekly bars for one code; failures are
        # recorded in err and the loop continues.
        try:
            QA_util_log_info(
                '##JOB01 Now Saving STOCK_WEEK==== {}'.format(str(code)),
                ui_log=ui_log
            )
            ref = coll_stock_week.find({'code': str(code)[0:6]})
            end_date = str(now_time())[0:10]
            if ref.count() > 0:
                # guard: a newly listed stock has no stored rows, and
                # negative indexing into the cursor would fail otherwise
                start_date = ref[ref.count() - 1]['date']
                QA_util_log_info(
                    'UPDATE_STOCK_WEEK \n Trying updating {} from {} to {}'
                    .format(code,
                            start_date,
                            end_date),
                    ui_log=ui_log
                )
                if start_date != end_date:
                    coll_stock_week.insert_many(
                        QA_util_to_json_from_pandas(
                            QA_fetch_get_stock_day(
                                str(code),
                                QA_util_get_next_day(start_date),
                                end_date,
                                '00',
                                frequence='week'
                            )
                        )
                    )
            else:
                # no rows yet: download full history from 1990-01-01
                start_date = '1990-01-01'
                QA_util_log_info(
                    'UPDATE_STOCK_WEEK \n Trying updating {} from {} to {}'
                    .format(code,
                            start_date,
                            end_date),
                    ui_log=ui_log
                )
                if start_date != end_date:
                    coll_stock_week.insert_many(
                        QA_util_to_json_from_pandas(
                            QA_fetch_get_stock_day(
                                str(code),
                                start_date,
                                end_date,
                                '00',
                                frequence='week'
                            )
                        )
                    )
        except:
            # best-effort: record the failing code and keep going
            err.append(str(code))

    for item in range(len(stock_list)):
        QA_util_log_info(
            'The {} of Total {}'.format(item,
                                        len(stock_list)),
            ui_log=ui_log
        )
        strProgress = 'DOWNLOAD PROGRESS {} '.format(
            str(float(item / len(stock_list) * 100))[0:4] + '%'
        )
        intProgress = int(float(item / len(stock_list) * 100))
        QA_util_log_info(
            strProgress,
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intProgress
        )
        __saving_work(stock_list[item], coll_stock_week)
    if len(err) < 1:
        QA_util_log_info('SUCCESS', ui_log=ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
        QA_util_log_info(err, ui_log=ui_log)
[summary]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
def QA_SU_save_stock_xdxr(client=DATABASE, ui_log=None, ui_progress=None):
    """Save ex-dividend / ex-rights (xdxr) info for every stock.

    Keyword Arguments:
        client {[type]} -- mongo client (default: {DATABASE})
        ui_log / ui_progress -- used by the Qt GUI
    """
    stock_list = QA_fetch_get_stock_list().code.unique().tolist()
    # client.drop_collection('stock_xdxr')
    try:
        coll = client.stock_xdxr
        # unique (code, date) index: re-inserting existing records fails per
        # document, which ordered=False in __saving_work tolerates
        coll.create_index(
            [('code',
              pymongo.ASCENDING),
             ('date',
              pymongo.ASCENDING)],
            unique=True
        )
    except:
        # index creation failed (presumably existing duplicates — confirm):
        # rebuild the collection from scratch and retry
        client.drop_collection('stock_xdxr')
        coll = client.stock_xdxr
        coll.create_index(
            [('code',
              pymongo.ASCENDING),
             ('date',
              pymongo.ASCENDING)],
            unique=True
        )
    err = []

    def __saving_work(code, coll):
        # Download and insert xdxr records for one code; duplicate rows are
        # rejected by the unique index and skipped thanks to ordered=False.
        QA_util_log_info(
            '##JOB02 Now Saving XDXR INFO ==== {}'.format(str(code)),
            ui_log=ui_log
        )
        try:
            coll.insert_many(
                QA_util_to_json_from_pandas(QA_fetch_get_stock_xdxr(str(code))),
                ordered=False
            )
        except:
            # best-effort: record the failing code and keep going
            err.append(str(code))

    for i_ in range(len(stock_list)):
        QA_util_log_info(
            'The {} of Total {}'.format(i_,
                                        len(stock_list)),
            ui_log=ui_log
        )
        strLogInfo = 'DOWNLOAD PROGRESS {} '.format(
            str(float(i_ / len(stock_list) * 100))[0:4] + '%'
        )
        intLogProgress = int(float(i_ / len(stock_list) * 100))
        QA_util_log_info(
            strLogInfo,
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intLogProgress
        )
        __saving_work(stock_list[i_], coll)
save stock_min
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
def QA_SU_save_stock_min(client=DATABASE, ui_log=None, ui_progress=None):
    """save stock_min

    Save minute bars (1/5/15/30/60 min) for every stock, incrementally,
    using a 4-worker thread pool.

    Keyword Arguments:
        client {[type]} -- mongo client (default: {DATABASE})
        ui_log / ui_progress -- used by the Qt GUI
    """
    stock_list = QA_fetch_get_stock_list().code.unique().tolist()
    coll = client.stock_min
    coll.create_index(
        [
            ('code',
             pymongo.ASCENDING),
            ('time_stamp',
             pymongo.ASCENDING),
            ('date_stamp',
             pymongo.ASCENDING)
        ]
    )
    err = []

    def __saving_work(code, coll):
        # Download and insert all five minute-frequencies for one code.
        QA_util_log_info(
            '##JOB03 Now Saving STOCK_MIN ==== {}'.format(str(code)),
            ui_log=ui_log
        )
        try:
            for type in ['1min', '5min', '15min', '30min', '60min']:
                ref_ = coll.find({'code': str(code)[0:6], 'type': type})
                end_time = str(now_time())[0:19]
                if ref_.count() > 0:
                    # resume from the datetime of the last stored bar
                    start_time = ref_[ref_.count() - 1]['datetime']
                    QA_util_log_info(
                        '##JOB03.{} Now Saving {} from {} to {} =={} '.format(
                            ['1min',
                             '5min',
                             '15min',
                             '30min',
                             '60min'].index(type),
                            str(code),
                            start_time,
                            end_time,
                            type
                        ),
                        ui_log=ui_log
                    )
                    if start_time != end_time:
                        __data = QA_fetch_get_stock_min(
                            str(code),
                            start_time,
                            end_time,
                            type
                        )
                        if len(__data) > 1:
                            # skip the first row ([1::]): presumably the bar
                            # already stored at start_time — confirm
                            coll.insert_many(
                                QA_util_to_json_from_pandas(__data)[1::]
                            )
                else:
                    # no data yet for this code/frequence: start at 2015-01-01
                    start_time = '2015-01-01'
                    QA_util_log_info(
                        '##JOB03.{} Now Saving {} from {} to {} =={} '.format(
                            ['1min',
                             '5min',
                             '15min',
                             '30min',
                             '60min'].index(type),
                            str(code),
                            start_time,
                            end_time,
                            type
                        ),
                        ui_log=ui_log
                    )
                    if start_time != end_time:
                        __data = QA_fetch_get_stock_min(
                            str(code),
                            start_time,
                            end_time,
                            type
                        )
                        if len(__data) > 1:
                            coll.insert_many(
                                QA_util_to_json_from_pandas(__data)
                            )
        except Exception as e:
            QA_util_log_info(e, ui_log=ui_log)
            err.append(code)
            QA_util_log_info(err, ui_log=ui_log)

    # NOTE(review): the executor is never shut down; a with-statement would
    # release the worker threads deterministically
    executor = ThreadPoolExecutor(max_workers=4)
    # executor.map((__saving_work, stock_list[i_], coll),URLS)
    res = {
        executor.submit(__saving_work,
                        stock_list[i_],
                        coll)
        for i_ in range(len(stock_list))
    }
    count = 0
    for i_ in concurrent.futures.as_completed(res):
        QA_util_log_info(
            'The {} of Total {}'.format(count,
                                        len(stock_list)),
            ui_log=ui_log
        )
        strProgress = 'DOWNLOAD PROGRESS {} '.format(
            str(float(count / len(stock_list) * 100))[0:4] + '%'
        )
        # NOTE(review): scaled by 10000 while the display string uses 100 —
        # presumably the GUI expects a 0..10000 progress value; confirm
        intProgress = int(count / len(stock_list) * 10000.0)
        QA_util_log_info(
            strProgress,
            ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intProgress
        )
        count = count + 1
    if len(err) < 1:
        QA_util_log_info('SUCCESS', ui_log=ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
        QA_util_log_info(err, ui_log=ui_log)
save index_day
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
def QA_SU_save_index_day(client=DATABASE, ui_log=None, ui_progress=None):
    """save index_day

    Save daily index bars, incrementally per index code.

    Keyword Arguments:
        client {[type]} -- mongo client (default: {DATABASE})
        ui_log / ui_progress -- used by the Qt GUI
    """
    __index_list = QA_fetch_get_stock_list('index')
    coll = client.index_day
    coll.create_index(
        [('code',
          pymongo.ASCENDING),
         ('date_stamp',
          pymongo.ASCENDING)]
    )
    err = []

    def __saving_work(code, coll):
        # Download and insert daily bars for one index code.
        try:
            ref_ = coll.find({'code': str(code)[0:6]})
            end_time = str(now_time())[0:10]
            if ref_.count() > 0:
                # resume from the last stored date
                start_time = ref_[ref_.count() - 1]['date']
                QA_util_log_info(
                    '##JOB04 Now Saving INDEX_DAY==== \n Trying updating {} from {} to {}'
                    .format(code,
                            start_time,
                            end_time),
                    ui_log=ui_log
                )
                if start_time != end_time:
                    coll.insert_many(
                        QA_util_to_json_from_pandas(
                            QA_fetch_get_index_day(
                                str(code),
                                QA_util_get_next_day(start_time),
                                end_time
                            )
                        )
                    )
            else:
                # full-history download: try from 1990 first, and fall back
                # to 2009 if that fetch fails
                try:
                    start_time = '1990-01-01'
                    QA_util_log_info(
                        '##JOB04 Now Saving INDEX_DAY==== \n Trying updating {} from {} to {}'
                        .format(code,
                                start_time,
                                end_time),
                        ui_log=ui_log
                    )
                    coll.insert_many(
                        QA_util_to_json_from_pandas(
                            QA_fetch_get_index_day(
                                str(code),
                                start_time,
                                end_time
                            )
                        )
                    )
                except:
                    start_time = '2009-01-01'
                    QA_util_log_info(
                        '##JOB04 Now Saving INDEX_DAY==== \n Trying updating {} from {} to {}'
                        .format(code,
                                start_time,
                                end_time),
                        ui_log=ui_log
                    )
                    coll.insert_many(
                        QA_util_to_json_from_pandas(
                            QA_fetch_get_index_day(
                                str(code),
                                start_time,
                                end_time
                            )
                        )
                    )
        except Exception as e:
            QA_util_log_info(e, ui_log=ui_log)
            err.append(str(code))
            QA_util_log_info(err, ui_log=ui_log)

    for i_ in range(len(__index_list)):
        # __saving_work('000001')
        QA_util_log_info(
            'The {} of Total {}'.format(i_,
                                        len(__index_list)),
            ui_log=ui_log
        )
        strLogProgress = 'DOWNLOAD PROGRESS {} '.format(
            str(float(i_ / len(__index_list) * 100))[0:4] + '%'
        )
        # NOTE(review): scaled by 10000 while the display string uses 100 —
        # presumably the GUI expects a 0..10000 progress value; confirm
        intLogProgress = int(float(i_ / len(__index_list) * 10000.0))
        QA_util_log_info(
            strLogProgress,
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intLogProgress
        )
        # __index_list.index[i_][0]: presumably a (code, market) MultiIndex,
        # taking the code part — confirm against QA_fetch_get_stock_list
        __saving_work(__index_list.index[i_][0], coll)
    if len(err) < 1:
        QA_util_log_info('SUCCESS', ui_log=ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
        QA_util_log_info(err, ui_log=ui_log)
save index_min
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
def QA_SU_save_index_min(client=DATABASE, ui_log=None, ui_progress=None):
    """save index_min

    Save index minute bars (1/5/15/30/60 min) incrementally, using a
    4-worker thread pool.

    Keyword Arguments:
        client {[type]} -- mongo client (default: {DATABASE})
        ui_log / ui_progress -- used by the Qt GUI
    """
    __index_list = QA_fetch_get_stock_list('index')
    coll = client.index_min
    coll.create_index(
        [
            ('code',
             pymongo.ASCENDING),
            ('time_stamp',
             pymongo.ASCENDING),
            ('date_stamp',
             pymongo.ASCENDING)
        ]
    )
    err = []

    def __saving_work(code, coll):
        # Download and insert all five minute-frequencies for one index code.
        QA_util_log_info(
            '##JOB05 Now Saving Index_MIN ==== {}'.format(str(code)),
            ui_log=ui_log
        )
        try:
            for type in ['1min', '5min', '15min', '30min', '60min']:
                ref_ = coll.find({'code': str(code)[0:6], 'type': type})
                end_time = str(now_time())[0:19]
                if ref_.count() > 0:
                    # resume from the datetime of the last stored bar
                    start_time = ref_[ref_.count() - 1]['datetime']
                    QA_util_log_info(
                        '##JOB05.{} Now Saving {} from {} to {} =={} '.format(
                            ['1min',
                             '5min',
                             '15min',
                             '30min',
                             '60min'].index(type),
                            str(code),
                            start_time,
                            end_time,
                            type
                        ),
                        ui_log=ui_log
                    )
                    if start_time != end_time:
                        __data = QA_fetch_get_index_min(
                            str(code),
                            start_time,
                            end_time,
                            type
                        )
                        if len(__data) > 1:
                            # skip the first row ([1::]): presumably the bar
                            # already stored at start_time — confirm
                            coll.insert_many(
                                QA_util_to_json_from_pandas(__data[1::])
                            )
                else:
                    # no data yet for this code/frequence: start at 2015-01-01
                    start_time = '2015-01-01'
                    QA_util_log_info(
                        '##JOB05.{} Now Saving {} from {} to {} =={} '.format(
                            ['1min',
                             '5min',
                             '15min',
                             '30min',
                             '60min'].index(type),
                            str(code),
                            start_time,
                            end_time,
                            type
                        ),
                        ui_log=ui_log
                    )
                    if start_time != end_time:
                        __data = QA_fetch_get_index_min(
                            str(code),
                            start_time,
                            end_time,
                            type
                        )
                        if len(__data) > 1:
                            coll.insert_many(
                                QA_util_to_json_from_pandas(__data)
                            )
        except:
            # best-effort: record the failing code and keep going
            err.append(code)

    # NOTE(review): the executor is never shut down; a with-statement would
    # release the worker threads deterministically
    executor = ThreadPoolExecutor(max_workers=4)
    res = {
        executor.submit(__saving_work,
                        __index_list.index[i_][0],
                        coll)
        for i_ in range(len(__index_list))
    }  # multi index ./.
    count = 0
    for i_ in concurrent.futures.as_completed(res):
        strLogProgress = 'DOWNLOAD PROGRESS {} '.format(
            str(float(count / len(__index_list) * 100))[0:4] + '%'
        )
        # NOTE(review): scaled by 10000 while the display string uses 100 —
        # presumably the GUI expects a 0..10000 progress value; confirm
        intLogProgress = int(float(count / len(__index_list) * 10000.0))
        QA_util_log_info(
            'The {} of Total {}'.format(count,
                                        len(__index_list)),
            ui_log=ui_log
        )
        QA_util_log_info(
            strLogProgress,
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intLogProgress
        )
        count = count + 1
    if len(err) < 1:
        QA_util_log_info('SUCCESS', ui_log=ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
        QA_util_log_info(err, ui_log=ui_log)
save stock_list
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
def QA_SU_save_stock_list(client=DATABASE, ui_log=None, ui_progress=None):
    """save stock_list

    Fetch the full stock list from TDX and rewrite the `stock_list`
    collection.

    Keyword Arguments:
        client {[type]} -- pymongo database handle (default: {DATABASE})
        ui_log {[type]} -- GUI log hook forwarded to QA_util_log_info
        ui_progress {[type]} -- GUI progress hook (0-10000 scale)
    """
    try:
        # 🛠todo this should be the first task JOB01 -- refresh the stock list first!!
        QA_util_log_info(
            '##JOB08 Now Saving STOCK_LIST ====',
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=5000
        )
        stock_list_from_tdx = QA_fetch_get_stock_list()
        pandas_data = QA_util_to_json_from_pandas(stock_list_from_tdx)
        if len(pandas_data) > 0:
            # bug fix: drop the old collection only after fresh data has
            # been fetched successfully (same pattern as
            # QA_SU_save_etf_list), so a failed download does not wipe
            # the existing list
            client.drop_collection('stock_list')
            coll = client.stock_list
            coll.create_index('code')
            coll.insert_many(pandas_data)
        QA_util_log_info(
            "完成股票列表获取",
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=10000
        )
    except Exception as e:
        QA_util_log_info(e, ui_log=ui_log)
        print(" Error save_tdx.QA_SU_save_stock_list exception!")
save etf_list
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
def QA_SU_save_etf_list(client=DATABASE, ui_log=None, ui_progress=None):
    """save etf_list

    Fetch the ETF list from TDX and rewrite the `etf_list` collection.

    Keyword Arguments:
        client {[type]} -- pymongo database handle (default: {DATABASE})
        ui_log {[type]} -- GUI log hook forwarded to QA_util_log_info
        ui_progress {[type]} -- GUI progress hook (0-10000 scale)
    """
    try:
        QA_util_log_info(
            '##JOB16 Now Saving ETF_LIST ====',
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=5000
        )
        etf_list_from_tdx = QA_fetch_get_stock_list(type_="etf")
        pandas_data = QA_util_to_json_from_pandas(etf_list_from_tdx)

        if len(pandas_data) > 0:
            # drop the old collection only after new data was fetched,
            # so a failed download never wipes the existing records
            client.drop_collection('etf_list')
            coll = client.etf_list
            coll.create_index('code')
            coll.insert_many(pandas_data)
        QA_util_log_info(
            "完成ETF列表获取",
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=10000
        )
    except Exception as e:
        QA_util_log_info(e, ui_log=ui_log)
        print(" Error save_tdx.QA_SU_save_etf_list exception!")
        pass
save stock_block
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
def QA_SU_save_stock_block(client=DATABASE, ui_log=None, ui_progress=None):
    """save stock_block

    Rebuild the `stock_block` (board/sector membership) collection from
    both the 'tdx' and 'ths' sources.

    Keyword Arguments:
        client {[type]} -- pymongo database handle (default: {DATABASE})
        ui_log {[type]} -- GUI log hook forwarded to QA_util_log_info
        ui_progress {[type]} -- GUI progress hook (0-10000 scale)

    NOTE(review): the collection is dropped *before* downloading, unlike
    QA_SU_save_etf_list which drops only after a successful fetch; a
    failed download leaves the collection empty or partial.
    """
    client.drop_collection('stock_block')
    coll = client.stock_block
    coll.create_index('code')
    try:
        QA_util_log_info(
            '##JOB09 Now Saving STOCK_BlOCK ====',
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=5000
        )
        coll.insert_many(
            QA_util_to_json_from_pandas(QA_fetch_get_stock_block('tdx'))
        )
        QA_util_log_info(
            'tdx Block ====',
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=5000
        )

        # 🛠todo fix here: meant to fetch the THS (同花顺) blocks -- still calls the tdx fetcher?
        coll.insert_many(
            QA_util_to_json_from_pandas(QA_fetch_get_stock_block('ths'))
        )
        QA_util_log_info(
            'ths Block ====',
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=8000
        )
        QA_util_log_info(
            '完成股票板块获取=',
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=10000
        )
    except Exception as e:
        QA_util_log_info(e, ui_log=ui_log)
        print(" Error save_tdx.QA_SU_save_stock_block exception!")
        pass
save stock_info
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
def QA_SU_save_stock_info(client=DATABASE, ui_log=None, ui_progress=None):
    """save stock_info

    Download per-stock fundamental info for every listed code and rebuild
    the `stock_info` collection, logging progress on a 0-10000 scale.

    Keyword Arguments:
        client {[type]} -- pymongo database handle (default: {DATABASE})
        ui_log {[type]} -- GUI log hook forwarded to QA_util_log_info
        ui_progress {[type]} -- GUI progress hook

    NOTE(review): the collection is dropped before any download starts;
    an aborted run leaves it partially filled.
    """
    client.drop_collection('stock_info')
    stock_list = QA_fetch_get_stock_list().code.unique().tolist()
    coll = client.stock_info
    coll.create_index('code')
    err = []  # codes whose fetch/insert failed

    def __saving_work(code, coll):
        # fetch and insert one stock's info; failures are collected in err
        QA_util_log_info(
            '##JOB10 Now Saving STOCK INFO ==== {}'.format(str(code)),
            ui_log=ui_log
        )
        try:
            coll.insert_many(
                QA_util_to_json_from_pandas(QA_fetch_get_stock_info(str(code)))
            )

        except:
            err.append(str(code))

    for i_ in range(len(stock_list)):
        # __saving_work('000001')
        strLogProgress = 'DOWNLOAD PROGRESS {} '.format(
            str(float(i_ / len(stock_list) * 100))[0:4] + '%'
        )
        intLogProgress = int(float(i_ / len(stock_list) * 10000.0))
        QA_util_log_info('The {} of Total {}'.format(i_, len(stock_list)))
        QA_util_log_info(
            strLogProgress,
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intLogProgress
        )

        __saving_work(stock_list[i_], coll)
    if len(err) < 1:
        QA_util_log_info('SUCCESS', ui_log=ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
        QA_util_log_info(err, ui_log=ui_log)
save stock_transaction
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
def QA_SU_save_stock_transaction(
        client=DATABASE,
        ui_log=None,
        ui_progress=None
):
    """save stock_transaction

    Download tick-level transactions for every listed code into the
    `stock_transaction` collection.

    Keyword Arguments:
        client {[type]} -- pymongo database handle (default: {DATABASE})
        ui_log {[type]} -- GUI log hook forwarded to QA_util_log_info
        ui_progress {[type]} -- GUI progress hook (0-10000 scale)

    NOTE(review): the full range 1990-01-01..today is requested on every
    run -- there is no incremental resume and the old collection is not
    dropped, so repeated runs may duplicate rows; confirm intent.
    """
    stock_list = QA_fetch_get_stock_list().code.unique().tolist()
    coll = client.stock_transaction
    coll.create_index('code')
    err = []  # codes whose download/insert failed

    def __saving_work(code):
        QA_util_log_info(
            '##JOB11 Now Saving STOCK_TRANSACTION ==== {}'.format(str(code)),
            ui_log=ui_log
        )
        try:
            coll.insert_many(
                QA_util_to_json_from_pandas(
                    # 🛠todo str(stock_list[code]) -- that parameter looked wrong?
                    QA_fetch_get_stock_transaction(
                        str(code),
                        '1990-01-01',
                        str(now_time())[0:10]
                    )
                )
            )
        except:
            err.append(str(code))

    for i_ in range(len(stock_list)):
        # __saving_work('000001')
        QA_util_log_info(
            'The {} of Total {}'.format(i_,
                                        len(stock_list)),
            ui_log=ui_log
        )

        strLogProgress = 'DOWNLOAD PROGRESS {} '.format(
            str(float(i_ / len(stock_list) * 100))[0:4] + '%'
        )
        intLogProgress = int(float(i_ / len(stock_list) * 10000.0))
        QA_util_log_info(
            strLogProgress,
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intLogProgress
        )

        __saving_work(stock_list[i_])
    if len(err) < 1:
        QA_util_log_info('SUCCESS', ui_log=ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
        QA_util_log_info(err, ui_log=ui_log)
:param client:
:return:
def QA_SU_save_option_commodity_day(
        client=DATABASE,
        ui_log=None,
        ui_progress=None
):
    '''
    Save daily bars for every supported commodity-option family
    (cu, m, sr, ru, cf, c), in that order.

    :param client: pymongo database handle
    :param ui_log: GUI log hook
    :param ui_progress: GUI progress hook
    :return: None
    '''
    savers = (
        _save_option_commodity_cu_day,
        _save_option_commodity_m_day,
        _save_option_commodity_sr_day,
        _save_option_commodity_ru_day,
        _save_option_commodity_cf_day,
        _save_option_commodity_c_day,
    )
    for saver in savers:
        saver(client=client, ui_log=ui_log, ui_progress=ui_progress)
:param client:
:return:
def QA_SU_save_option_commodity_min(
        client=DATABASE,
        ui_log=None,
        ui_progress=None
):
    '''
    Save minute bars for every supported commodity-option family
    (cu, sr, m, ru, cf, c), in that order.

    :param client: pymongo database handle
    :param ui_log: GUI log hook
    :param ui_progress: GUI progress hook
    :return: None
    '''
    # testing showed that fetching everything in one go tends to error;
    # fetch one family at a time (possibly rotating the server ip between
    # families?)
    savers = (
        _save_option_commodity_cu_min,
        _save_option_commodity_sr_min,
        _save_option_commodity_m_min,
        _save_option_commodity_ru_min,
        _save_option_commodity_cf_min,
        _save_option_commodity_c_min,
    )
    for saver in savers:
        saver(client=client, ui_log=ui_log, ui_progress=ui_progress)
:param client:
:return:
def QA_SU_save_option_min(client=DATABASE, ui_log=None, ui_progress=None):
    '''
    Incrementally save 50ETF option minute bars (1/5/15/30/60min) into
    the `option_day_min` collection, 4 contracts in parallel.

    :param client: pymongo database handle
    :param ui_log: GUI log hook
    :param ui_progress: GUI progress hook (0-10000 scale)
    :return: None
    '''
    option_contract_list = QA_fetch_get_option_contract_time_to_market()
    # NOTE(review): collection is named option_day_min although it holds
    # minute bars -- confirm before renaming
    coll_option_min = client.option_day_min
    coll_option_min.create_index(
        [("code",
          pymongo.ASCENDING),
         ("date_stamp",
          pymongo.ASCENDING)]
    )
    err = []
    # codes whose download failed
    # NOTE(review): duplicate initialization -- the line below re-binds err
    err = []

    def __saving_work(code, coll):
        QA_util_log_info(
            '##JOB13 Now Saving Option 50ETF MIN ==== {}'.format(str(code)),
            ui_log=ui_log
        )
        try:
            for type in ['1min', '5min', '15min', '30min', '60min']:
                ref_ = coll.find({'code': str(code)[0:8], 'type': type})
                end_time = str(now_time())[0:19]
                if ref_.count() > 0:
                    # resume from the last stored bar's datetime
                    start_time = ref_[ref_.count() - 1]['datetime']
                    QA_util_log_info(
                        '##JOB13.{} Now Saving Option 50ETF {} from {} to {} =={} '
                        .format(
                            ['1min',
                             '5min',
                             '15min',
                             '30min',
                             '60min'].index(type),
                            str(code),
                            start_time,
                            end_time,
                            type
                        ),
                        ui_log=ui_log
                    )
                    if start_time != end_time:
                        __data = QA_fetch_get_future_min(
                            str(code),
                            start_time,
                            end_time,
                            type
                        )
                        if len(__data) > 1:
                            QA_util_log_info(
                                " 写入 新增历史合约记录数 {} ".format(len(__data))
                            )
                            # skip the first row: it duplicates the last
                            # bar already stored
                            coll.insert_many(
                                QA_util_to_json_from_pandas(__data[1::])
                            )
                else:
                    # nothing stored yet: full history from 2015-01-01
                    start_time = '2015-01-01'
                    QA_util_log_info(
                        '##JOB13.{} Now Option 50ETF {} from {} to {} =={} '
                        .format(
                            ['1min',
                             '5min',
                             '15min',
                             '30min',
                             '60min'].index(type),
                            str(code),
                            start_time,
                            end_time,
                            type
                        ),
                        ui_log=ui_log
                    )
                    if start_time != end_time:
                        __data = QA_fetch_get_future_min(
                            str(code),
                            start_time,
                            end_time,
                            type
                        )
                        if len(__data) > 1:
                            QA_util_log_info(
                                " 写入 新增合约记录数 {} ".format(len(__data))
                            )
                            coll.insert_many(
                                QA_util_to_json_from_pandas(__data)
                            )
        except:
            err.append(code)

    executor = ThreadPoolExecutor(max_workers=4)
    res = {
        executor.submit(
            __saving_work,
            option_contract_list[i_]["code"],
            coll_option_min
        )
        for i_ in range(len(option_contract_list))
    }  # multi index ./.
    count = 0
    for i_ in concurrent.futures.as_completed(res):
        QA_util_log_info(
            'The {} of Total {}'.format(count,
                                        len(option_contract_list)),
            ui_log=ui_log
        )
        strLogProgress = 'DOWNLOAD PROGRESS {} '.format(
            str(float(count / len(option_contract_list) * 100))[0:4] + '%'
        )
        intLogProgress = int(float(count / len(option_contract_list) * 10000.0))
        QA_util_log_info(
            strLogProgress,
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intLogProgress
        )
        count = count + 1
    if len(err) < 1:
        QA_util_log_info('SUCCESS', ui_log=ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
        QA_util_log_info(err, ui_log=ui_log)
:param client:
:return:
def QA_SU_save_option_day(client=DATABASE, ui_log=None, ui_progress=None):
    '''
    Incrementally save 50ETF option daily bars into the `option_day`
    collection, one contract at a time.

    :param client: pymongo database handle
    :param ui_log: GUI log hook
    :param ui_progress: GUI progress hook (0-10000 scale)
    :return: None
    '''
    option_contract_list = QA_fetch_get_option_50etf_contract_time_to_market()
    coll_option_day = client.option_day
    coll_option_day.create_index(
        [("code",
          pymongo.ASCENDING),
         ("date_stamp",
          pymongo.ASCENDING)]
    )
    err = []  # codes whose download/insert failed

    def __saving_work(code, coll_option_day):
        try:
            QA_util_log_info(
                '##JOB12 Now Saving OPTION_DAY==== {}'.format(str(code)),
                ui_log=ui_log
            )
            # first check whether the database already has rows for this code
            # (option codes are numbered from 10000001, e.g. 10001228)
            ref = coll_option_day.find({'code': str(code)[0:8]})
            end_date = str(now_time())[0:10]
            # if rows already exist, continue incrementally; this guard
            # also avoids a negative index on freshly listed contracts
            # with no stored data
            if ref.count() > 0:
                # resume from the last stored date
                start_date = ref[ref.count() - 1]['date']
                QA_util_log_info(
                    ' 上次获取期权日线数据的最后日期是 {}'.format(start_date),
                    ui_log=ui_log
                )
                QA_util_log_info(
                    'UPDATE_OPTION_DAY \n 从上一次下载数据开始继续 Trying update {} from {} to {}'
                    .format(code,
                            start_date,
                            end_date),
                    ui_log=ui_log
                )
                if start_date != end_date:
                    start_date0 = QA_util_get_next_day(start_date)
                    df0 = QA_fetch_get_option_day(
                        code=code,
                        start_date=start_date0,
                        end_date=end_date,
                        frequence='day',
                        ip=None,
                        port=None
                    )
                    retCount = df0.iloc[:, 0].size
                    QA_util_log_info(
                        "日期从开始{}-结束{} , 合约代码{} , 返回了{}条记录 , 准备写入数据库".format(
                            start_date0,
                            end_date,
                            code,
                            retCount
                        ),
                        ui_log=ui_log
                    )
                    coll_option_day.insert_many(
                        QA_util_to_json_from_pandas(df0)
                    )
                else:
                    QA_util_log_info(
                        "^已经获取过这天的数据了^ {}".format(start_date),
                        ui_log=ui_log
                    )
            else:
                # no stored rows: download the full history
                start_date = '1990-01-01'
                QA_util_log_info(
                    'UPDATE_OPTION_DAY \n 从新开始下载数据 Trying update {} from {} to {}'
                    .format(code,
                            start_date,
                            end_date),
                    ui_log=ui_log
                )
                if start_date != end_date:
                    df0 = QA_fetch_get_option_day(
                        code=code,
                        start_date=start_date,
                        end_date=end_date,
                        frequence='day',
                        ip=None,
                        port=None
                    )
                    retCount = df0.iloc[:, 0].size
                    QA_util_log_info(
                        "日期从开始{}-结束{} , 合约代码{} , 获取了{}条记录 , 准备写入数据库^_^ ".format(
                            start_date,
                            end_date,
                            code,
                            retCount
                        ),
                        ui_log=ui_log
                    )
                    coll_option_day.insert_many(
                        QA_util_to_json_from_pandas(df0)
                    )
                else:
                    QA_util_log_info(
                        "*已经获取过这天的数据了* {}".format(start_date),
                        ui_log=ui_log
                    )
        except Exception as error0:
            print(error0)
            err.append(str(code))

    for item in range(len(option_contract_list)):
        QA_util_log_info(
            'The {} of Total {}'.format(item,
                                        len(option_contract_list)),
            ui_log=ui_log
        )

        strLogProgress = 'DOWNLOAD PROGRESS {} '.format(
            str(float(item / len(option_contract_list) * 100))[0:4] + '%'
        )
        intLogProgress = int(float(item / len(option_contract_list) * 10000.0))
        QA_util_log_info(
            strLogProgress,
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intLogProgress
        )

        __saving_work(option_contract_list[item].code, coll_option_day)

    if len(err) < 1:
        QA_util_log_info('SUCCESS save option day ^_^ ', ui_log=ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
        QA_util_log_info(err, ui_log=ui_log)
save future_day
保存日线数据
:param client:
:param ui_log: 给GUI qt 界面使用
:param ui_progress: 给GUI qt 界面使用
:param ui_progress_int_value: 给GUI qt 界面使用
:return:
def QA_SU_save_future_day(client=DATABASE, ui_log=None, ui_progress=None):
    '''
    save future_day
    保存日线数据 -- incremental daily-bar download for the continuous
    future contracts (codes ending in L8/L9).

    :param client: pymongo database handle
    :param ui_log: 给GUI qt 界面使用
    :param ui_progress: 给GUI qt 界面使用
    :return: None
    '''
    # keep only the continuous contracts
    future_list = [
        item for item in QA_fetch_get_future_list().code.unique().tolist()
        if str(item)[-2:] in ['L8',
                              'L9']
    ]
    coll_future_day = client.future_day
    coll_future_day.create_index(
        [("code",
          pymongo.ASCENDING),
         ("date_stamp",
          pymongo.ASCENDING)]
    )
    err = []  # codes whose download/insert failed

    def __saving_work(code, coll_future_day):
        try:
            QA_util_log_info(
                '##JOB12 Now Saving Future_DAY==== {}'.format(str(code)),
                ui_log=ui_log
            )
            # check whether the database already has rows for this code;
            # this guard also avoids a negative index on freshly listed
            # contracts with no stored data
            ref = coll_future_day.find({'code': str(code)[0:4]})
            end_date = str(now_time())[0:10]
            if ref.count() > 0:
                # resume from the last stored date
                start_date = ref[ref.count() - 1]['date']
                QA_util_log_info(
                    'UPDATE_Future_DAY \n Trying updating {} from {} to {}'
                    .format(code,
                            start_date,
                            end_date),
                    ui_log=ui_log
                )
                if start_date != end_date:
                    coll_future_day.insert_many(
                        QA_util_to_json_from_pandas(
                            QA_fetch_get_future_day(
                                str(code),
                                QA_util_get_next_day(start_date),
                                end_date
                            )
                        )
                    )
            # no rows yet: download the full history from 2001-01-01
            else:
                start_date = '2001-01-01'
                QA_util_log_info(
                    'UPDATE_Future_DAY \n Trying updating {} from {} to {}'
                    .format(code,
                            start_date,
                            end_date),
                    ui_log=ui_log
                )
                if start_date != end_date:
                    coll_future_day.insert_many(
                        QA_util_to_json_from_pandas(
                            QA_fetch_get_future_day(
                                str(code),
                                start_date,
                                end_date
                            )
                        )
                    )
        except Exception as error0:
            print(error0)
            err.append(str(code))

    for item in range(len(future_list)):
        QA_util_log_info('The {} of Total {}'.format(item, len(future_list)))
        # bug fix: the progress text used to interpolate ui_log into the
        # message ('DOWNLOAD PROGRESS {} {}'.format(pct, ui_log));
        # report only the percentage, like the other save jobs
        strProgressToLog = 'DOWNLOAD PROGRESS {} '.format(
            str(float(item / len(future_list) * 100))[0:4] + '%'
        )
        # bug fix: scale to 0-10000 like every other job (was * 100)
        intProgressToLog = int(float(item / len(future_list) * 10000.0))
        QA_util_log_info(
            strProgressToLog,
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intProgressToLog
        )

        __saving_work(future_list[item], coll_future_day)

    if len(err) < 1:
        QA_util_log_info('SUCCESS save future day ^_^', ui_log=ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
        QA_util_log_info(err, ui_log=ui_log)
save future_min
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
def QA_SU_save_future_min(client=DATABASE, ui_log=None, ui_progress=None):
    """save future_min

    Incrementally save future minute bars (1/5/15/30/60min) for the
    continuous contracts (codes ending in L8/L9) into `future_min`,
    4 contracts in parallel.

    Keyword Arguments:
        client {[type]} -- pymongo database handle (default: {DATABASE})
        ui_log {[type]} -- GUI log hook forwarded to QA_util_log_info
        ui_progress {[type]} -- GUI progress hook (0-10000 scale)
    """
    future_list = [
        item for item in QA_fetch_get_future_list().code.unique().tolist()
        if str(item)[-2:] in ['L8',
                              'L9']
    ]
    coll = client.future_min
    coll.create_index(
        [
            ('code',
             pymongo.ASCENDING),
            ('time_stamp',
             pymongo.ASCENDING),
            ('date_stamp',
             pymongo.ASCENDING)
        ]
    )
    err = []  # codes whose download failed

    def __saving_work(code, coll):
        QA_util_log_info(
            '##JOB13 Now Saving Future_MIN ==== {}'.format(str(code)),
            ui_log=ui_log
        )
        try:
            for type in ['1min', '5min', '15min', '30min', '60min']:
                ref_ = coll.find({'code': str(code)[0:6], 'type': type})
                end_time = str(now_time())[0:19]
                if ref_.count() > 0:
                    # resume from the last stored bar's datetime
                    start_time = ref_[ref_.count() - 1]['datetime']
                    QA_util_log_info(
                        '##JOB13.{} Now Saving Future {} from {} to {} =={} '
                        .format(
                            ['1min',
                             '5min',
                             '15min',
                             '30min',
                             '60min'].index(type),
                            str(code),
                            start_time,
                            end_time,
                            type
                        ),
                        ui_log=ui_log
                    )
                    if start_time != end_time:
                        __data = QA_fetch_get_future_min(
                            str(code),
                            start_time,
                            end_time,
                            type
                        )
                        if len(__data) > 1:
                            # skip the first row: it duplicates the last
                            # bar already stored
                            coll.insert_many(
                                QA_util_to_json_from_pandas(__data[1::])
                            )
                else:
                    # nothing stored yet: full history from 2015-01-01
                    start_time = '2015-01-01'
                    QA_util_log_info(
                        '##JOB13.{} Now Saving Future {} from {} to {} =={} '
                        .format(
                            ['1min',
                             '5min',
                             '15min',
                             '30min',
                             '60min'].index(type),
                            str(code),
                            start_time,
                            end_time,
                            type
                        ),
                        ui_log=ui_log
                    )
                    if start_time != end_time:
                        __data = QA_fetch_get_future_min(
                            str(code),
                            start_time,
                            end_time,
                            type
                        )
                        if len(__data) > 1:
                            coll.insert_many(
                                QA_util_to_json_from_pandas(__data)
                            )
        except:
            err.append(code)

    executor = ThreadPoolExecutor(max_workers=4)
    res = {
        executor.submit(__saving_work,
                        future_list[i_],
                        coll)
        for i_ in range(len(future_list))
    }  # multi index ./.
    count = 0
    for i_ in concurrent.futures.as_completed(res):
        QA_util_log_info(
            'The {} of Total {}'.format(count,
                                        len(future_list)),
            ui_log=ui_log
        )

        strLogProgress = 'DOWNLOAD PROGRESS {} '.format(
            str(float(count / len(future_list) * 100))[0:4] + '%'
        )
        intLogProgress = int(float(count / len(future_list) * 10000.0))
        QA_util_log_info(
            strLogProgress,
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intLogProgress
        )
        count = count + 1
    if len(err) < 1:
        QA_util_log_info('SUCCESS', ui_log=ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
        QA_util_log_info(err, ui_log=ui_log)
run a shell command
    def do_shell(self, arg):
        "run a shell command"
        # echo the command, run it through the shell, and print its stdout
        print(">", arg)
        sub_cmd = subprocess.Popen(arg, shell=True, stdout=subprocess.PIPE)
        print(sub_cmd.communicate()[0])
'获取股票分钟线'
:param code: 字符串str eg 600085
:param start: 字符串str 开始日期 eg 2011-01-01
:param end: 字符串str 结束日期 eg 2011-05-01
:param frequence: 字符串str 分钟线的类型 支持 1min 1m 5min 5m 15min 15m 30min 30m 60min 60m 类型
:param if_drop_index: Ture False , dataframe drop index or not
:param collections: mongodb 数据库
:return: QA_DataStruct_Stock_min 类型
def QA_fetch_stock_min_adv(
        code,
        start, end=None,
        frequence='1min',
        if_drop_index=True,
        # 🛠 todo the collections parameter is unused and the DB is fixed;
        # drop it eventually
        collections=DATABASE.stock_min):
    '''
    Fetch stock minute bars wrapped in a QA_DataStruct_Stock_min.

    :param code: str, e.g. 600085
    :param start: str start date, e.g. 2011-01-01
    :param end: str end date, e.g. 2011-05-01 (defaults to start)
    :param frequence: one of 1min/1m/5min/5m/15min/15m/30min/30m/60min/60m
    :param if_drop_index: whether set_index drops the original columns
    :param collections: mongodb collection (unused)
    :return: QA_DataStruct_Stock_min, or None on bad arguments / no data
    '''
    # normalize the short aliases (1m -> 1min, ...) and reject anything else
    alias = {'1m': '1min', '5m': '5min', '15m': '15min',
             '30m': '30min', '60m': '60min'}
    if frequence in alias:
        frequence = alias[frequence]
    elif frequence not in ('1min', '5min', '15min', '30min', '60min'):
        print("QA Error QA_fetch_stock_min_adv parameter frequence=%s is none of 1min 1m 5min 5m 15min 15m 30min 30m 60min 60m" % frequence)
        return None

    end = start if end is None else end
    # bare dates are widened to the trading-session boundaries
    if len(start) == 10:
        start = '{} 09:30:00'.format(start)
    if len(end) == 10:
        end = '{} 15:00:00'.format(end)

    if start == end:
        # 🛠 todo when equal, derive the span from frequence --
        # QA_fetch_stock_min does not support start == end
        print("QA Error QA_fetch_stock_min_adv parameter code=%s , start=%s, end=%s is equal, should have time span! " % (
            code, start, end))
        return None

    # 🛠 todo report an error when start is after end
    res = QA_fetch_stock_min(
        code, start, end, format='pd', frequence=frequence)
    if res is None:
        print("QA Error QA_fetch_stock_min_adv parameter code=%s , start=%s, end=%s frequence=%s call QA_fetch_stock_min return None" % (
            code, start, end, frequence))
        return None
    return QA_DataStruct_Stock_min(
        res.set_index(['datetime', 'code'], drop=if_drop_index))
'返回全市场某一天的数据'
:param date:
:return: QA_DataStruct_Stock_day类 型数据
def QA_fetch_stock_day_full_adv(date):
    '''
    Fetch one full-market day of stock daily bars.

    :param date: date string
    :return: QA_DataStruct_Stock_day indexed by (date, code), or None
        when the underlying fetch returned nothing
    '''
    # 🛠 todo validate the date argument
    res = QA_fetch_stock_full(date, 'pd')
    if res is None:
        print("QA Error QA_fetch_stock_day_full_adv parameter date=%s call QA_fetch_stock_full return None" % (date))
        return None
    return QA_DataStruct_Stock_day(res.set_index(['date', 'code']))
:param code: code: 字符串str eg 600085
:param start: 字符串str 开始日期 eg 2011-01-01
:param end: 字符串str 结束日期 eg 2011-05-01
:param if_drop_index: Ture False , dataframe drop index or not
:param collections: mongodb 数据库
:return:
def QA_fetch_index_day_adv(
        code,
        start, end=None,
        if_drop_index=True,
        # 🛠 todo the collections parameter is unused and the DB is fixed;
        # drop it eventually
        collections=DATABASE.index_day):
    '''
    Fetch index daily bars wrapped in a QA_DataStruct_Index_day.

    :param code: str, e.g. 600085
    :param start: str start date, e.g. 2011-01-01
    :param end: str end date, e.g. 2011-05-01 (defaults to start)
    :param if_drop_index: whether set_index drops the original columns
    :param collections: mongodb collection (unused)
    :return: QA_DataStruct_Index_day, or None when nothing was fetched
    '''
    end = start if end is None else end
    # keep only the YYYY-MM-DD part
    start = str(start)[0:10]
    end = str(end)[0:10]
    # 🛠 todo report an error when start is after end / when equal
    res = QA_fetch_index_day(code, start, end, format='pd')
    if res is None:
        print("QA Error QA_fetch_index_day_adv parameter code=%s start=%s end=%s call QA_fetch_index_day return None" % (
            code, start, end))
        return None
    return QA_DataStruct_Index_day(
        res.set_index(['date', 'code'], drop=if_drop_index))
'获取指数分钟线'
:param code:
:param start:
:param end:
:param frequence:
:param if_drop_index:
:param collections:
:return:
def QA_fetch_index_min_adv(
        code,
        start, end=None,
        frequence='1min',
        if_drop_index=True,
        collections=DATABASE.index_min):
    '''
    Fetch index minute bars wrapped in a QA_DataStruct_Index_min.

    :param code: str, e.g. 000300
    :param start: str start date (YYYY-MM-DD) or datetime string
    :param end: str end date (defaults to start)
    :param frequence: 1min/1m/5min/5m/15min/15m/30min/30m/60min/60m
    :param if_drop_index: whether set_index drops the original columns
    :param collections: mongodb collection (unused here)
    :return: QA_DataStruct_Index_min, or None when nothing was fetched

    NOTE(review): unlike QA_fetch_stock_min_adv, an unrecognized
    frequence is passed through to QA_fetch_index_min unvalidated.
    '''
    # normalize the short aliases (1m -> 1min, ...)
    if frequence in ['1min', '1m']:
        frequence = '1min'
    elif frequence in ['5min', '5m']:
        frequence = '5min'
    elif frequence in ['15min', '15m']:
        frequence = '15min'
    elif frequence in ['30min', '30m']:
        frequence = '30min'
    elif frequence in ['60min', '60m']:
        frequence = '60min'

    # __data = []  (unused)
    end = start if end is None else end
    # bare dates are widened to the trading-session boundaries
    if len(start) == 10:
        start = '{} 09:30:00'.format(start)

    if len(end) == 10:
        end = '{} 15:00:00'.format(end)

    # 🛠 todo report an error when start is after end
    # if start == end:
    # 🛠 todo when equal, derive the span from frequence --
    # QA_fetch_index_min_adv does not support start == end
    #print("QA Error QA_fetch_index_min_adv parameter code=%s , start=%s, end=%s is equal, should have time span! " % (code, start, end))
    # return None
    res = QA_fetch_index_min(
        code, start, end, format='pd', frequence=frequence)
    if res is None:
        # falls through and returns None implicitly
        print("QA Error QA_fetch_index_min_adv parameter code=%s start=%s end=%s frequence=%s call QA_fetch_index_min return None" % (
            code, start, end, frequence))
    else:
        res_reset_index = res.set_index(
            ['datetime', 'code'], drop=if_drop_index)
        # if res_reset_index is None:
        #     print("QA Error QA_fetch_index_min_adv set index 'date, code' return None")
        return QA_DataStruct_Index_min(res_reset_index)
'获取股票列表'
:param collections: mongodb 数据库
:return: DataFrame
def QA_fetch_stock_list_adv(collections=DATABASE.stock_list):
    '''
    Fetch the stock list.

    :param collections: mongodb collection (default DATABASE.stock_list)
    :return: DataFrame, or None when the collection is empty
    '''
    items = QA_fetch_stock_list(collections)
    if len(items) != 0:
        return items
    print("QA Error QA_fetch_stock_list_adv call item for item in collections.find() return 0 item, maybe the DATABASE.stock_list is empty!")
    return None
'获取指数列表'
:param collections: mongodb 数据库
:return: DataFrame
def QA_fetch_index_list_adv(collections=DATABASE.index_list):
    '''
    Fetch the index list.

    :param collections: mongodb collection (default DATABASE.index_list)
    :return: DataFrame, or None when the collection is empty
    '''
    items = QA_fetch_index_list(collections)
    if len(items) != 0:
        return items
    print("QA Error QA_fetch_index_list_adv call item for item in collections.find() return 0 item, maybe the DATABASE.index_list is empty!")
    return None
:param code: code: 字符串str eg 600085
:param start: 字符串str 开始日期 eg 2011-01-01
:param end: 字符串str 结束日期 eg 2011-05-01
:param if_drop_index: Ture False , dataframe drop index or not
:param collections: mongodb 数据库
:return:
def QA_fetch_future_day_adv(
        code,
        start, end=None,
        if_drop_index=True,
        # 🛠 todo the collections parameter is unused and the DB is fixed;
        # drop it eventually.
        # NOTE(review): the default is DATABASE.index_day, not future_day --
        # looks like a copy-paste slip; harmless while the parameter is
        # unused, but confirm before relying on it
        collections=DATABASE.index_day):
    '''
    Fetch future daily bars wrapped in a QA_DataStruct_Future_day.

    :param code: str, e.g. RBL8
    :param start: str start date, e.g. 2011-01-01
    :param end: str end date (defaults to start)
    :param if_drop_index: accepted but not forwarded to set_index here
    :param collections: mongodb collection (unused)
    :return: QA_DataStruct_Future_day, or None when nothing was fetched
    '''
    '获取期货日线'
    end = start if end is None else end
    # keep only the YYYY-MM-DD part
    start = str(start)[0:10]
    end = str(end)[0:10]
    # 🛠 todo report an error when start is after end / when equal
    res = QA_fetch_future_day(code, start, end, format='pd')
    if res is None:
        # falls through and returns None implicitly
        print("QA Error QA_fetch_future_day_adv parameter code=%s start=%s end=%s call QA_fetch_future_day return None" % (
            code, start, end))
    else:
        res_set_index = res.set_index(['date', 'code'])
        # if res_set_index is None:
        #     print("QA Error QA_fetch_index_day_adv set index 'date, code' return None")
        #     return None
        return QA_DataStruct_Future_day(res_set_index)
'获取期货分钟线'
:param code:
:param start:
:param end:
:param frequence:
:param if_drop_index:
:param collections:
:return:
def QA_fetch_future_min_adv(
        code,
        start, end=None,
        frequence='1min',
        if_drop_index=True,
        collections=DATABASE.future_min):
    '''
    Fetch future minute bars wrapped in a QA_DataStruct_Future_min.

    :param code: str, e.g. RBL8
    :param start: str start date (YYYY-MM-DD) or datetime string
    :param end: str end date (defaults to start)
    :param frequence: 1min/1m/5min/5m/15min/15m/30min/30m/60min/60m
    :param if_drop_index: whether set_index drops the original columns
    :param collections: mongodb collection (unused here)
    :return: QA_DataStruct_Future_min, or None when nothing was fetched

    NOTE(review): an unrecognized frequence is passed through to
    QA_fetch_future_min unvalidated.
    '''
    # normalize the short aliases (1m -> 1min, ...)
    if frequence in ['1min', '1m']:
        frequence = '1min'
    elif frequence in ['5min', '5m']:
        frequence = '5min'
    elif frequence in ['15min', '15m']:
        frequence = '15min'
    elif frequence in ['30min', '30m']:
        frequence = '30min'
    elif frequence in ['60min', '60m']:
        frequence = '60min'

    # __data = []  (unused)
    end = start if end is None else end
    # bare dates are widened; futures sessions start at midnight here
    if len(start) == 10:
        start = '{} 00:00:00'.format(start)

    if len(end) == 10:
        end = '{} 15:00:00'.format(end)

    # 🛠 todo report an error when start is after end
    # if start == end:
    # 🛠 todo when equal, derive the span from frequence --
    # start == end is not supported
    #print("QA Error QA_fetch_index_min_adv parameter code=%s , start=%s, end=%s is equal, should have time span! " % (code, start, end))
    # return None
    res = QA_fetch_future_min(
        code, start, end, format='pd', frequence=frequence)
    if res is None:
        # falls through and returns None implicitly
        print("QA Error QA_fetch_future_min_adv parameter code=%s start=%s end=%s frequence=%s call QA_fetch_future_min return None" % (
            code, start, end, frequence))
    else:
        res_reset_index = res.set_index(
            ['datetime', 'code'], drop=if_drop_index)
        # if res_reset_index is None:
        #     print("QA Error QA_fetch_index_min_adv set index 'date, code' return None")
        return QA_DataStruct_Future_min(res_reset_index)
'获取期货列表'
:param collections: mongodb 数据库
:return: DataFrame
def QA_fetch_future_list_adv(collections=DATABASE.future_list):
    '''
    Fetch the future list.

    :param collections: mongodb collection (default DATABASE.future_list)
    :return: DataFrame, or None when the collection is empty
    '''
    # bug fix: forward the collections argument like the stock/index
    # variants do -- the original ignored the parameter entirely
    future_list_items = QA_fetch_future_list(collections)
    if len(future_list_items) == 0:
        print("QA Error QA_fetch_future_list_adv call item for item in collections.find() return 0 item, maybe the DATABASE.future_list is empty!")
        return None
    return future_list_items
返回板块 ❌
:param code:
:param blockname:
:param collections: 默认数据库 stock_block
:return: QA_DataStruct_Stock_block
def QA_fetch_stock_block_adv(code=None, blockname=None, collections=DATABASE.stock_block):
    '''
    Query board/sector membership. ❌

    :param code: single code or list of codes; when given (and blockname
        is None) returns the blocks those codes belong to
    :param blockname: block-name pattern; when given (and code is None)
        returns all codes in matching blocks
    :param collections: mongodb collection (default DATABASE.stock_block)
    :return: QA_DataStruct_Stock_block indexed by (blockname, code)
    '''
    if code is not None and blockname is None:
        # return the blocks this code (or these codes) belongs to
        # NOTE(review): a plain str code is used directly in $in --
        # presumably callers pass a list; confirm
        data = pd.DataFrame([item for item in collections.find(
            {'code': {'$in': code}})])
        data = data.drop(['_id'], axis=1)
        return QA_DataStruct_Stock_block(data.set_index(['blockname', 'code'], drop=True).drop_duplicates())

    elif blockname is not None and code is None:
        #
        # 🛠 todo finished -- return every stock belonging to this block
        # (blockname is matched as a regex substring)
        # print("QA Error blockname is Not none code none, return all code from its block name have not implemented yet !")
        items_from_collections = [item for item in collections.find(
            {'blockname': re.compile(blockname)})]
        data = pd.DataFrame(items_from_collections).drop(['_id'], axis=1)
        data_set_index = data.set_index(['blockname', 'code'], drop=True)
        return QA_DataStruct_Stock_block(data_set_index)

    else:
        # 🛠 todo decide whether this should instead test if the given
        # stock belongs to the given block; for now return everything
        data = pd.DataFrame(
            [item for item in collections.find()]).drop(['_id'], axis=1)
        data_set_index = data.set_index(['blockname', 'code'], drop=True)
        return QA_DataStruct_Stock_block(data_set_index)
返回当日的上下五档, code可以是股票可以是list, num是每个股票获取的数量
:param code:
:param num:
:param collections: realtime_XXXX-XX-XX 每天实时时间
:return: DataFrame
def QA_fetch_stock_realtime_adv(code=None,
                                num=1,
                                collections=DATABASE.get_collection('realtime_{}'.format(datetime.date.today()))):
    '''
    返回当日的上下五档, code可以是股票可以是list, num是每个股票获取的数量

    Fetch the latest five-level quote snapshots for one or more codes.

    :param code: single code (str) or list of codes
    :param num: number of most-recent snapshots per code
    :param collections: the day's realtime_XXXX-XX-XX collection.
        NOTE(review): the default is evaluated once at import time, so a
        long-running process keeps pointing at the import-day collection.
    :return: DataFrame indexed by (datetime, code), or None on error
    '''
    if code is not None:
        # normalize the code argument to a list for the $in query
        if isinstance(code, str):
            code = [code]
        elif isinstance(code, list):
            pass
        else:
            print(
                "QA Error QA_fetch_stock_realtime_adv parameter code is not List type or String type")
            # bug fix: bail out instead of querying with an unusable type
            return
        items_from_collections = [item for item in collections.find(
            {'code': {'$in': code}}, limit=num*len(code), sort=[('datetime', pymongo.DESCENDING)])]
        if items_from_collections is None:
            print("QA Error QA_fetch_stock_realtime_adv find parameter code={} num={} collection={} return NOne".format(
                code, num, collections))
            return
        data = pd.DataFrame(items_from_collections)
        data_set_index = data.set_index(
            ['datetime', 'code'], drop=False).drop(['_id'], axis=1)
        return data_set_index
    else:
        print("QA Error QA_fetch_stock_realtime_adv parameter code is None")
高级财务查询接口
Arguments:
code {[type]} -- [description]
start {[type]} -- [description]
Keyword Arguments:
end {[type]} -- [description] (default: {None})
def QA_fetch_financial_report_adv(code, start, end=None, ltype='EN'):
    """高级财务查询接口 -- advanced financial-report query.

    Arguments:
        code {[type]} -- stock code(s)
        start {[type]} -- report date, or range start when end is given

    Keyword Arguments:
        end {[type]} -- range end; when None, start is used as-is
            (default: {None})
        ltype {str} -- field-name language of the result (default 'EN')
    """
    if end is None:
        return QA_DataStruct_Financial(
            QA_fetch_financial_report(code, start, ltype=ltype))
    # month_data is a module-level calendar of report dates; slice out
    # the dates falling inside [start, end]
    dates = pd.Series(
        data=month_data, index=pd.to_datetime(month_data), name='date')
    timerange = dates.loc[start:end].tolist()
    return QA_DataStruct_Financial(
        QA_fetch_financial_report(code, timerange, ltype=ltype))
获取股票日线
def QA_fetch_stock_financial_calendar_adv(code, start="all", end=None, format='pd', collections=DATABASE.report_calendar):
    '''获取股票财报日历

    Fetch the financial-report calendar for a stock over [start, end].

    :param code: stock code(s)
    :param start: start date, or "all" for the full history
    :param end: end date (defaults to start)
    :param format: accepted for interface compatibility (unused)
    :param collections: mongodb collection (unused here)
    :return: QA_DataStruct_Financial
    '''
    # code= [code] if isinstance(code,str) else code
    end = start if end is None else end
    start = str(start)[0:10]
    end = str(end)[0:10]
    # code checking
    if start == 'all':
        start = '1990-01-01'
        end = str(datetime.date.today())
    # bug fix: removed the dead `if end is None` branch (end is always set
    # above) and the unused month_data series/timerange computation
    return QA_DataStruct_Financial(
        QA_fetch_stock_financial_calendar(code, start, end))
QUANTAXIS 读取历史交易记录 通达信 历史成交-输出-xlsfile--转换csvfile
def QA_fetch_get_tdxtraderecord(file):
    """
    QUANTAXIS 读取历史交易记录 通达信 历史成交-输出-xlsfile--转换csvfile

    Read a TDX historical-trade export that has been converted to CSV.

    Arguments:
        file {str} -- path of the csv file to read

    Returns:
        pd.DataFrame -- first csv row becomes the column index

    Raises:
        IOError -- when the file cannot be read or parsed
    """
    try:
        # bug fix: read the file passed by the caller instead of the
        # hard-coded './20180606.csv'
        with open(file, 'r') as f:
            rows = [item for item in csv.reader(f)]
        return pd.DataFrame(rows[1:], columns=rows[0])
    except:
        raise IOError('QA CANNOT READ THIS RECORD')
阿隆指标
Arguments:
DataFrame {[type]} -- [description]
Keyword Arguments:
N {int} -- [description] (default: {14})
Returns:
[type] -- [description]
def AROON(DataFrame, N=14):
    """阿隆指标 (Aroon indicator)

    Arguments:
        DataFrame {[type]} -- bar data with `high` and `low` columns

    Keyword Arguments:
        N {int} -- lookback period (default: {14})

    Returns:
        [type] -- DataFrame with AROON_UP / AROON_DOWN columns, indexed
            like the input
    """
    # bug fix: talib.AROON returns (aroondown, aroonup) in that order --
    # the original unpacked them reversed, swapping the two outputs
    ar_down, ar_up = talib.AROON(
        DataFrame.high.values, DataFrame.low.values, timeperiod=N)
    return pd.DataFrame(
        {'AROON_UP': ar_up, 'AROON_DOWN': ar_down}, index=DataFrame.index)
当前无法区分是百分比还是按手数收费,不过可以拿到以后自行判断
def get_commission_coeff(self, code):
"""
当前无法区分是百分比还是按手数收费,不过可以拿到以后自行判断
"""
return max(self.get_code(code).get('commission_coeff_peramount'),
self.get_code(code).get('commission_coeff_pervol')) |
trade是一个可迭代的list/generator
def import_trade(self, trade):
"""
trade是一个可迭代的list/generator
"""
for item in trade:
self.make_deal(item.code, item.datetime, item.amount,
item.towards, item.price.item.order_model, item.amount_model) |
这是一个一定会成交,并且立刻结转(及t+0)的交易入口
def make_deal(self, code, datetime, amount=100, towards=ORDER_DIRECTION.BUY, price=0, order_model=ORDER_MODEL.MARKET, amount_model=AMOUNT_MODEL.BY_AMOUNT):
    """An entry that always fills and settles immediately (i.e. T+0).

    The order is sent by the account, matched by the backtest broker,
    booked back into the account and settled in one synchronous pass.
    """
    order = self.account.send_order(
        code=code,
        time=datetime,
        amount=amount,
        towards=towards,
        price=price,
        order_model=order_model,
        amount_model=amount_model
    )
    deal_event = self.backtest_broker.receive_order(QA_Event(order=order))
    self.account.receive_deal(deal_event)
    self.account.settle()
撤单
Arguments:
amount {int} -- 撤单数量
def cancel(self):
    """Cancel this order.

    Records the uncancelled remainder (`amount - trade_amount`) and sets
    the status to CANCEL_ALL when nothing traded yet, CANCEL_PART when the
    order was partially filled.
    """
    self.cancel_amount = self.amount - self.trade_amount
    nothing_traded = self.trade_amount == 0
    # full cancel when untouched, partial cancel otherwise
    self._status = (ORDER_STATUS.CANCEL_ALL if nothing_traded
                    else ORDER_STATUS.CANCEL_PART)
失败订单(未成功创建入broker)
Arguments:
reason {str} -- 失败原因
def failed(self, reason=None):
    """Mark the order as failed (it never made it into the broker).

    Arguments:
        reason {str} -- failure cause, e.g. invalid order, price beyond
            the limit-up/limit-down band, or a communication failure
    """
    self.reason = str(reason)
    self._status = ORDER_STATUS.FAILED
trade 状态
Arguments:
amount {[type]} -- [description]
def trade(self, trade_id, trade_price, trade_amount, trade_time):
    """Apply a fill (partial or complete) to this order.

    Updates the volume-weighted average trade price, accumulates the
    traded amount, records the fill id/time and notifies `self.callback`.
    Duplicate fill ids are ignored; a non-positive amount moves the order
    to the NEXT state.

    Arguments:
        trade_id {str} -- unique fill identifier
        trade_price {float} -- fill price
        trade_amount {int} -- fill volume
        trade_time {str} -- fill timestamp

    Raises:
        RuntimeError -- when the order is not in a tradable state
    """
    if self.status in [ORDER_STATUS.SUCCESS_PART, ORDER_STATUS.QUEUED]:
        trade_amount = int(trade_amount)
        trade_id = str(trade_id)
        if trade_amount < 1:
            self._status = ORDER_STATUS.NEXT
        else:
            if trade_id not in self.trade_id:
                trade_price = float(trade_price)
                trade_time = str(trade_time)
                self.trade_id.append(trade_id)
                # volume-weighted average price over all fills so far
                self.trade_price = (
                    self.trade_price * self.trade_amount +
                    trade_price * trade_amount
                ) / (
                    self.trade_amount + trade_amount
                )
                self.trade_amount += trade_amount
                self.trade_time.append(trade_time)
                self.callback(
                    self.code,
                    trade_id,
                    self.order_id,
                    self.realorder_id,
                    trade_price,
                    trade_amount,
                    self.towards,
                    trade_time
                )
            else:
                # duplicate fill notification -- already recorded, ignore
                pass
    else:
        # BUG FIX: 'CANNNOT' typo in the error message corrected
        raise RuntimeError(
            'ORDER STATUS {} CANNOT TRADE'.format(self.status)
        )
{
"aid": "insert_order", # //必填, 下单请求
# //必填, 需要与登录用户名一致, 或为登录用户的子账户(例如登录用户为user1, 则报单 user_id 应当为 user1 或 user1.some_unit)
"user_id": account_cookie,
# //必填, 委托单号, 需确保在一个账号中不重复, 限长512字节
"order_id": order_id if order_id else QA.QA_util_random_with_topic('QAOTG'),
"exchange_id": exchange_id, # //必填, 下单到哪个交易所
"instrument_id": code, # //必填, 下单合约代码
"direction": order_direction, # //必填, 下单买卖方向
# //必填, 下单开平方向, 仅当指令相关对象不支持开平机制(例如股票)时可不填写此字段
"offset": order_offset,
"volume": volume, # //必填, 下单手数
"price_type": "LIMIT", # //必填, 报单价格类型
"limit_price": price, # //当 price_type == LIMIT 时需要填写此字段, 报单价格
"volume_condition": "ANY",
"time_condition": "GFD",
}
def to_otgdict(self):
    """Serialize this order into an OTG `insert_order` request dict.

    All fields the OTG protocol marks 必填 (required) are populated from
    the order's own attributes; `limit_price` is meaningful when the price
    type is LIMIT.
    """
    request = {
        "aid": "insert_order",            # required: insert-order request
        "user_id": self.account_cookie,   # required: login user or sub-account
        "order_id": self.order_id,        # required: unique within one account
        "exchange_id": self.exchange_id,  # required: target exchange
        "instrument_id": self.code,       # required: instrument code
        "direction": self.direction,      # required: BUY / SELL
        "offset": self.offset,            # open/close; optional for stocks
        "volume": self.amount,            # required: lots
        "price_type": self.order_model,   # required: price type
        "limit_price": self.price,        # used when price_type == LIMIT
        "volume_condition": "ANY",
        "time_condition": "GFD",
    }
    return request
[summary]
Arguments:
otgOrder {[type]} -- [description]
{'seqno': 6,
'user_id': '106184',
'order_id': 'WDRB_QA01_FtNlyBem',
'exchange_id': 'SHFE',
'instrument_id': 'rb1905',
'direction': 'SELL',
'offset': 'OPEN',
'volume_orign': 50, #(总报单手数)
'price_type': 'LIMIT', # "LIMIT" (价格类型, ANY=市价, LIMIT=限价)
'limit_price': 3432.0, # 4500.0 (委托价格, 仅当 price_type = LIMIT 时有效)
'time_condition': 'GFD',# "GFD" (时间条件, IOC=立即完成,否则撤销, GFS=本节有效, GFD=当日有效, GTC=撤销前有效, GFA=集合竞价有效)
'volume_condition': 'ANY', # "ANY" (手数条件, ANY=任何数量, MIN=最小数量, ALL=全部数量)
'insert_date_time': 1545656460000000000,# 1501074872000000000 (下单时间(按北京时间),自unix epoch(1970-01-01 00:00:00 GMT)以来的纳秒数)
'exchange_order_id': ' 3738',
'status': 'FINISHED', # "ALIVE" (委托单状态, ALIVE=有效, FINISHED=已完)
'volume_left': 0,
'last_msg': '全部成交报单已提交'} # "报单成功" (委托单状态信息)
def from_otgformat(self, otgOrder):
    """Populate this order from an OTG order dict and return self.

    Arguments:
        otgOrder {dict} -- OTG order payload, e.g.::

            {'seqno': 6, 'user_id': '106184', 'order_id': 'WDRB_QA01_FtNlyBem',
             'exchange_id': 'SHFE', 'instrument_id': 'rb1905',
             'direction': 'SELL', 'offset': 'OPEN',
             'volume_orign': 50,          # total lots ordered
             'price_type': 'LIMIT',       # ANY=market, LIMIT=limit
             'limit_price': 3432.0,       # only valid when price_type=LIMIT
             'time_condition': 'GFD',     # IOC/GFS/GFD/GTC/GFA
             'volume_condition': 'ANY',   # ANY/MIN/ALL
             'insert_date_time': 1545656460000000000,  # ns since unix epoch
             'exchange_order_id': ' 3738',
             'status': 'FINISHED',        # ALIVE=live, FINISHED=done
             'volume_left': 0,
             'last_msg': '全部成交报单已提交'}

    Returns:
        QA_Order -- self, for chaining
    """
    self.order_id = otgOrder.get('order_id')
    self.account_cookie = otgOrder.get('user_id')
    self.exchange_id = otgOrder.get('exchange_id')
    self.code = str(otgOrder.get('instrument_id')).upper()
    self.offset = otgOrder.get('offset')
    self.direction = otgOrder.get('direction')
    # getattr replaces the original eval() -- identical attribute lookup,
    # without executing arbitrary strings as code
    self.towards = getattr(
        ORDER_DIRECTION,
        '{}_{}'.format(self.direction, self.offset)
    )
    self.amount = otgOrder.get('volume_orign')
    self.trade_amount = self.amount - otgOrder.get('volume_left')
    self.price = otgOrder.get('limit_price')
    self.order_model = getattr(ORDER_MODEL, otgOrder.get('price_type'))
    self.time_condition = otgOrder.get('time_condition')
    if otgOrder.get('insert_date_time') == 0:
        self.datetime = 0
    else:
        self.datetime = QA_util_stamp2datetime(
            int(otgOrder.get('insert_date_time'))
        )
    self.sending_time = self.datetime
    self.volume_condition = otgOrder.get('volume_condition')
    self.message = otgOrder.get('last_msg')
    self._status = ORDER_STATUS.NEW
    # guard: last_msg may be absent/None; the original would raise
    # TypeError on the `in` checks in that case
    if self.message is not None and (
            '已撤单' in self.message or '拒绝' in self.message
            or '仓位不足' in self.message):
        # 仓位不足 (insufficient position): usually close-today/close-yesterday
        self._status = ORDER_STATUS.FAILED
    self.realorder_id = otgOrder.get('exchange_order_id')
    return self
从字段类型的字段 填充 对象的字段
:param order_dict: dict 类型
:return: self QA_Order
def from_dict(self, order_dict):
    '''
    Fill this order's attributes from a plain dict.

    :param order_dict: dict holding one value per order field
    :return: self (QA_Order) on success; None when a field is missing
        (the failure is logged)
    '''
    # assignment order matches the original field-by-field copy
    fields = (
        'price',
        'date',
        'datetime',
        'sending_time',   # time the order was sent
        'trade_time',
        'amount',
        'frequence',
        'market_type',
        'towards',
        'code',
        'user',
        'account_cookie',
        'strategy',
        'type',
        'order_model',
        'amount_model',
        'order_id',
        'realorder_id',
        'trade_id',
        'callback',
        'commission_coeff',
        'tax_coeff',
        'money',
        '_status',
        'cancel_amount',
        'trade_amount',
        'trade_price',
        'reason',
    )
    try:
        for name in fields:
            setattr(self, name, order_dict[name])
        return self
    except Exception as e:
        QA_util_log_info('Failed to tran from dict {}'.format(e))
:param order: QA_Order类型
:return:
def insert_order(self, order):
    '''
    Register an order in the live order map, keyed by its order_id.

    :param order: QA_Order instance (or None, which is rejected)
    :return: the order itself, or None when None was passed
    '''
    if order is None:
        print('QAERROR Wrong for get None type while insert order to Queue')
        return None
    self.order_list[order.order_id] = order
    return order
600 废单 未委托成功
200 委托成功,完全交易
203 委托成功,未完全成功
300 委托队列 待成交
400 已撤单
500 服务器撤单/每日结算
订单生成(100) -- 废单(600)
订单生成(100) -- 进入待成交队列(300) -- 完全成交(200) -- 每日结算(500)-- 死亡
订单生成(100) -- 进入待成交队列(300) -- 部分成交(203) -- 未成交(300) -- 每日结算(500) -- 死亡
订单生成(100) -- 进入待成交队列(300) -- 主动撤单(400) -- 每日结算(500) -- 死亡
选择待成交列表
:return: dataframe
def pending(self):
    '''
    Return the list of live (pending) orders.

    Status lifecycle::

        600 rejected, never accepted
        200 fully filled
        203 partially filled
        300 queued, waiting to fill
        400 cancelled by user
        500 cancelled by server / daily settlement

        created(100) -- rejected(600)
        created(100) -- queued(300) -- filled(200) -- settled(500) -- dead
        created(100) -- queued(300) -- partial(203) -- queued(300) -- settled(500) -- dead
        created(100) -- queued(300) -- cancelled(400) -- settled(500) -- dead

    :return: list of orders whose status is QUEUED / NEXT / SUCCESS_PART
    '''
    try:
        return [
            item for item in self.order_list.values() if item.status in [
                ORDER_STATUS.QUEUED,
                ORDER_STATUS.NEXT,
                ORDER_STATUS.SUCCESS_PART
            ]
        ]
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; any data error still yields an empty list
        return []
使用数据库数据进行复权
def _QA_data_stock_to_fq(bfq_data, xdxr_data, fqtype):
'使用数据库数据进行复权'
info = xdxr_data.query('category==1')
bfq_data = bfq_data.assign(if_trade=1)
if len(info) > 0:
data = pd.concat(
[
bfq_data,
info.loc[bfq_data.index[0]:bfq_data.index[-1],
['category']]
],
axis=1
)
data['if_trade'].fillna(value=0, inplace=True)
data = data.fillna(method='ffill')
data = pd.concat(
[
data,
info.loc[bfq_data.index[0]:bfq_data.index[-1],
['fenhong',
'peigu',
'peigujia',
'songzhuangu']]
],
axis=1
)
else:
data = pd.concat(
[
bfq_data,
info.
loc[:,
['category',
'fenhong',
'peigu',
'peigujia',
'songzhuangu']]
],
axis=1
)
data = data.fillna(0)
data['preclose'] = (
data['close'].shift(1) * 10 - data['fenhong'] +
data['peigu'] * data['peigujia']
) / (10 + data['peigu'] + data['songzhuangu'])
if fqtype in ['01', 'qfq']:
data['adj'] = (data['preclose'].shift(-1) /
data['close']).fillna(1)[::-1].cumprod()
else:
data['adj'] = (data['close'] /
data['preclose'].shift(-1)).cumprod().shift(1).fillna(1)
for col in ['open', 'high', 'low', 'close', 'preclose']:
data[col] = data[col] * data['adj']
data['volume'] = data['volume'] / \
data['adj'] if 'volume' in data.columns else data['vol']/data['adj']
try:
data['high_limit'] = data['high_limit'] * data['adj']
data['low_limit'] = data['high_limit'] * data['adj']
except:
pass
return data.query('if_trade==1 and open != 0').drop(
['fenhong',
'peigu',
'peigujia',
'songzhuangu',
'if_trade',
'category'],
axis=1,
errors='ignore'
) |
股票 日线/分钟线 动态复权接口
def QA_data_stock_to_fq(__data, type_='01'):
    """股票 日线/分钟线 动态复权接口 -- dynamic adjustment entry point for
    stock daily/minute bars.

    Arguments:
        __data {DataFrame} -- unadjusted bars, indexed either by a
            (date, code) MultiIndex or carrying a 'code' column
        type_ {str} -- '01'/'qfq' forward adjust, anything else backward

    Returns:
        DataFrame -- adjusted bars from _QA_data_stock_to_fq
    """

    def __QA_fetch_stock_xdxr(
            code,
            format_='pd',
            collections=DATABASE.stock_xdxr
    ):
        """Fetch 除权除息 (xdxr) records for `code` from the database,
        falling back to an empty frame with the expected schema."""
        try:
            rows = [item for item in collections.find({'code': code})]
            frame = pd.DataFrame(rows).drop(['_id'], axis=1)
            frame['date'] = pd.to_datetime(frame['date'])
            return frame.set_index(['date', 'code'], drop=False)
        except:
            # no records / no database -- return the empty schema
            return pd.DataFrame(
                data=[],
                columns=[
                    'category',
                    'category_meaning',
                    'code',
                    'date',
                    'fenhong',
                    'fenshu',
                    'liquidity_after',
                    'liquidity_before',
                    'name',
                    'peigu',
                    'peigujia',
                    'shares_after',
                    'shares_before',
                    'songzhuangu',
                    'suogu',
                    'xingquanjia'
                ]
            )

    # a MultiIndex carries the code in its second level; otherwise the
    # bars expose a plain 'code' column
    if isinstance(__data.index, pd.core.indexes.multi.MultiIndex):
        code = __data.index.remove_unused_levels().levels[1][0]
    else:
        code = __data['code'][0]
    return _QA_data_stock_to_fq(
        bfq_data=__data,
        xdxr_data=__QA_fetch_stock_xdxr(code),
        fqtype=type_
    )
Return the bot's response based on the input.
:param statement: An statement object or string.
:returns: A response to the input.
:rtype: Statement
:param additional_response_selection_parameters: Parameters to pass to the
chat bot's logic adapters to control response selection.
:type additional_response_selection_parameters: dict
:param persist_values_to_response: Values that should be saved to the response
that the chat bot generates.
:type persist_values_to_response: dict
def get_response(self, statement=None, **kwargs):
    """
    Return the bot's response based on the input.

    :param statement: An statement object or string.
    :returns: A response to the input.
    :rtype: Statement

    :param additional_response_selection_parameters: Parameters to pass to the
        chat bot's logic adapters to control response selection.
    :type additional_response_selection_parameters: dict

    :param persist_values_to_response: Values that should be saved to the response
        that the chat bot generates.
    :type persist_values_to_response: dict
    """
    Statement = self.storage.get_object('statement')
    additional_response_selection_parameters = kwargs.pop('additional_response_selection_parameters', {})
    persist_values_to_response = kwargs.pop('persist_values_to_response', {})
    # A plain string becomes the statement text; a dict supplies fields directly
    if isinstance(statement, str):
        kwargs['text'] = statement
    if isinstance(statement, dict):
        kwargs.update(statement)
    if statement is None and 'text' not in kwargs:
        raise self.ChatBotException(
            'Either a statement object or a "text" keyword '
            'argument is required. Neither was provided.'
        )
    # Statement-like objects contribute all of their serialized fields
    if hasattr(statement, 'serialize'):
        kwargs.update(**statement.serialize())
    tags = kwargs.pop('tags', [])
    text = kwargs.pop('text')
    input_statement = Statement(text=text, **kwargs)
    input_statement.add_tags(*tags)
    # Preprocess the input statement
    for preprocessor in self.preprocessors:
        input_statement = preprocessor(input_statement)
    # Make sure the input statement has its search text saved
    if not input_statement.search_text:
        input_statement.search_text = self.storage.tagger.get_bigram_pair_string(input_statement.text)
    if not input_statement.search_in_response_to and input_statement.in_response_to:
        input_statement.search_in_response_to = self.storage.tagger.get_bigram_pair_string(input_statement.in_response_to)
    response = self.generate_response(input_statement, additional_response_selection_parameters)
    # Update any response data that needs to be changed
    if persist_values_to_response:
        for response_key in persist_values_to_response:
            response_value = persist_values_to_response[response_key]
            if response_key == 'tags':
                # tags are additive on both the input and the response
                input_statement.add_tags(*response_value)
                response.add_tags(*response_value)
            else:
                setattr(input_statement, response_key, response_value)
                setattr(response, response_key, response_value)
    if not self.read_only:
        self.learn_response(input_statement)
        # Save the response generated for the input
        self.storage.create(**response.serialize())
    return response
Return a response based on a given input statement.
:param input_statement: The input statement to be processed.
def generate_response(self, input_statement, additional_response_selection_parameters=None):
    """
    Return a response based on a given input statement.

    :param input_statement: The input statement to be processed.
    """
    Statement = self.storage.get_object('statement')
    results = []
    result = None
    max_confidence = -1
    # Ask every logic adapter that can handle the input; keep the single
    # most confident answer as the provisional result
    for adapter in self.logic_adapters:
        if adapter.can_process(input_statement):
            output = adapter.process(input_statement, additional_response_selection_parameters)
            results.append(output)
            self.logger.info(
                '{} selected "{}" as a response with a confidence of {}'.format(
                    adapter.class_name, output.text, output.confidence
                )
            )
            if output.confidence > max_confidence:
                result = output
                max_confidence = output.confidence
        else:
            self.logger.info(
                'Not processing the statement using {}'.format(adapter.class_name)
            )

    class ResultOption:
        # small tally holder: a candidate statement and how many adapters chose it
        def __init__(self, statement, count=1):
            self.statement = statement
            self.count = count

    # If multiple adapters agree on the same statement,
    # then that statement is more likely to be the correct response
    if len(results) >= 3:
        result_options = {}
        for result_option in results:
            # key combines text and what it responds to, so identical texts
            # with different contexts are tallied separately
            result_string = result_option.text + ':' + (result_option.in_response_to or '')
            if result_string in result_options:
                result_options[result_string].count += 1
                if result_options[result_string].statement.confidence < result_option.confidence:
                    result_options[result_string].statement = result_option
            else:
                result_options[result_string] = ResultOption(
                    result_option
                )
        most_common = list(result_options.values())[0]
        for result_option in result_options.values():
            if result_option.count > most_common.count:
                most_common = result_option
        # consensus (count > 1) overrides the highest-confidence single answer
        if most_common.count > 1:
            result = most_common.statement
    response = Statement(
        text=result.text,
        in_response_to=input_statement.text,
        conversation=input_statement.conversation,
        persona='bot:' + self.name
    )
    response.confidence = result.confidence
    return response
Learn that the statement provided is a valid response.
def learn_response(self, statement, previous_statement=None):
    """
    Learn that the statement provided is a valid response.

    :param statement: the statement to learn
    :param previous_statement: what it responds to; when omitted, falls back
        to `statement.in_response_to`, then to the latest response in the
        statement's conversation.
    :returns: the record created by the storage adapter
    """
    if not previous_statement:
        previous_statement = statement.in_response_to
    if not previous_statement:
        previous_statement = self.get_latest_response(statement.conversation)
        # FIX: this `.text` extraction applies only to the statement object
        # returned by get_latest_response; running it unconditionally (as the
        # flattened original did) raises AttributeError when
        # `in_response_to` is a plain string.
        if previous_statement:
            previous_statement = previous_statement.text
    previous_statement_text = previous_statement
    if not isinstance(previous_statement, (str, type(None), )):
        statement.in_response_to = previous_statement.text
    elif isinstance(previous_statement, str):
        statement.in_response_to = previous_statement
    self.logger.info('Adding "{}" as a response to "{}"'.format(
        statement.text,
        previous_statement_text
    ))
    # Save the input statement
    return self.storage.create(**statement.serialize())
:returns: A dictionary representation of the statement object.
:rtype: dict
def serialize(self):
    """
    Build a dictionary representation of the statement object.

    For each field a `get_<field>()` formatter method is preferred when
    present; otherwise the raw attribute value is used.

    :rtype: dict
    """
    payload = {}
    for name in self.get_statement_field_names():
        formatter = getattr(self, 'get_{}'.format(name), None)
        payload[name] = formatter() if formatter else getattr(self, name)
    return payload
Imports the specified module based on the
dot notated import path for the module.
def import_module(dotted_path):
    """
    Resolve `dotted_path` (``package.module.attribute``) to the named
    attribute of the imported module.
    """
    import importlib
    module_path, _, attribute_name = dotted_path.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, attribute_name)
:param data: A string or dictionary containing a import_path attribute.
def initialize_class(data, *args, **kwargs):
    """
    Instantiate the class described by `data`.

    :param data: either a dotted import path string, or a dictionary with an
        ``import_path`` key plus constructor keyword arguments.
    """
    if isinstance(data, dict):
        # FIX: pop (not get) the import path so it is not forwarded to the
        # constructor as an unexpected keyword argument.
        import_path = data.pop('import_path')
        data.update(kwargs)
        Class = import_module(import_path)
        return Class(*args, **data)
    else:
        Class = import_module(data)
        return Class(*args, **kwargs)
Raises an exception if validate_class is not a
subclass of adapter_class.
:param validate_class: The class to be validated.
:type validate_class: class
:param adapter_class: The class type to check against.
:type adapter_class: class
:raises: Adapter.InvalidAdapterTypeException
def validate_adapter_class(validate_class, adapter_class):
    """
    Raise an exception unless `validate_class` resolves to a subclass of
    `adapter_class`.

    :param validate_class: The class to be validated (an import path string,
        or a dictionary carrying an ``import_path`` key).
    :type validate_class: class

    :param adapter_class: The class type to check against.
    :type adapter_class: class

    :raises: Adapter.InvalidAdapterTypeException
    """
    from chatterbot.adapters import Adapter
    # A dictionary must at least carry an import path
    if isinstance(validate_class, dict):
        if 'import_path' not in validate_class:
            raise Adapter.InvalidAdapterTypeException(
                'The dictionary {} must contain a value for "import_path"'.format(
                    str(validate_class)
                )
            )
        # continue the check against the referenced class
        validate_class = validate_class.get('import_path')
    resolved = import_module(validate_class)
    if not issubclass(resolved, adapter_class):
        raise Adapter.InvalidAdapterTypeException(
            '{} must be a subclass of {}'.format(
                validate_class,
                adapter_class.__name__
            )
        )
Returns the amount of time taken for a given
chat bot to return a response.
:param chatbot: A chat bot instance.
:type chatbot: ChatBot
:returns: The response time in seconds.
:rtype: float
def get_response_time(chatbot, statement='Hello'):
    """
    Measure how long a given chat bot takes to answer `statement`.

    :param chatbot: A chat bot instance.
    :type chatbot: ChatBot

    :returns: The response time in seconds.
    :rtype: float
    """
    import time
    started = time.time()
    chatbot.get_response(statement)
    elapsed = time.time() - started
    return elapsed
Print progress bar
:param description: Training description
:type description: str
:param iteration_counter: Incremental counter
:type iteration_counter: int
:param total_items: total number items
:type total_items: int
:param progress_bar_length: Progress bar length
:type progress_bar_length: int
:returns: void
:rtype: void
def print_progress_bar(description, iteration_counter, total_items, progress_bar_length=20):
    """
    Render a one-line textual progress bar on stdout.

    :param description: label printed before the bar
    :type description: str
    :param iteration_counter: current iteration count
    :type iteration_counter: int
    :param total_items: total number of items
    :type total_items: int
    :param progress_bar_length: bar width in characters
    :type progress_bar_length: int
    """
    import sys
    fraction = float(iteration_counter) / total_items
    filled = int(round(fraction * progress_bar_length))
    bar = '#' * filled + ' ' * (progress_bar_length - filled)
    sys.stdout.write('\r{0}: [{1}] {2}%'.format(description, bar, int(round(fraction * 100))))
    sys.stdout.flush()
    # finish the line once the last item is reached
    if total_items == iteration_counter:
        print('\r')
Get the first match unit metric object supported by pint library
given a variation of unit metric names (Ex:['HOUR', 'hour']).
:param ureg: unit registry which units are defined and handled
:type ureg: pint.registry.UnitRegistry object
:param unit_variations: A list of strings with names of units
:type unit_variations: str
def get_unit(self, ureg, unit_variations):
    """
    Return the first unit object that `ureg` recognizes among the given
    name variations (e.g. ['HOUR', 'hour']), or None when nothing matches.

    :param ureg: unit registry which units are defined and handled
    :type ureg: pint.registry.UnitRegistry object
    :param unit_variations: A list of strings with names of units
    :type unit_variations: str
    """
    for candidate in unit_variations:
        try:
            return getattr(ureg, candidate)
        except Exception:
            # unknown spelling -- try the next variation
            continue
    return None
Returns the firt match `pint.unit.Unit` object for from_unit and
target_unit strings from a possible variation of metric unit names
supported by pint library.
:param ureg: unit registry which units are defined and handled
:type ureg: `pint.registry.UnitRegistry`
:param from_unit: source metric unit
:type from_unit: str
:param from_unit: target metric unit
:type from_unit: str
def get_valid_units(self, ureg, from_unit, target_unit):
    """
    Resolve `from_unit` and `target_unit` strings to the first matching
    `pint.unit.Unit` objects, trying lower- and upper-case spellings of
    each name.

    :param ureg: unit registry which units are defined and handled
    :type ureg: `pint.registry.UnitRegistry`
    :param from_unit: source metric unit
    :type from_unit: str
    :param target_unit: target metric unit
    :type target_unit: str
    """
    source = self.get_unit(ureg, [from_unit.lower(), from_unit.upper()])
    target = self.get_unit(ureg, [target_unit.lower(), target_unit.upper()])
    return source, target
Returns a response statement from a matched input statement.
:param match: It is a valid matched pattern from the input statement
:type: `_sre.SRE_Match`
def handle_matches(self, match):
    """
    Build a response statement for a matched unit-conversion pattern.

    :param match: It is a valid matched pattern from the input statement
    :type: `_sre.SRE_Match` (with 'number', 'from' and 'target' groups)
    """
    response = Statement(text='')
    from_parsed = match.group("from")
    target_parsed = match.group("target")
    n_statement = match.group("number")
    # "a"/"an" mean a quantity of one
    if n_statement in ('a', 'an'):
        n_statement = '1.0'
    n = mathparse.parse(n_statement, self.language.ISO_639.upper())
    ureg = UnitRegistry()
    from_parsed, target_parsed = self.get_valid_units(ureg, from_parsed, target_parsed)
    if from_parsed is None or target_parsed is None:
        # one of the units could not be resolved -- no confidence in a reply
        response.confidence = 0.0
        return response
    from_value = ureg.Quantity(float(n), from_parsed)
    target_value = from_value.to(target_parsed)
    response.confidence = 1.0
    response.text = str(target_value.magnitude)
    return response
This method is called when a logic adapter is unable to generate any
other meaningful response.
def get_default_response(self, input_statement):
    """
    Fallback used when no logic adapter produced a meaningful response.

    Picks one of the configured default responses, or a random stored
    statement, or echoes the input when the database is empty.
    """
    from random import choice
    if self.default_responses:
        response = choice(self.default_responses)
    else:
        try:
            response = self.chatbot.storage.get_random()
        except StorageAdapter.EmptyDatabaseException:
            response = input_statement
    self.chatbot.logger.info(
        'No known response to the input was found. Selecting a random response.'
    )
    # A randomly selected response carries no confidence
    response.confidence = 0
    return response
Provide an analysis of significant features in the string.
def time_question_features(self, text):
    """
    Provide an analysis of significant features in the string.

    Features: whether each word starts any known sentence, whether each
    word appears anywhere in the known sentences, and per-letter
    counts/presence.
    """
    features = {}
    known_sentences = self.positive + self.negative
    # every word appearing in any known sentence
    all_words = " ".join(known_sentences).split()
    # the leading word of each known sentence
    all_first_words = [sentence.split(' ', 1)[0] for sentence in known_sentences]
    for word in text.split():
        features['first_word({})'.format(word)] = (word in all_first_words)
    for word in text.split():
        features['contains({})'.format(word)] = (word in all_words)
    lowered = text.lower()
    for letter in 'abcdefghijklmnopqrstuvwxyz':
        features['count({})'.format(letter)] = lowered.count(letter)
        features['has({})'.format(letter)] = (letter in lowered)
    return features
Determines whether it is appropriate for this
adapter to respond to the user input.
def can_process(self, statement):
    """
    Determine whether it is appropriate for this adapter to respond to
    the user input.

    The statement is processed once and cached so `process` can reuse the
    result; a confidence of exactly 1 means this adapter should answer.
    """
    result = self.process(statement)
    self.cache[statement.text] = result
    return result.confidence == 1
Takes a statement string.
Returns the equation from the statement with the mathematical terms solved.
def process(self, statement, additional_response_selection_parameters=None):
    """
    Takes a statement string and returns the equation from the statement
    with the mathematical terms solved.

    The returned Statement's confidence is 1 when the expression could be
    evaluated, 0 otherwise.
    """
    from mathparse import mathparse
    input_text = statement.text
    # Reuse (and clear) the result cached by can_process, if any
    if input_text in self.cache:
        cached_result = self.cache[input_text]
        self.cache = {}
        return cached_result
    language_code = self.language.ISO_639.upper()
    # Getting the mathematical terms within the input statement
    expression = mathparse.extract_expression(input_text, language=language_code)
    response = Statement(text=expression)
    try:
        evaluated = mathparse.parse(expression, language=language_code)
        response.text += ' = ' + str(evaluated)
        # The confidence is 1 if the expression could be evaluated
        response.confidence = 1
    except mathparse.PostfixTokenEvaluationException:
        response.confidence = 0
    return response
A filter that eliminates possibly repetitive responses to prevent
a chat bot from repeating statements that it has recently said.
def get_recent_repeated_responses(chatbot, conversation, sample=10, threshold=3, quantity=3):
    """
    A filter that eliminates possibly repetitive responses to prevent a
    chat bot from repeating statements that it has recently said.

    Looks at the last `sample` statements of `conversation` and returns up
    to `quantity` texts occurring at least `threshold` times.
    """
    from collections import Counter
    # the most recent statements from the conversation
    recent = list(chatbot.storage.filter(
        conversation=conversation,
        order_by=['id']
    ))[-sample:]
    occurrences = Counter(statement.text for statement in recent)
    # keep only the sufficiently repeated texts among the n most common
    return [
        text for text, count in occurrences.most_common(quantity)
        if count >= threshold
    ]
:param input_statement: A statement, that closely matches an input to the chat bot.
:type input_statement: Statement
:param response_list: A list of statement options to choose a response from.
:type response_list: list
:param storage: An instance of a storage adapter to allow the response selection
method to access other statements if needed.
:type storage: StorageAdapter
:return: The response statement with the greatest number of occurrences.
:rtype: Statement
def get_most_frequent_response(input_statement, response_list, storage=None):
    """
    :param input_statement: A statement, that closely matches an input to the chat bot.
    :type input_statement: Statement

    :param response_list: A list of statement options to choose a response from.
    :type response_list: list

    :param storage: An instance of a storage adapter to allow the response selection
        method to access other statements if needed.
    :type storage: StorageAdapter

    :return: The response statement with the greatest number of occurrences.
    :rtype: Statement
    """
    logger = logging.getLogger(__name__)
    logger.info('Selecting response with greatest number of occurrences.')
    best_response = None
    best_count = -1
    for candidate in response_list:
        occurrences = len(list(storage.filter(
            text=candidate.text,
            in_response_to=input_statement.text)
        ))
        # `>=` keeps the later candidate on ties, matching prior behavior
        if occurrences >= best_count:
            best_response = candidate
            best_count = occurrences
    return best_response
:param input_statement: A statement, that closely matches an input to the chat bot.
:type input_statement: Statement
:param response_list: A list of statement options to choose a response from.
:type response_list: list
:param storage: An instance of a storage adapter to allow the response selection
method to access other statements if needed.
:type storage: StorageAdapter
:return: Return the first statement in the response list.
:rtype: Statement
def get_first_response(input_statement, response_list, storage=None):
    """
    :param input_statement: A statement, that closely matches an input to the chat bot.
    :type input_statement: Statement

    :param response_list: A list of statement options to choose a response from.
    :type response_list: list

    :param storage: Unused; kept so all selection methods share one signature.
    :type storage: StorageAdapter

    :return: Return the first statement in the response list.
    :rtype: Statement
    """
    logger = logging.getLogger(__name__)
    logger.info('Selecting first response from list of {} options.'.format(
        len(response_list)
    ))
    first = response_list[0]
    return first
:param input_statement: A statement, that closely matches an input to the chat bot.
:type input_statement: Statement
:param response_list: A list of statement options to choose a response from.
:type response_list: list
:param storage: An instance of a storage adapter to allow the response selection
method to access other statements if needed.
:type storage: StorageAdapter
:return: Choose a random response from the selection.
:rtype: Statement
def get_random_response(input_statement, response_list, storage=None):
    """
    :param input_statement: A statement, that closely matches an input to the chat bot.
    :type input_statement: Statement

    :param response_list: A list of statement options to choose a response from.
    :type response_list: list

    :param storage: Unused; kept so all selection methods share one signature.
    :type storage: StorageAdapter

    :return: Choose a random response from the selection.
    :rtype: Statement
    """
    from random import choice
    logger = logging.getLogger(__name__)
    logger.info('Selecting a response from list of {} options.'.format(
        len(response_list)
    ))
    selected = choice(response_list)
    return selected
Compare the two input statements.
:return: The percent of similarity between the text of the statements.
:rtype: float
def compare(self, statement_a, statement_b):
    """
    Compare the two input statements.

    :return: The percent of similarity between the text of the statements.
    :rtype: float
    """
    text_a = statement_a.text
    text_b = statement_b.text

    # A missing or empty text on either side means no similarity
    if not text_a or not text_b:
        return 0

    # Compare case-insensitively
    matcher = SequenceMatcher(None, str(text_a.lower()), str(text_b.lower()))

    # Express the similarity as a decimal percent, rounded to two places
    return round(matcher.ratio(), 2)
Compare the two input statements.
:return: The percent of similarity between the closest synset distance.
:rtype: float
def compare(self, statement_a, statement_b):
    """
    Compare the two input statements.

    :return: The percent of similarity between the closest synset distance.
    :rtype: float
    """
    # Parse both statements with the language model, then delegate
    # the similarity computation to the parsed documents.
    documents = [
        self.nlp(statement.text) for statement in (statement_a, statement_b)
    ]

    return documents[0].similarity(documents[1])
Return the calculated similarity of two
statements based on the Jaccard index.
def compare(self, statement_a, statement_b):
    """
    Return the calculated similarity of two
    statements based on the Jaccard index.

    :return: The Jaccard similarity of the statements' non-stop-word
        lemma sets, in the range 0 to 1.
    :rtype: float
    """
    # Make both strings lowercase
    document_a = self.nlp(statement_a.text.lower())
    document_b = self.nlp(statement_b.text.lower())

    statement_a_lemmas = set(
        token.lemma_ for token in document_a if not token.is_stop
    )
    statement_b_lemmas = set(
        token.lemma_ for token in document_b if not token.is_stop
    )

    # Calculate Jaccard similarity: |A intersection B| / |A union B|
    numerator = len(statement_a_lemmas.intersection(statement_b_lemmas))
    denominator = float(len(statement_a_lemmas.union(statement_b_lemmas)))

    # Guard against division by zero: both lemma sets are empty when
    # each statement contains only stop words (or no tokens at all).
    if not denominator:
        return 0

    return numerator / denominator
Return the class for the statement model.
def get_statement_model(self):
    """
    Return the class for the statement model.

    The returned class carries a reference to this storage adapter on
    its ``storage`` attribute so that statement objects can reach back
    into storage when needed.
    """
    from chatterbot.conversation import Statement

    # Create a storage-aware statement class. A dynamically-created
    # subclass is used instead of assigning ``Statement.storage = self``
    # directly: assigning on the imported class would mutate the shared
    # class object, clobbering the reference for every other storage
    # adapter in the same process.
    return type('Statement', (Statement, ), {'storage': self})
Return Statement object when given data
returned from Mongo DB.
def mongo_to_object(self, statement_data):
    """
    Build and return a Statement object from a raw
    MongoDB document.
    """
    statement_class = self.get_model('statement')

    # Mongo stores the primary key under '_id'; expose it as 'id'
    statement_data['id'] = statement_data['_id']

    return statement_class(**statement_data)
Returns a list of statements in the database
that match the parameters specified.
def filter(self, **kwargs):
    """
    Return a generator of statements in the database
    that match the parameters specified.

    Special keyword arguments (each is removed from the raw Mongo
    query before it is executed):

    * ``page_size`` -- number of documents fetched per query page
      (default 1000).
    * ``order_by`` -- list of field names to sort by; ``created_at``
      is always sorted descending so newer statements come first.
    * ``tags`` -- match statements having any of the given tags.
    * ``exclude_text`` -- list of exact text values to exclude.
    * ``exclude_text_words`` -- exclude statements whose text contains
      any of the given words.
    * ``persona_not_startswith`` -- if truthy, exclude statements
      whose persona matches the bot-persona pattern.
    * ``search_text_contains`` -- match statements whose search_text
      contains any word of the given string.

    All remaining keyword arguments are passed through as a raw
    MongoDB query document.
    """
    import pymongo

    page_size = kwargs.pop('page_size', 1000)
    order_by = kwargs.pop('order_by', None)
    tags = kwargs.pop('tags', [])
    exclude_text = kwargs.pop('exclude_text', None)
    exclude_text_words = kwargs.pop('exclude_text_words', [])
    persona_not_startswith = kwargs.pop('persona_not_startswith', None)
    search_text_contains = kwargs.pop('search_text_contains', None)

    if tags:
        kwargs['tags'] = {
            '$in': tags
        }

    if exclude_text:
        # Convert a plain-string 'text' constraint into an operator
        # document so the $nin clause can be merged into it.
        if 'text' not in kwargs:
            kwargs['text'] = {}
        elif 'text' in kwargs and isinstance(kwargs['text'], str):
            text = kwargs.pop('text')
            kwargs['text'] = {
                '$eq': text
            }
        kwargs['text']['$nin'] = exclude_text

    if exclude_text_words:
        if 'text' not in kwargs:
            kwargs['text'] = {}
        elif 'text' in kwargs and isinstance(kwargs['text'], str):
            text = kwargs.pop('text')
            kwargs['text'] = {
                '$eq': text
            }
        # Exclude any text that contains one of the given words
        exclude_word_regex = '|'.join([
            '.*{}.*'.format(word) for word in exclude_text_words
        ])
        kwargs['text']['$not'] = re.compile(exclude_word_regex)

    if persona_not_startswith:
        if 'persona' not in kwargs:
            kwargs['persona'] = {}
        elif 'persona' in kwargs and isinstance(kwargs['persona'], str):
            persona = kwargs.pop('persona')
            kwargs['persona'] = {
                '$eq': persona
            }
        # NOTE(review): this pattern matches 'bot' followed by zero or
        # more ':' characters, not a literal 'bot:' prefix -- presumably
        # intended; confirm before changing.
        kwargs['persona']['$not'] = re.compile('^bot:*')

    if search_text_contains:
        or_regex = '|'.join([
            '{}'.format(word) for word in search_text_contains.split(' ')
        ])
        kwargs['search_text'] = re.compile(or_regex)

    mongo_ordering = []

    if order_by:
        # Sort so that newer datetimes appear first
        if 'created_at' in order_by:
            order_by.remove('created_at')
            mongo_ordering.append(('created_at', pymongo.DESCENDING, ))

        for order in order_by:
            mongo_ordering.append((order, pymongo.ASCENDING))

    # Cursor.count() was deprecated in PyMongo 3.7 and removed in 4.0;
    # use the collection's count_documents with the same query instead.
    total_statements = self.statements.count_documents(kwargs)

    for start_index in range(0, total_statements, page_size):
        if mongo_ordering:
            for match in self.statements.find(kwargs).sort(mongo_ordering).skip(start_index).limit(page_size):
                yield self.mongo_to_object(match)
        else:
            for match in self.statements.find(kwargs).skip(start_index).limit(page_size):
                yield self.mongo_to_object(match)
Creates a new statement matching the keyword arguments specified.
Returns the created statement.
def create(self, **kwargs):
    """
    Create a new statement from the given keyword arguments
    and return the resulting statement object.
    """
    statement_model = self.get_model('statement')

    # De-duplicate any tags before saving
    if 'tags' in kwargs:
        kwargs['tags'] = list(set(kwargs['tags']))

    if 'search_text' not in kwargs:
        kwargs['search_text'] = self.tagger.get_bigram_pair_string(kwargs['text'])

    if 'search_in_response_to' not in kwargs and kwargs.get('in_response_to'):
        kwargs['search_in_response_to'] = self.tagger.get_bigram_pair_string(
            kwargs['in_response_to']
        )

    inserted = self.statements.insert_one(kwargs)

    # Expose the generated Mongo primary key as 'id'
    kwargs['id'] = inserted.inserted_id

    return statement_model(**kwargs)
Creates multiple statement entries.
def create_many(self, statements):
    """
    Insert multiple statement entries into the database
    in a single bulk operation.
    """
    documents = []

    for statement in statements:
        document = statement.serialize()

        # De-duplicate tags before saving
        document['tags'] = list(set(document.pop('tags', [])))

        if not statement.search_text:
            document['search_text'] = self.tagger.get_bigram_pair_string(statement.text)

        if not statement.search_in_response_to and statement.in_response_to:
            document['search_in_response_to'] = self.tagger.get_bigram_pair_string(
                statement.in_response_to
            )

        documents.append(document)

    self.statements.insert_many(documents)
Returns a random statement from the database
def get_random(self):
    """
    Return a statement chosen at random from the database.

    :raises: EmptyDatabaseException when the database holds no statements.
    """
    from random import randint

    statement_count = self.count()

    if statement_count < 1:
        raise self.EmptyDatabaseException()

    # Skip a random number of documents, then take the next one
    offset = randint(0, statement_count - 1)
    cursor = self.statements.find().limit(1).skip(offset)

    return self.mongo_to_object(list(cursor)[0])
Add a list of strings to the statement as tags.
def add_tags(self, *tags):
    """
    Attach each of the given tag name strings
    to this statement as a Tag object.
    """
    for tag_name in tags:
        self.tags.append(Tag(name=tag_name))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.