index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
30,402,103
|
18505161903/fwshare
|
refs/heads/master
|
/22/backups data.py
|
# -*- coding:utf-8 -*-
import time
import os

DBUSER = 'myadmin'  # database user
DBPASS = 'redhat'  # database password
IP = '192.168.122.11'  # database host
DATA_DIR = '/data'  # backup output directory
PATH_DUMP = '/usr/local/mongodb/bin/mongodump'  # path to the dump command
# FIX: the original interpolated an undefined name `PATH`; the intended
# constant is PATH_DUMP defined just above.
BACKITEMS = ["%s -h %s:27017 -u %s -p %s -o %s" % (PATH_DUMP, IP, DBUSER, DBPASS, DATA_DIR)]


def backData():
    """Run every backup command in BACKITEMS, printing each command and the
    exit status that os.system returns for it."""
    try:
        for item in BACKITEMS:
            print(item)
            print(os.system(item))  # prints the shell exit status
    # FIX: `except RuntimeError,e:` is Python-2-only syntax; this file already
    # uses print() calls, so use the Python 3 `as` form.
    # NOTE(review): os.system does not raise RuntimeError on command failure;
    # this handler is effectively dead but kept to preserve the contract.
    except RuntimeError as e:
        print(str(e))


if __name__ == "__main__":
    backData()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,104
|
18505161903/fwshare
|
refs/heads/master
|
/example/52.py
|
# encoding: utf-8
import pymongo,json
import pandas as pd
from pandas import Series,DataFrame
from scipy.stats import pearsonr
import copy
from datetime import datetime, date, timedelta

pd.set_option('display.width', None)  # unlimited console display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns


def wh(var='NI'):
    """Print, for each broker member trading variety `var`, the Pearson
    correlation between the member's daily net-position change and the daily
    settlement price change.

    Data comes from the local MongoDB `futures` database (`market` and
    `position` collections, dates >= 20181101).  Results are printed;
    nothing is returned.
    """
    client = pymongo.MongoClient('localhost', 27017)
    futures = client.futures
    unit = futures.unit
    position = futures.position
    market = futures.market
    positions = futures.positions
    market = DataFrame(list(market.find({'date': {'$gte': '20181101'}})))
    position = DataFrame(list(position.find({'date': {'$gte': '20181101'}})))
    # Collect the distinct broker names (long and short side) that traded
    # `var` on/after 20181123.
    party_name = position[position['date'] >= '20181123']
    party_name = party_name[party_name['variety'] == var]
    long_party_name = party_name['long_party_name']
    short_party_name = party_name['short_party_name']
    party_name = long_party_name.append(short_party_name)
    party_name = party_name.drop_duplicates(keep='first').dropna()
    long = position.groupby(['date', 'variety', 'long_party_name'])[['long_openIntr']].sum()
    short = position.groupby(['date', 'variety', 'short_party_name'])[['short_openIntr']].sum()
    # Put long and short open interest side by side; a missing side becomes 0.
    frames = [long, short]
    position = pd.concat(frames, axis=1, sort=True).fillna(0).reset_index()
    # Net position = long open interest - short open interest.
    position['净持仓'] = position.apply(lambda x: x['long_openIntr'] - x['short_openIntr'], axis=1)
    # Restore meaningful names after reset_index flattened the MultiIndex.
    position = position.rename(columns={'level_0': 'date', 'level_1': 'variety', 'level_2': 'mem'})
    vars = position[position['variety'] == var]
    for i in party_name:
        mem = vars[vars['mem'] == i]
        # print(mem)
        # Previous observation for this member (row-wise lag of 1).
        position_behind = mem.shift(1)
        # Join today's rows with the lagged rows on the shared index.
        all_position = pd.merge(position, position_behind, right_index=True, left_index=True)
        # print(all_position)
        all_position = all_position[
            ['date_x', 'variety_x', 'mem_x', 'long_openIntr_x', 'short_openIntr_x', '净持仓_x', '净持仓_y']].dropna()
        # print(all_position)
        # Daily change of the net position.
        all_position['净持仓变化量'] = all_position.apply(lambda x: x['净持仓_x'] - x['净持仓_y'], axis=1)
        # print(all_position)
        # Rename the merged columns back to their base names.
        all_position = all_position.rename(
            columns={'date_x': 'date', 'variety_x': 'variety', 'mem_x': 'mam', 'long_openIntr_x': 'long_openIntr',
                     'short_openIntr_x': 'short_openIntr', '净持仓_x': '当日净持仓', '净持仓_y': '昨日净持仓'})
        # Daily price move = settlement close - settlement open.
        # (Recomputed each iteration; could be hoisted out of the loop.)
        market['change'] = market.apply(lambda x: x['set_close'] - x['set_open'], axis=1)
        vars1 = market[market['variety'] == var]
        chg = vars1[['date', 'variety', 'change']]
        # Combine price change with this member's position change.
        hb = pd.merge(chg, all_position, on=['date', 'variety'], how='outer').dropna().drop_duplicates()
        # df=pd.DataFrame(hb)
        chgs = hb['change']
        nets = hb['净持仓变化量']
        # Pearson correlation -> (coefficient, p-value) tuple.
        # NOTE(review): pearsonr raises if `hb` is empty for a member — the
        # commented-out try/except below suggests this has happened before.
        p = pearsonr(chgs, nets)
        print(i, p)
        # print(df.tail(1))
        # chgs=df['change']
        # nets=df['净持仓变化量']
        # try:
        #     p=pearsonr(chgs,nets)[0]
        #     print(i,p)
        # except:
        #     continue


if __name__ == '__main__':
    wh(var='MA')
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,105
|
18505161903/fwshare
|
refs/heads/master
|
/example/futures_main.py
|
import pymongo
import pandas as pd
from pandas import Series,DataFrame
import matplotlib as plt
import matplotlib.dates as mdate
from IPython.core.display import display, HTML
import json

# Widen the notebook cell — this script is meant to run inside Jupyter.
display(HTML("<style>.container { width:100% !important; }</style>"))
# Two settings that make Chinese labels and minus signs render in matplotlib.
# NOTE(review): matplotlib is imported as `plt` (the package, not pyplot);
# rcParams still resolves on the package, but the alias is misleading.
plt.rcParams['font.sans-serif']=['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus']=False  # render the minus sign correctly
pd.set_option('display.width', None)  # unlimited display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
# Connect to the local MongoDB database.
client = pymongo.MongoClient('localhost',27017)
futures = client.futures
marketdata = futures.market2
unit = futures.unit
position = futures.position
main=futures.main
date='20110101'
# Load position and market data from `date` onward.
position = DataFrame(list(position.find({'date': {'$gte': date}})))
marketdata = DataFrame(list(marketdata.find({'date': {'$gte': date}})))
# Dominant-contract selection.
del marketdata['_id']
# Optional pre-filter on open interest:
# marketdata=marketdata.loc[marketdata['open_interest']>1000]
# Per (date, variety), keep the contract with the largest open interest.
marketdata=marketdata.groupby(['date','variety']).apply(lambda x: x[x.open_interest==x.open_interest.max()])
# Drop duplicated contract rows.
marketdata=marketdata.drop_duplicates()
# Drop the date/variety *columns*; after the groupby they also exist as index
# levels, and keeping both triggers ambiguity warnings in the merge below.
del marketdata['date']
del marketdata['variety']
marketdata = marketdata.copy()
print(marketdata.head())
# Net-position change per (date, variety).
netPosition=position.groupby(['date','variety'])[['long_openIntr','short_openIntr']].sum()
netPosition['净持仓']=netPosition.apply(lambda x:x['long_openIntr']-x['short_openIntr'],axis=1)
netPosition['上一日净持仓']=netPosition.groupby('variety')['净持仓'].shift(1)
netPosition['净持仓变化量']=netPosition.apply(lambda x: x['净持仓']-x['上一日净持仓'],axis=1)
netPosition=netPosition.dropna().reset_index()
# print(netPosition.head())
# Merge net-position change with price data (date/variety come from
# marketdata's index levels — requires pandas >= 0.23 merge-on-level support).
df=pd.merge(netPosition,marketdata,on=['date', 'variety'],how='outer')
# Trading signal: 0 when position change and price change agree in sign,
# otherwise +1 on net buying and -1 on net selling.
df['交易信号'] = df.apply(lambda x: 0 if x['净持仓变化量']*x['change']>=0 else 1 if x['净持仓变化量']>0 else -1,axis=1)
# print(df.dropna())
# df=df.groupby(['date','variety']).apply(lambda x: x['交易信号'].shift(1))
df=pd.DataFrame(df.dropna())
# print(df.dropna())
# df.to_csv(r"c:\signal.csv",mode='a',encoding='ANSI',header=True)
# Persist the signal table into the `main` collection, one document per row.
main.insert_many(json.loads(df.T.to_json()).values())
print(json.loads(df.T.to_json()).values())
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,106
|
18505161903/fwshare
|
refs/heads/master
|
/example/测试.py
|
# encoding: utf-8
"""Quick sanity check: dump the first few MA position rows on/after 2019-09-20."""
import datetime
import pandas as pd
from pandas import DataFrame
import json
from pymongo import MongoClient

# Unlimited pandas display so nothing is truncated in the console.
for _opt in ('display.width', 'display.max_rows', 'display.max_columns'):
    pd.set_option(_opt, None)

client = MongoClient('localhost', 27017)
db = client.futures3
position = db.position
date = "20190920"
# Pull everything from `date` onward, then keep only the MA variety.
position = DataFrame(list(position.find({'date': {'$gte': date}})))
position = position[position['variety'] == 'MA']
print(position.head(4))
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,107
|
18505161903/fwshare
|
refs/heads/master
|
/futures/反套利.py
|
# encoding: utf-8
# Spread ("反套利") analysis: plot the price spread between two contracts of
# the same variety against one broker's net position in each contract.
import pandas as pd
import pymongo
from pandas import DataFrame
import matplotlib.pyplot as plt

pd.set_option('display.width', None)  # unlimited display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
client = pymongo.MongoClient('localhost', 27017)
futures = client.futures2
market = futures.market
position = futures.position
begin = '20200320'
# NOTE(review): `end` is never used below — confirm the queries should filter it.
end ='20200508'
symbol1 = 'BU2012'
symbol2 = 'BU2006'
BrokerID = '永安期货'
# Load both contracts' market rows and position rows, keeping the last record
# per key to drop duplicates.
market1 = DataFrame(list(market.find({'date': {'$gte': begin}, 'symbol':symbol1}))).drop_duplicates(['date','variety','symbol'], 'last')
market2 = DataFrame(list(market.find({'date': {'$gte': begin}, 'symbol': symbol2}))).drop_duplicates(['date','variety','symbol'], 'last')
position1 = DataFrame(list(position.find({'date': {'$gte': begin},'symbol': symbol1}))).dropna().drop_duplicates(['date','variety','symbol','long_party_name'], 'last')
position2 = DataFrame(list(position.find({'date': {'$gte': begin},'symbol': symbol2,}))).dropna().drop_duplicates(['date','variety','symbol','long_party_name'], 'last')
# print(position2)
# Close of contract 1, stored under its symbol name.
market1[symbol1] = market1['close']
# Close of contract 2, stored under its symbol name.
market2[symbol2] = market2['close']
#######
# position1: aggregate this broker's long/short open interest in symbol1.
data3=position1[position1['long_party_name'] == BrokerID]
data3=data3[['date','symbol','long_party_name','long_openIntr']]
data3=data3.groupby(['date','symbol','long_party_name'])[['long_openIntr']].sum()
data4=position1[position1['short_party_name'] == BrokerID]
data4=data4[['date','symbol','short_party_name','short_openIntr']]
data4=data4.groupby(['date','symbol','short_party_name'])[['short_openIntr']].sum()
# Union of the long and short sides.
data5=pd.merge(data3,data4, on=['date','symbol'],how='outer')
data5['会员简称']=data5.apply(lambda x: BrokerID,axis=1)
# Fill the missing side with 0.
data5=data5.fillna(0)
# Keep only the displayed fields.
data5=data5[['会员简称','long_openIntr','short_openIntr']]
position1=data5.reset_index(['symbol','date'])
# print(position1)
#########
# position2: the same aggregation for symbol2.
data3=position2[position2['long_party_name'] == BrokerID]
data3=data3[['date','symbol','long_party_name','long_openIntr']]
data3=data3.groupby(['date','symbol','long_party_name'])[['long_openIntr']].sum()
data4=position2[position2['short_party_name'] == BrokerID]
data4=data4[['date','symbol','short_party_name','short_openIntr']]
data4=data4.groupby(['date','symbol','short_party_name'])[['short_openIntr']].sum()
# Union of the long and short sides.
data5=pd.merge(data3,data4, on=['date','symbol'],how='outer')
data5['会员简称']=data5.apply(lambda x: BrokerID,axis=1)
# Fill the missing side with 0.
data5=data5.fillna(0)
# Keep only the displayed fields.
data5=data5[['会员简称','long_openIntr','short_openIntr']]
position2=data5.reset_index(['symbol','date'])
# print(position2)
#########
# Merge the two market tables and compute the price spread.
merge = pd.merge(market1,market2, on=['date'], how='left').sort_values(['date'],ascending=True)
merge = merge[['date',symbol1,symbol2]]
merge['价差'] = merge.apply(lambda x: x[symbol1] - x[symbol2], axis=1)
# Net-position column names, e.g. 'BU2012NP'.
净持仓1=symbol1+'NP'
净持仓2=symbol2+'NP'
position1[净持仓1]=position1.apply(lambda x:x['long_openIntr']-x['short_openIntr'],axis=1)
position2[净持仓2]=position2.apply(lambda x:x['long_openIntr']-x['short_openIntr'],axis=1)
merge1 = pd.merge(position1,position2, on=['date'], how='outer').fillna(0)
merge1=merge1[['date',净持仓1,净持仓2]]
merge2=pd.merge(merge,merge1,on=['date'],how='outer')
merge2  # leftover notebook-style cell echo; has no effect in a script
print(merge2)
# Plot: spread on the left axis, the two net positions on the right axis.
dates=merge2['date']
plt.plot(dates,merge2['价差'],label='价差',color='r')
plt.ylabel('价差')
plt.tick_params(axis='x',rotation=45)  # slant the date tick labels
plt.twinx()
plt.plot(dates,merge2[净持仓1],label=净持仓1,color='b')
plt.plot(dates,merge2[净持仓2],label=净持仓2,color='y')
plt.legend()
plt.ylabel('净持仓')
plt.grid(linestyle="--", alpha=0.3)
plt.title(BrokerID+' '+merge2['date'].iloc[0]+" "+symbol1+" "+symbol2+' '+merge2['date'].iloc[-1])
plt.show()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,108
|
18505161903/fwshare
|
refs/heads/master
|
/example/test_date.py
|
import pymongo

# myclient = pymongo.MongoClient("mongodb://localhost:27017/")
# mydb = myclient["runoobdb"]
# mycol = mydb["sites"]
#
# for x in mycol.find():
#     print(x)

# Count documents per variety in futures.mainSignal.
client = pymongo.MongoClient('localhost', 27017)
futures = client["futures"]
mainSignal = futures["mainSignal"]
# FIX: the aggregation cursor was discarded and the Collection object itself
# was printed; iterate the cursor so the per-variety counts actually show.
for row in mainSignal.aggregate([{"$group": {"_id": "$variety", "num_tutorial": {"$sum": 1}}}]):
    print(row)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,109
|
18505161903/fwshare
|
refs/heads/master
|
/example/fushare1.1.11/basis.py
|
# -*- coding:utf-8 -*-
'''
Created on 2018年07月12日
@author: lowin
@contact: li783170560@126.com
从生意社网站爬取大宗商品现货价格,及相应基差
网站数据含有20110104至今
'''
import requests
import re
import pandas as pd
import datetime
import time
from fushare import cons
from fushare.symbolVar import *
calendar = cons.get_calendar()
def get_spotPrice_daily(start = None, end = None, vars = cons.vars):
    """
    Fetch commodity spot prices and basis over a date range.

    Parameters
    ------
        start: start date — 'YYYY-MM-DD', 'YYYYMMDD' or datetime.date; today when None
        end:   end date — same formats; latest available data date when None
        vars:  list of variety codes such as RB, AL; all varieties when omitted
    Return
    -------
        DataFrame with columns var, SP, nearSymbol, nearPrice, domSymbol,
        domPrice, nearBasis, domBasis, nearBasisRate, domBasisRate, date,
        or None when no data was collected.
    """
    start = cons.convert_date(start) if start is not None else datetime.date.today()
    end = cons.convert_date(end) if end is not None else cons.convert_date(cons.get_latestDataDate(datetime.datetime.now()))
    df_list = []
    while start <= end:
        print(start)
        df = get_spotPrice(start, vars)
        if df is False:
            # Source site blocked us: return whatever was collected so far.
            # FIX: the original called pd.concat on a possibly-empty list,
            # which raises ValueError; return None when nothing was gathered.
            return pd.concat(df_list).reset_index(drop=True) if df_list else None
        elif df is not None:
            df_list.append(df)
        start += datetime.timedelta(days = 1)
    if len(df_list) > 0:
        return pd.concat(df_list).reset_index(drop=True)
def get_spotPrice(date = None,vars = cons.vars):
    """
    Fetch commodity spot prices and basis for a single day from the
    sys100ppi (生意社) site.

    Parameters
    ------
        date: date — 'YYYY-MM-DD', 'YYYYMMDD' or datetime.date; today when None
        vars: list of variety codes such as RB, AL; all varieties when omitted
    Return
    -------
        DataFrame with columns var (variety), SP (spot price), nearSymbol /
        nearPrice (nearest-delivery contract and settlement), domSymbol /
        domPrice (dominant contract and settlement), nearBasis / domBasis,
        nearBasisRate / domBasisRate, date ('YYYYMMDD');
        None on non-trading days; False after 5 failed connection attempts.
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    if date < datetime.date(2011,1,4):
        raise Exception("数据源开始日期为20110104,请修改获取数据时段检查")
    if date.strftime('%Y%m%d') not in calendar:
        print('%s非交易日' %date.strftime('%Y%m%d'))
        return None
    u1 = cons.SYS_SPOTPRICE_LATEST_URL
    u2 = cons.SYS_SPOTPRICE_URL %date.strftime('%Y-%m-%d')
    i = 1
    while True:
        # Try the dated page first, then the "latest" page as a fallback.
        for url in [u2,u1]:
            try:
                r=requests.get(url,timeout=2)
                # The page embeds its data date in the first table's cell (1,1).
                string = pd.read_html(r.text)[0].loc[1,1]
                news = ''.join(re.findall(r'[0-9]',string))
                if news[3:11] == date.strftime('%Y%m%d'):
                    records = _check_information(pd.read_html(r.text)[1],date)
                    records.index = records['var']
                    # Keep only requested varieties actually present.
                    vars_inMarket = [i for i in vars if i in records.index]
                    return records.loc[vars_inMarket,:].reset_index(drop=True)
                else:
                    # Page not updated for this date yet: wait and retry.
                    time.sleep(3)
            except Exception as e:
                print('%s日生意社数据连接失败,第%s次尝试,最多5次' % (date.strftime('%Y-%m-%d'), str(i)))
                i+=1
                if i > 5:
                    print('%s日生意社数据连接失败,已超过5次,您的地址被网站墙了,请保存好返回数据,稍后从该日期起重试' % date.strftime('%Y-%m-%d'))
                    return False
def _check_information(df, date):
    """Normalize the raw spot-price table scraped from the site.

    Maps Chinese variety names to codes, scales JD/FG spot prices to contract
    units, rebuilds contract symbols in each exchange's case convention, and
    computes the basis / basis-rate columns.

    Parameters: df — raw table as returned by pd.read_html; date — datetime.date.
    Returns a cleaned DataFrame (see get_spotPrice for the column list).
    """
    df = df.loc[:, [0, 1, 2, 3, 7, 8]]
    df.columns = ['var', 'SP', 'nearSymbol', 'nearPrice', 'domSymbol', 'domPrice']
    records=pd.DataFrame()
    for string in df['var'].tolist():
        if string == 'PTA':
            news = 'PTA'
        else:
            # Keep only the Chinese characters of the variety name.
            news = ''.join(re.findall(r'[\u4e00-\u9fa5]', string))
        if news != '' and news not in ['商品', '价格', '上海期货交易所', '郑州商品交易所', '大连商品交易所']:
            var = chinese_to_english(news)
            record = df[df['var'] == string]
            record.loc[:,'var'] = var
            record.loc[:,'SP'] = record.loc[:,'SP'].astype(float)
            # Scale quotes to contract units.
            # NOTE(review): factors (JD x500, FG x80) inherited as-is — confirm.
            if var == 'JD':
                record.loc[:,'SP'] = float(record['SP']) * 500
            if var == 'FG':
                record.loc[:,'SP'] = record['SP'] * 80
            records = records.append(record)
    records.loc[:, ['nearPrice', 'domPrice', 'SP']] = records.loc[:, ['nearPrice', 'domPrice', 'SP']].astype(
        'float')
    # FIX: use raw strings for the regex patterns — '\d' and '\g' inside plain
    # string literals are invalid escape sequences (DeprecationWarning on
    # Python 3.6+, error in future versions).  Behavior is unchanged.
    records.loc[:, 'nearSymbol'] = records['nearSymbol'].replace(r'[^0-9]*(\d*)$', r'\g<1>', regex=True)
    records.loc[:, 'domSymbol'] = records['domSymbol'].replace(r'[^0-9]*(\d*)$', r'\g<1>', regex=True)
    records.loc[:, 'nearSymbol'] = records['var'] + records.loc[:, 'nearSymbol'].astype('int').astype('str')
    records.loc[:, 'domSymbol'] = records['var'] + records.loc[:, 'domSymbol'].astype('int').astype('str')
    # SHFE/DCE symbols are lower-case; CZCE uses a 3-digit year-month code.
    records['nearSymbol'] = records['nearSymbol'].apply(lambda x: x.lower() if x[:-4] in cons.market_var['shfe']+cons.market_var['dce'] else x)
    records.loc[:,'domSymbol'] = records.loc[:,'domSymbol'].apply(lambda x: x.lower() if x[:-4] in cons.market_var['shfe']+cons.market_var['dce'] else x)
    records.loc[:,'nearSymbol'] = records.loc[:,'nearSymbol'].apply(lambda x: x[:-4]+x[-3:] if x[:-4] in cons.market_var['czce'] else x)
    records.loc[:,'domSymbol'] = records.loc[:,'domSymbol'].apply(lambda x: x[:-4]+x[-3:] if x[:-4] in cons.market_var['czce'] else x)
    # Basis = contract settlement minus spot; basis rate is the relative form.
    records['nearBasis'] = records['nearPrice'] - records['SP']
    records['domBasis'] = records['domPrice'] - records['SP']
    records['nearBasisRate'] = records['nearPrice']/records['SP']-1
    records['domBasisRate'] = records['domPrice']/records['SP']-1
    records.loc[:, 'date'] = date.strftime('%Y%m%d')
    return records
if __name__ == '__main__':
    df = get_spotPrice_daily(start ='20130327', end ='20180918')
    # NOTE(review): hard-coded Windows output path; this also crashes with
    # AttributeError when the range yields no data (df is None).
    df.to_csv('E://spot2.csv')
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,110
|
18505161903/fwshare
|
refs/heads/master
|
/futures/市值比例.py
|
import pymongo
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import json
from pandas import Series,DataFrame

pd.set_option('display.width', None)  # unlimited display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
# Two settings that make Chinese labels and minus signs render in matplotlib.
plt.rcParams['font.sans-serif']=['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus']=False  # render the minus sign correctly
from IPython.core.display import display, HTML
# Connect to the local MongoDB database.
client = pymongo.MongoClient('localhost',27017)
futures = client.futures2
market = futures.market
unit = futures.unit
position = futures.position
# Use the most recently inserted market date as the single analysis date.
begin = DataFrame(list(market.find({}).sort([('_id', -1)]).limit(1)))
begin = begin['date'][0]
print(begin)
date = begin
# Broker member to analyse.
# BrokerID='海通期货'
BrokerID='永安期货'
# BrokerID='徽商期货'
# BrokerID='浙商期货'
# Load the data.
market = DataFrame(list(market.find({'date': {'$gte': date}})))
unit = DataFrame(list(unit.find()))
position = DataFrame(list(position.find({'date': {'$gte': date}}))).dropna()
# Keep only the fields we display.
market = market.copy()
# Open-interest-weighted "index close" per (date, variety).
market['cv'] = market.apply(lambda x: x['close'] * x['open_interest'], axis=1)
closes = market.groupby(['date', 'variety'])[['cv', 'open_interest']].sum()
# TODO: round close_index to the variety's minimum tick size.
closes['close_index'] = closes['cv'] / closes['open_interest']
market = closes.reset_index()[['date','variety','close_index']]
unit=unit[[ 'variety','unit']]
# Aggregate this broker's contracts.
data3=position[(position['long_party_name'] == BrokerID)]
data3=data3[['date','variety','long_party_name','long_openIntr']]
data3=data3.groupby(['date','variety','long_party_name'])[['long_openIntr']].sum()
print(data3)
data4=position[(position['short_party_name'] == BrokerID)]
data4=data4[['date','variety','short_party_name','short_openIntr']]
data4=data4.groupby(['date','variety','short_party_name'])[['short_openIntr']].sum()
print(data4)
# Union of the long and short sides.
data5=pd.merge(data3,data4, on=['date','variety'],how='outer')
data5['会员简称']=data5.apply(lambda x: BrokerID,axis=1)
data5.tail(100)
# Fill the missing side with 0.
data5=data5.fillna(0)
# Net position = long - short.
data5['净持仓']=data5.apply(lambda x: x['long_openIntr']-x['short_openIntr'],axis=1)
# df['当日涨幅']=df.apply(lambda x: x['收盘']-x['收盘'].shift(1)-1,axis=1)
# Keep only the displayed fields.
data5=data5[['会员简称','long_openIntr','short_openIntr','净持仓']]
data5=data5.reset_index(['variety','date'])
netpostion=data5.set_index('date')
netpostion=data5.pivot_table('净持仓',index='date',columns='variety',fill_value=0)
# print(netpostion)
# Per-contract value = index close * contract unit.
contractValue=pd.merge(market,unit,how='left',sort=False).drop_duplicates()
contractValue['contractValue'] = contractValue.apply(lambda x: x['close_index']*x['unit'],axis=1)
contractValue=contractValue[['date','variety','contractValue']].fillna(0)
# Value substitution via replace(), kept for reference:
# # contractValue=contractValue.replace(['TA'],'PTA')
sz=pd.merge(data5,contractValue,on=['date','variety'],how='left')
# Net position value, in units of 10,000.
sz['净持仓价值']=sz.apply(lambda x: x['净持仓']*x['contractValue']/10000,axis=1)
sznet=sz[['date','variety','会员简称','净持仓价值']]
sz=sznet.sort_values(by='净持仓价值')
sz=sz.pivot_table('净持仓价值',index=['date','会员简称'],columns='variety',fill_value=0)
sz=sz.copy()
# Sector aggregations, kept for reference:
# sz['化工板块']=sz[['RU','MA','V','L','PP','BU','TA']].sum(axis=1)
# sz['油脂板块']=sz[['Y','P','OI']].sum(axis=1)
# sz['有色板块']=sz[['CU','AL','ZN','NI','PB']].sum(axis=1)
# sz['黑色板块']=sz[['RB','HC','ZC','J','JM','I']].sum(axis=1)
# sz['黄金白银']=sz[['AU','AG']].sum(axis=1)
# sz['工业品']=sz[['CU','AL','ZN','NI','PB','RB','FG','RU','L','V','TA','MA','PP','JM','J','ZC','I','BU','HC','SM','SF','FU']].sum(axis=1)
# sz['农产品']=sz[['A','C','M','RM','Y','P','OI','CF','SR','JD','CS','AP']].sum(axis=1)
# sz['商品板块']=sz.sum(axis=1)
# chemical=sz.copy()
# chemical=chemical[['RU','MA','V','L','PP','BU','TA','商品板块','工业品','化工板块']]
# chemical['max']=chemical[['RU','MA','V','L','PP','BU','TA']].idxmax(axis=1)
# chemical['min']=chemical[['RU','MA','V','L','PP','BU']].idxmin(axis=1)
# chemical['20200120':]
# chemical = chemical.reset_index()
# # futures = client.futures
# # flows = futures.chemical
# # flows.insert(json.loads(chemical.T.to_json()).values())
print("计算完毕")
net = sznet.copy()
net = net[net['date']==date]
print(net)
# Total absolute net-position value and signed net-position value.
sum_chg = net['净持仓价值'].apply(lambda x:abs(x)).sum()
net_position= net['净持仓价值'].apply(lambda x:x).sum()
print('总净持仓价值',sum_chg)
# Share of each variety's net-position value in the absolute total.
net['比例(%)'] = sznet.groupby(['date', 'variety', '会员简称'])['净持仓价值'].apply(lambda x: (x / sum_chg) * 100)
print(net.sort_values('净持仓价值'))
sort=net.sort_values('比例(%)',inplace=False)
# Bar chart of the broker's net-position value share per variety.
plt.bar(range(len(sort['比例(%)'])),sort['比例(%)'])
plt.xticks(range(len(sort['variety'])),sort['variety'])
# plt.xlabel('品种')
plt.ylabel('净持仓价值比例')
plt.title(BrokerID+' 资金分布 '+sort['date'].iloc[0]+" 总市值:"+str(int(sum_chg))+" 净持仓市值:"+str(int(net_position)))
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
plt.show()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,111
|
18505161903/fwshare
|
refs/heads/master
|
/example/fushare1.1.11/receipt.py
|
# -*- coding:utf-8 -*-
"""
Created on 2018年07月12日
@author: lowin
@contact: li783170560@126.com
从大连商品交易所、上海商品交易所、郑州商品交易所爬取每日仓单数据
建议下午16点30以后爬取当天数据,避免交易所数据更新不稳定
"""
import requests
import json
import re
import pandas as pd
import datetime
from fushare import cons
from fushare.symbolVar import *
calendar = cons.get_calendar()
from fushare.requests_fun import *
# Hard-coded SHFE receipt snapshots for 20100126 and 20101029 — on those two
# days the exchange page layout was broken (used by get_shfe_reciept_1).
shfe_20100126 = pd.DataFrame({'var':['CU','AL','ZN','RU','FU','AU','RB','WR'],'reciept':[29783,285396,187713,116435,376200,12,145648,0]})
shfe_20101029 = pd.DataFrame({'var':['CU','AL','ZN','RU','FU','AU','RB','WR'],'reciept':[39214,359729,182562,25990,313600,27,36789,0]})
# ----------------------------------------------------------------------
def get_dce_reciept(date = None,vars=cons.vars):
    """
    Fetch registered warehouse-receipt data from the Dalian Commodity Exchange.

    Data starts 20060106 (updated weekly on Fridays); daily from 20090407 on.

    Parameters
    ------
        date: date — 'YYYY-MM-DD', 'YYYYMMDD' or datetime.date; today when None
        vars: list of variety codes such as RB, AL; all varieties when omitted
    Return
    -------
        DataFrame with columns var (variety code), reciept (receipt count,
        int), date ('YYYYMMDD'); None on non-trading days.
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    if date.strftime('%Y%m%d') not in calendar:
        print('%s非交易日' %date.strftime('%Y%m%d'))
        return None
    # NOTE(review): the month is passed zero-based — presumably the DCE URL
    # uses JavaScript-style month indexing; confirm against cons.DCE_RECIEPT_URL.
    url = cons.DCE_RECIEPT_URL % (date.year, date.month - 1, date.day)
    data = pandas_readHtml_link(url, encoding='utf-8')[0]
    records=pd.DataFrame()
    for x in data.to_dict(orient='records'):
        if type(x[0]) == type('a'):
            # Rows ending in '小计' ("subtotal") carry the per-variety total.
            if x[0][-2:] == '小计':
                var = x[0][:-2]
                D = {'var':chinese_to_english(var),'reciept':int(x[3]),'date':date.strftime('%Y%m%d')}
                records = records.append(pd.DataFrame(D,index=[0]))
    if len(records.index) != 0:
        records.index = records['var']
        # Keep only requested varieties that actually appear in the data.
        vars_inMarket = [i for i in vars if i in records.index]
        records = records.loc[vars_inMarket, :]
    return records.reset_index(drop=True)
# ----------------------------------------------------------------------
def get_shfe_reciept_1(date = None,vars = cons.vars):
    """
    Fetch registered warehouse-receipt data from the Shanghai Futures Exchange.

    Valid for 20081006 through 20140518 (inclusive).
    20100126 / 20101029: broken exchange layout — hard-coded DataFrames are
    returned instead.  20100416 / 20130821: data lost at the exchange.

    Parameters / Return: same contract as get_dce_reciept.
    """
    # NOTE(review): when date is None this stays a datetime.date, so the
    # string-calendar test below always fails for today — likely needs
    # .strftime('%Y%m%d') in the fallback branch as well.
    date = cons.convert_date(date).strftime('%Y%m%d') if date is not None else datetime.date.today()
    if date not in calendar:
        print('%s非交易日' %date)
        return None
    if date == '20100126':
        shfe_20100126['date']=date
        return shfe_20100126
    elif date == '20101029':
        shfe_20101029['date'] = date
        return shfe_20101029
    elif date in ['20100416','20130821']:
        print(u'20100416、20130821日期交易所数据丢失')
        return None
    else:
        varList = ['天然橡胶', '沥青仓库', '沥青厂库', '热轧卷板', '燃料油', '白银', '线材', '螺纹钢', '铅', '铜', '铝', '锌', '黄金', '锡', '镍']
        url = cons.SHFE_RECIEPT_URL_1 % date
        data = pandas_readHtml_link(url)[0]
        # Row index of every variety header, and the last data row (the row
        # before the first footnote row containing '注').
        indexs = [x for x in data.index if (data[0].tolist()[x] in varList)]
        lastIndex = [x for x in data.index if '注' in str(data[0].tolist()[x])][0]-1
        records = pd.DataFrame()
        for i in list(range(len(indexs))):
            # Slice this variety's section of the table.
            if i !=len(indexs)-1:
                dataCut = data.loc[indexs[i]:indexs[i+1]-1,:]
            else:
                dataCut = data.loc[indexs[i]:lastIndex,:]
            dataCut = dataCut.fillna(method='pad')
            D={}
            D['var'] = chinese_to_english(dataCut[0].tolist()[0])
            # The last row of the section holds the variety total.
            D['reciept'] = int(dataCut[1].tolist()[-1])
            D['date'] = date
            records = records.append(pd.DataFrame(D,index=[0]))
        if len(records.index) != 0:
            records.index = records['var']
            vars_inMarket = [i for i in vars if i in records.index]
            records = records.loc[vars_inMarket, :]
        return records.reset_index(drop=True)
# ----------------------------------------------------------------------
def get_shfe_reciept_2(date = None,vars = cons.vars):
    """
    Fetch registered warehouse-receipt data from the Shanghai Futures Exchange.

    Valid for 20140519 (inclusive) onward.

    Parameters
    ------
        date: date — 'YYYY-MM-DD', 'YYYYMMDD' or datetime.date; today when None
        vars: list of variety codes such as RB, AL; all varieties when omitted
              (FIX: the default used to be None, which made the
              vars_inMarket comprehension below raise TypeError; cons.vars
              matches every other get_*_reciept function)
    Return
    -------
        DataFrame with columns var, reciept (int), date ('YYYYMMDD');
        None on non-trading days; empty DataFrame when the response
        cannot be parsed.
    """
    # FIX: the fallback branch used to leave `date` as a datetime.date, so the
    # string-calendar membership test below always failed for today's date.
    date = cons.convert_date(date).strftime('%Y%m%d') if date is not None else datetime.date.today().strftime('%Y%m%d')
    if date not in calendar:
        print('%s非交易日' %date)
        return None
    url = cons.SHFE_RECIEPT_URL_2 % date
    r = requests_link(url,encoding='utf-8')
    r.encoding = 'utf-8'
    try:
        context = json.loads(r.text)
    except:
        # Best-effort: unparsable payload yields an empty result.
        return pd.DataFrame()
    data = pd.DataFrame(context['o_cursor'])
    if len(data.columns) <1:
        return pd.DataFrame()
    records = pd.DataFrame()
    for var in set(data['VARNAME'].tolist()):
        dataCut = data[data['VARNAME'] == var]
        # Strip non-word and latin characters, leaving the Chinese name only.
        # (Raw string: '\W' in a plain literal is an invalid escape sequence.)
        D = {'var':chinese_to_english(re.sub(r"\W|[a-zA-Z]", "", var)),'reciept':int(dataCut['WRTWGHTS'].tolist()[-1]),'date':date}
        records = records.append(pd.DataFrame(D,index=[0]))
    if len(records.index) != 0:
        records.index = records['var']
        vars_inMarket = [i for i in vars if i in records.index]
        records = records.loc[vars_inMarket, :]
    return records.reset_index(drop=True)
# ----------------------------------------------------------------------
def get_czce_reciept_1(date = None, vars=cons.vars):
    """
    Fetch registered warehouse-receipt data from the Zhengzhou Commodity Exchange.

    Valid for 20080222 through 20100824 (inclusive).

    Parameters / Return: same contract as get_dce_reciept (vars uses CF, TA, ...).
    """
    # NOTE(review): when date is None this stays a datetime.date, so the
    # string-calendar test below always fails for today — likely needs
    # .strftime('%Y%m%d') in the fallback branch as well.
    date = cons.convert_date(date).strftime('%Y%m%d') if date is not None else datetime.date.today()
    if date not in calendar:
        print('%s非交易日' %date)
        return None
    if date == '20090820':
        # Known bad day at the source.
        return pd.DataFrame()
    url = cons.CZCE_RECIEPT_URL_1 % date
    r = requests_link(url,encoding='utf-8')
    r.encoding = 'utf-8'
    context = r.text
    data = pd.read_html(context)[1]
    records=pd.DataFrame()
    # Each variety section starts at a row whose first cell contains '品种:'.
    indexs= [x for x in data.index if '品种:' in str(data[0].tolist()[x])]
    for i in list(range(len(indexs))):
        # Slice this variety's section of the table.
        if i != len(indexs) - 1:
            dataCut = data.loc[indexs[i]:indexs[i + 1] - 1, :]
            dataCut = dataCut.fillna(method='pad')
        else:
            dataCut = data.loc[indexs[i]:, :]
            dataCut = dataCut.fillna(method='pad')
        if 'PTA' in dataCut[0].tolist()[0]:
            var = 'TA'
        else:
            var = chinese_to_english(re.sub('[A-Z]+', '', dataCut[0].tolist()[0][3:]))
        # CF keeps its total in column 6, every other variety in column 5.
        if var == 'CF':
            reciept = dataCut[6].tolist()[-1]
        else:
            reciept =dataCut[5].tolist()[-1]
        D = {'var':var, 'reciept':int(reciept), 'date':date}
        records = records.append(pd.DataFrame(D,index=[0]))
    if len(records.index) != 0:
        records.index = records['var']
        vars_inMarket = [i for i in vars if i in records.index]
        records = records.loc[vars_inMarket, :]
    return records.reset_index(drop=True)
# ----------------------------------------------------------------------
def get_czce_reciept_2(date = None, vars = cons.vars):
    """
    Fetch registered warehouse-receipt data from the Zhengzhou Commodity
    Exchange (CZCE).  Valid for 20100825 through 20151111 (inclusive).

    Parameters
    ------
        date: date, format YYYY-MM-DD / YYYYMMDD / datetime.date; defaults to today
        vars: list of commodity codes such as CF, TA; defaults to all commodities
    Return
    -------
        DataFrame with columns var (string), reciept (int), date (YYYYMMDD string),
        or None when `date` is not a trading day.
    """
    # BUG FIX: normalize the default date to a 'YYYYMMDD' string; the original
    # kept a datetime.date object, so the calendar check and the URL slicing
    # below (date[:4]) both broke for the default date.
    date = cons.convert_date(date).strftime('%Y%m%d') if date is not None else datetime.date.today().strftime('%Y%m%d')
    if date not in calendar:
        print('%s非交易日' % date)
        return None
    url = cons.CZCE_RECIEPT_URL_2 % (date[:4], date)
    r = requests.get(url)
    r.encoding = 'utf-8'
    data = pd.read_html(r.text)[3:]
    records = pd.DataFrame()
    for dataCut in data:
        if len(dataCut.columns) > 3:
            # drop the trailing footnote rows (first cell contains '注:')
            lastIndexs = [x for x in dataCut.index if '注:' in str(dataCut[0].tolist()[x])]
            if len(lastIndexs) > 0:
                lastIndex = lastIndexs[0] - 1
                dataCut = dataCut.loc[:lastIndex, :]
            if 'PTA' in dataCut[0].tolist()[0]:
                var = 'TA'
            else:
                strings = dataCut[0].tolist()[0]
                string = strings.split(' ')[0][3:]
                var = chinese_to_english(re.sub('[A-Z]+', '', string))
            # the second row of each section holds the real column headers
            dataCut.columns = dataCut.T[1].tolist()
            reciept = dataCut['仓单数量'].tolist()[-1]
            D = {'var': var, 'reciept': int(reciept), 'date': date}
            records = records.append(pd.DataFrame(D, index=[0]))
    if len(records.index) != 0:
        records.index = records['var']
        vars_inMarket = [i for i in vars if i in records.index]
        records = records.loc[vars_inMarket, :]
    return records.reset_index(drop=True)
# ----------------------------------------------------------------------
def get_czce_reciept_3(date = None, vars = cons.vars):
    """
    Fetch registered warehouse-receipt data from the Zhengzhou Commodity
    Exchange (CZCE).  Valid from 20151112 onward.

    Parameters
    ------
        date: date, format YYYY-MM-DD / YYYYMMDD / datetime.date; defaults to today
        vars: list of commodity codes such as CF, TA; defaults to all commodities
    Return
    -------
        DataFrame with columns var (string), reciept (int), date (YYYYMMDD string),
        or None when `date` is not a trading day.
    """
    # BUG FIX: normalize the default date to a 'YYYYMMDD' string; the original
    # kept a datetime.date object, breaking the calendar check, int(date) and
    # date[:4] below for the default date.
    date = cons.convert_date(date).strftime('%Y%m%d') if date is not None else datetime.date.today().strftime('%Y%m%d')
    if date not in calendar:
        print('%s非交易日' % date)
        return None
    url = cons.CZCE_RECIEPT_URL_3 % (date[:4], date)
    r = requests_link(url, encoding='utf-8')
    r.encoding = 'utf-8'
    data = pd.read_html(r.text, encoding='gb2312')
    records = pd.DataFrame()
    if len(data) < 4:
        return records
    if int(date) <= 20171227:
        # the older page layout carries one extra leading table
        data = data[1:]
    for dataCut in data:
        if len(dataCut.columns) > 3:
            # drop the trailing footnote rows (first cell contains '注:')
            lastIndexs = [x for x in dataCut.index if '注:' in str(dataCut[0].tolist()[x])]
            if len(lastIndexs) > 0:
                lastIndex = lastIndexs[0] - 1
                dataCut = dataCut.loc[:lastIndex, :]
            if 'PTA' in dataCut[0].tolist()[0]:
                var = 'TA'
            else:
                strings = dataCut[0].tolist()[0]
                string = strings.split(' ')[0][3:]
                var = chinese_to_english(re.sub('[A-Z]+', '', string))
            dataCut.columns = dataCut.loc[1, :]
            dataCut = dataCut.fillna(method='pad')
            # BUG FIX: narrowed the bare `except:` — only a missing column
            # should fall back to the bonded-receipt column.
            try:
                reciept = dataCut.loc[:, '仓单数量'].tolist()[-1]
            except KeyError:
                reciept = dataCut.loc[:, '仓单数量(保税)'].tolist()[-1]
            D = {'var': var, 'reciept': int(reciept), 'date': date}
            records = records.append(pd.DataFrame(D, index=[0]))
    if len(records.index) != 0:
        records.index = records['var']
        vars_inMarket = [i for i in vars if i in records.index]
        records = records.loc[vars_inMarket, :]
    return records.reset_index(drop=True)
# ----------------------------------------------------------------------
def get_reciept(start=None, end=None, vars=cons.vars):
    """
    Fetch registered warehouse-receipt counts for commodities across the
    three commodity exchanges, dispatching to the era-specific scraper for
    each exchange and date.

    Parameters
    ------
        start: start date, format YYYY-MM-DD / YYYYMMDD / datetime.date; defaults to today
        end:   end date, same formats; defaults to the latest available data date
        vars:  list of commodity codes such as RB, AL; defaults to all commodities
    Return
    -------
        DataFrame with columns:
            var      commodity code  string
            reciept  receipt count   int
            date     date            string YYYYMMDD
    """
    start = cons.convert_date(start) if start is not None else datetime.date.today()
    end = cons.convert_date(end) if end is not None else cons.convert_date(cons.get_latestDataDate(datetime.datetime.now()))
    records = pd.DataFrame()
    while start <= end:
        if start.strftime('%Y%m%d') not in calendar:
            print('%s非交易日' % start.strftime('%Y%m%d'))
        else:
            print(start)
            for market, marketVars in cons.market_var.items():
                # pick the scraper matching this exchange's publication era;
                # f stays None (and is skipped below) before data availability.
                if market == 'dce':
                    if start >= datetime.date(2009, 4, 7):
                        f = get_dce_reciept
                    else:
                        print(u'20090407起,dce每交易日更新仓单数据')
                        f = None
                elif market == 'shfe':
                    if start <= datetime.date(2014, 5, 16) and start >= datetime.date(2008, 10, 6):
                        f = get_shfe_reciept_1
                    elif start > datetime.date(2014, 5, 16):
                        f = get_shfe_reciept_2
                    else:
                        f = None
                        print(u'20081006起,shfe每交易日更新仓单数据')
                elif market == 'czce':
                    if start <= datetime.date(2010, 8, 24) and start >= datetime.date(2008, 3, 3):
                        f = get_czce_reciept_1
                    elif start <= datetime.date(2015, 11, 11) and start > datetime.date(2010, 8, 24):
                        f = get_czce_reciept_2
                    elif start > datetime.date(2015, 11, 11):
                        f = get_czce_reciept_3
                    else:
                        f = None
                        print(u'20080303起,czce每交易日更新仓单数据')
                get_vars = [var for var in vars if var in marketVars]
                # cffex has no warehouse receipts, so it is excluded here
                if market != 'cffex' and get_vars != []:
                    if f is not None:
                        records = records.append(f(start, get_vars))
        start += datetime.timedelta(days=1)
    return records.reset_index(drop=True)
if __name__ == '__main__':
    # Ad-hoc smoke test: fetch SR (white sugar) receipts for Feb 2020 and dump to CSV.
    d = get_reciept(start='20200201', end='20200221', vars = ['SR'])
    d.to_csv('E://reciept.csv')
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,112
|
18505161903/fwshare
|
refs/heads/master
|
/22/import data.py
|
# -*- coding:utf-8 -*-
# MongoDB restore helper: builds the mongorestore command line(s) to import
# a previously dumped backup into the target host.
import time
import os
DBUSER='myadmin'     # credentials (unused by the restore command below)
DBPASS='redhat'
IP='192.168.122.1'   # host to restore the data into
DATA_DIR='/data'     # directory holding the mongodump output
PATH_RES='/usr/local/mongodb/bin/mongorestore'   # mongorestore binary path
BACKITEMS=[
    "%s -h %s:27017 --dir %s" % (PATH_RES,IP,DATA_DIR)
]
def backData():
    """Run each mongorestore command in BACKITEMS, printing the command and its exit status."""
    try:
        for item in BACKITEMS:
            print(item)
            # os.system returns the shell exit status; echo it for the operator
            print(os.system(item))
    # BUG FIX: the original used Python-2 `except RuntimeError,e:` syntax,
    # which is a SyntaxError on Python 3 (where the print() calls belong).
    except RuntimeError as e:
        print(str(e))
if __name__=="__main__":
    backData()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,113
|
18505161903/fwshare
|
refs/heads/master
|
/22/scrapy/2.py
|
import xlrd
def strs(row):
    """Join the items of *row* into one comma-separated string.

    Each element is converted with str(); no trailing comma is appended.
    Returns '' for an empty row.
    """
    values = ''
    for i in range(len(row)):
        if i == len(row) - 1:
            # BUG FIX: original did `row(i)` — calling a sequence raises TypeError
            values = values + str(row[i])
        else:
            # BUG FIX: original recursed with strs(row[i]) on a scalar cell;
            # convert the cell itself instead
            values = values + str(row[i]) + ','
    return values
# Open the workbook and dump every data row (skipping the header) to a text
# file, one comma-separated line per row.
data = xlrd.open_workbook('d:/2017.xlsx')
# BUG FIX: the original opened the file without a mode (read-only), so
# writelines() below raised; open for append as the original comment intended.
file = open('d:/rb2018.txt', 'a')
table = data.sheets()[0]
nrows = table.nrows
ncols = table.ncols
colnames = table.row_values(0)
print(nrows)
print(ncols)
print(colnames)
for ronum in range(1, nrows):
    row = table.row_values(ronum)
    # BUG FIX: the original passed `nrows` (an int) instead of the row just read
    values = strs(row)
    file.writelines(values + '\r')
file.close()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,114
|
18505161903/fwshare
|
refs/heads/master
|
/opendatatools/realestate/realestate_interface.py
|
# encoding: utf-8
from .lianjia_agent import LianjiaAgent
lianjia_agent = LianjiaAgent()
def set_proxies(proxies):
    """Forward the proxy configuration to the shared module-level LianjiaAgent."""
    lianjia_agent.set_proxies(proxies)
def get_esf_list_lianjia(city, max_page_no = 100):
    """Fetch the second-hand-housing listing for *city* via the shared agent, up to *max_page_no* pages."""
    return lianjia_agent.get_esf_list(city, max_page_no)
def get_esf_list_by_distinct_lianjia(city, distinct, max_page_no = 100):
    """Fetch the second-hand-housing listing for one district of *city*, up to *max_page_no* pages."""
    return lianjia_agent.get_esf_list_by_distinct(city, distinct, max_page_no)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,115
|
18505161903/fwshare
|
refs/heads/master
|
/example/futures_receipt.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import sys
import re
import json
import requests
import datetime
HEADER = [u'日期', u'品种', u'期货仓单', u'仓单变化']
OUTPUT_CSV_PATH = "./receipt.csv"
URL_TEMPL = "http://www.shfe.com.cn/data/dailydata/{}dailystock.dat"
def write_to_csv(datas, header):
    """Write *header* plus every row of *datas* to OUTPUT_CSV_PATH as comma-separated lines."""
    if os.path.exists(OUTPUT_CSV_PATH):
        os.remove(OUTPUT_CSV_PATH)
    with open(OUTPUT_CSV_PATH, 'w+') as out:
        # assemble all lines first, then emit them in a single write
        rows = [",".join(header)]
        for record in datas:
            rows.append(",".join(str(field) for field in record))
        out.write("\n".join(rows) + "\n")
def check_proc_params(start_date_str, end_date_str):
    """Validate a YYYY-MM-DD date pair and expand it to a list of YYYYMMDD strings.

    Returns None when either string is malformed; raises Exception (after
    printing a message) when the end date precedes the start date.
    """
    date_pattern = r"^\d{4}-\d{2}-\d{2}$"
    # guard clause: bail out early on malformed input
    if not (re.match(date_pattern, start_date_str) and
            re.match(date_pattern, end_date_str)):
        return None
    start_date = datetime.date(*map(int, start_date_str.split("-")))
    end_date = datetime.date(*map(int, end_date_str.split("-")))
    span = (end_date - start_date).days
    if span < 0:
        print("input params end_date is earlier than start_date")
        raise Exception
    return [(start_date + datetime.timedelta(days=offset)).strftime('%Y%m%d')
            for offset in range(span + 1)]
def get_inventory_data(start_date, end_date):
    """Scrape SHFE daily warehouse-stock pages for each day in [start_date, end_date]
    (both 'YYYY-MM-DD') and write the per-commodity totals to OUTPUT_CSV_PATH.

    404 responses (non-trading days) are skipped silently; other non-200
    statuses are only logged and the day is still parsed.
    """
    print("start_date is {}, end_date is {}".format(start_date, end_date))
    date_list = check_proc_params(start_date, end_date)
    datas = []
    for date_str in date_list:
        url = URL_TEMPL.format(date_str)
        resp = requests.get(url)
        if resp.status_code == 404:
            # no data published for this day (weekend/holiday)
            continue
        elif resp.status_code != 200:
            print("the resp status code of date({}) is {}".format(date_str, resp.status_code))
        jsonObj = json.loads(resp.content.decode('utf-8'))
        tradingday = jsonObj['o_tradingday']
        for idx, l in enumerate(jsonObj['o_cursor']):
            # keep only per-commodity 'Total' rows, not individual warehouses
            if not re.match(r'\S+?\$\$Total$', l['WHABBRNAME']):
                continue
            datas.append([tradingday, l['VARNAME'].split('$$')[0],
                          l['WRTWGHTS'], l['WRTCHANGE']])
    write_to_csv(datas, HEADER)
if __name__ == '__main__':
    # Ad-hoc smoke test: single trading day.
    get_inventory_data('2018-11-07', '2018-11-07')
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,116
|
18505161903/fwshare
|
refs/heads/master
|
/futures/汇总盈亏统计.py
|
# encoding: utf-8
# %matplotlib inline
# Aggregate per-broker futures P&L: for every variety currently in the DB,
# rebuild each broker's net position history against the open-interest-weighted
# index price, accumulate the implied P&L, and print the per-broker totals.
import pymongo, json
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
#
pd.set_option('display.width', None)  # display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
client = pymongo.MongoClient('localhost', 27017)
futures = client.futures2
#
start = '20130801'
end ='20200330'
# var = 'MA'
df = pd.DataFrame()
# all varieties that have a position record on the end date
varlist=DataFrame(list(futures.position.find({"date":end})))['variety'].drop_duplicates()
print(varlist)
for var in varlist:
    try:
        var =var
        position = futures.position
        market = futures.market
        # market1 = futures.p
        market = DataFrame(list(market.find({'date': {'$gte': start}, 'variety': var})))
        position = DataFrame(list(position.find({'date': {'$gte': start}, 'variety': var}))).drop_duplicates(
            ['date', 'variety', 'symbol', 'long_party_name'], 'last')
        # position = position[position['long_party_name'].notna()]
        # positions
        # all member firms appearing on either side at the end date
        party_name = position[position['date'] == end]
        long_party_name = party_name['long_party_name']
        short_party_name = party_name['short_party_name']
        party_name = long_party_name.append(short_party_name).dropna().drop_duplicates()
        # sum long/short open interest and its change per member per day
        long = position.groupby(['date', 'variety', 'long_party_name'])[
            ['long_openIntr', 'long_openIntr_chg']].sum()
        # print(long)
        short = position.groupby(['date', 'variety', 'short_party_name'])[
            ['short_openIntr', 'short_openIntr_chg']].sum()
        # # merge long and short sides
        frames = [long, short]
        position = pd.concat(frames, axis=1, sort=True).fillna(0).reset_index()
        # rename the index levels back to columns
        position = position.rename(columns={'level_0': 'date', 'level_1': 'variety', 'level_2': 'BrokerID'})
        #
        ## market quotes
        market = market.copy()
        # open-interest-weighted index close
        market['cv'] = market.apply(lambda x: x['close'] * x['open_interest'], axis=1)
        closes = market.groupby(['date', 'variety'])[['cv', 'open_interest']].sum()
        closes['close_index'] = closes['cv'] / closes['open_interest']
        # # open-interest-weighted index open
        market['ov'] = market.apply(lambda x: x['open'] * x['open_interest'], axis=1)
        opens = market.groupby(['date', 'variety'])[['ov', 'open_interest']].sum()
        closes['open_index'] = opens['ov'] / opens['open_interest']
        # intraday index change
        closes['change_index'] = closes.apply(lambda x: x['close_index'] - x['open_index'], axis=1)
        closes = closes.reset_index()
        chg = closes[['date', 'variety', 'close_index', 'change_index']]
        # print(chg['change_index'])
        for i in party_name:
            try:
                chg = chg.copy()
                # print(chg)
                chg['BrokerID'] = i
                position1 = position[position['BrokerID'] == i]
                # join this broker's positions onto the index series
                mem = pd.merge(chg, position1, on=['date', 'variety', 'BrokerID'], how='left').fillna(0)
                # mem = merge[merge['BrokerID'] == i]
                # print(mem)
                mem = mem.copy()
                mem['today_net'] = mem.apply(lambda x: x['long_openIntr'] - x['short_openIntr'], axis=1)
                mem['yesterday_net'] = mem.groupby(['variety', 'BrokerID'])['today_net'].shift(1)
                mem['tomorrow_chg'] = mem.groupby(['variety', 'BrokerID'])['change_index'].shift(-1)
                mem['net_chg'] = mem.apply(lambda x: x['today_net'] - x['yesterday_net'], axis=1)
                #
                mem['count'] = mem['net_chg'].count()
                # mem = mem.rename(columns={'long_openIntr': 'long_openIntr', 'long_openIntr_chg': 'long_openIntr_chg', 'short_openIntr': 'short_openIntr','short_openIntr_chg': 'short_openIntr_chg'})
                mem['change'] = mem['close_index'] - mem['close_index'].shift(1)
                # rolling-window correlations between position changes and price moves
                mem['corr'] = mem['net_chg'].rolling(window=240).corr(mem['change_index'])
                mem['corr2'] = mem['net_chg'].rolling(window=240).corr(mem['tomorrow_chg']).shift(1)
                mem['corr3'] = mem['today_net'].rolling(window=240).corr(mem['change'])
                #
                mem['lot'] = 0
                # mem = mem.copy()
                mem['lot'] = mem.apply(lambda x: 0 if x['today_net'] == 0 else 1 if x['today_net'] > 0 else -1, axis=1)
                mem['lot'] = mem['lot'].shift(1).fillna(0)
                # daily P&L = price change times yesterday's net position
                mem['pnl'] = mem['change'] * mem['today_net'].shift(1)
                # mem['fee']=0
                # mem['fee'][mem['lot'] != mem['lot'].shif(1)] = mem['close_index'] * 2*1
                mem['netpnl'] = mem['pnl']
                mem['cumpnl'] = mem['netpnl'].cumsum() / 10000
                # print(mem)
                # mem['date'] = pd.to_datetime(mem['date'])
                # print(mem)
                # plotting (disabled)
                # mem =mem.set_index('date')
                # with pd.plotting.plot_params.use('x_compat', True):  # 方法一
                #     mem[['cumpnl']].plot(color='r',title=mem[u'BrokerID'][0]+" "+var+' '+end+ ' 累计盈亏:'+str(int(mem['cumpnl'].iloc[-1]))+"万 净持仓:"+str(int(mem['today_net'].iloc[-1]))+'手')
                #     mem['today_net'].plot(secondary_y=['today_net'])
                #     plt.xlabel('万')
                #     plt.ylabel('净持仓')
                #
                #
                # plt.rcParams['font.sans-serif'] = ['SimHei']  # 用来正常显示中文标签
                # plt.rcParams['axes.unicode_minus'] = False  # 用来正常显示负号
                # plt.show()
                # plt.plot(mem['cumpnl'])
                # print(mem)
                # keep only the final row (latest cumulative state)
                mem = mem[-1:]
                # print(mem)
                # mem['start'] = mem['date'].values[0]
                # print(mem)
                #
                # market1.insert(json.loads(mem.T.to_json()).values())
                # print(json.loads(mem.T.to_json()).values())
                # ends = mem[mem['date'] == end]
                # ends=ends.copy()
                f = mem[['variety', 'BrokerID', 'corr', 'corr2', 'today_net', 'net_chg', 'corr3', 'cumpnl']].sort_values(
                    'cumpnl', inplace=False)  # [['date','variety','BrokerID','corr','corr2','cumpnl']]
                # print(f)
                flows = f.rename(columns={'today_net': '净持仓', 'cumpnl': '累计盈亏', 'net_chg': '净持仓变化量', 'corr3': '相关系数'})
                f = flows[['variety', 'BrokerID', '净持仓', '累计盈亏']]
                df1 = pd.DataFrame(f)
                df = df.append(df1)
                # print(df)
            except:
                # NOTE(review): bare except silently skips brokers whose data
                # is malformed — consider logging the failure
                continue
    except:
        # NOTE(review): bare except silently skips whole varieties on error
        continue
print(df)
df = df[['variety','BrokerID','累计盈亏']].drop_duplicates()
sum = df.groupby('BrokerID')['累计盈亏'].sum()
# sum = sum.sort_values('累计盈亏', inplace=False)
print(sum)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,117
|
18505161903/fwshare
|
refs/heads/master
|
/example/fushare1.1.11/cot.py
|
# -*- coding:utf-8 -*-
"""
Created on 2018年07月18日
@author: lowin
@contact: li783170560@126.com
从大连商品交易所、上海商品交易所、郑州商品交易所、中金交易所爬取前20会员持仓数据
建议下午16点30以后爬取当天数据,避免交易所数据更新不稳定
郑州交易所格式分为三类
"""
import json
from bs4 import BeautifulSoup
from io import StringIO
import datetime
from fushare.requests_fun import *
from fushare.symbolVar import *
# Trading-day calendar used to skip non-trading dates.
calendar = cons.get_calendar()
# Shared column layout for all exchanges' member-ranking tables.
rank_columns = ['vol_party_name', 'vol', 'vol_chg','long_party_name', 'long_openIntr',
                'long_openIntr_chg', 'short_party_name', 'short_openIntr', 'short_openIntr_chg']
# Numeric columns that get cleaned (comma removal, '-' -> 0) and cast to int.
intColumns = ['vol', 'vol_chg', 'long_openIntr', 'long_openIntr_chg', 'short_openIntr', 'short_openIntr_chg']
def get_rank_sum_daily(start=None, end=None, vars=cons.vars):
    """
    Fetch the top-5/10/15/20 member position-ranking sums from all four
    futures exchanges for every trading day in [start, end].

    Note 1: SHFE and CFFEX only publish per-contract rankings, so the
    per-commodity figures are sums over contracts, not true commodity ranks.
    Note 2: DCE only publishes per-commodity rankings, not per-contract.

    Parameters
    ------
        start: start date, YYYY-MM-DD / YYYYMMDD / datetime.date; defaults to today
        end:   end date, same formats; defaults to the latest available data date
        vars:  list of commodity codes such as RB, AL; defaults to all
    Return
    -------
        DataFrame with columns symbol, var, vol_top5, vol_chg_top5,
        long_openIntr_top5, ..., vol_top20, ..., date (YYYYMMDD string).
    """
    start = cons.convert_date(start) if start is not None else datetime.date.today()
    end = cons.convert_date(end) if end is not None else cons.convert_date(cons.get_latestDataDate(datetime.datetime.now()))
    records = pd.DataFrame()
    while start <= end:
        print(start)
        if start.strftime('%Y%m%d') in calendar:
            data = get_rank_sum(start, vars)
            if data is False:
                # exchange refused the connection repeatedly — return the partial result
                print('%s日交易所数据连接失败,已超过20次,您的地址被网站墙了,请保存好返回数据,稍后从该日期起重试' % start.strftime('%Y-%m-%d'))
                return records.reset_index(drop=True)
            records = records.append(data)
        else:
            print('%s非交易日' % start.strftime('%Y%m%d'))
        start += datetime.timedelta(days=1)
    return records.reset_index(drop=True)
def get_rank_sum(date = None, vars=cons.vars):
    """
    Fetch the top-5/10/15/20 member position-ranking sums from all four
    futures exchanges for a single date.

    Note 1: SHFE and CFFEX only publish per-contract rankings, so the
    per-commodity figures are sums over contracts, not true commodity ranks.
    Note 2: DCE only publishes per-commodity rankings, not per-contract.

    Parameters
    ------
        date: date, YYYY-MM-DD / YYYYMMDD / datetime.date; defaults to today
        vars: list of commodity codes such as RB, AL; defaults to all
    Return
    -------
        DataFrame with columns symbol, var, vol_top5, ..., date; None on a
        non-trading day; False when an exchange connection failed.
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    if date.strftime('%Y%m%d') not in calendar:
        return None
    # split the requested varieties by the exchange that lists them
    dce_var = [i for i in vars if i in cons.market_var['dce']]
    shfe_var = [i for i in vars if i in cons.market_var['shfe']]
    czce_var = [i for i in vars if i in cons.market_var['czce']]
    cffex_var = [i for i in vars if i in cons.market_var['cffex']]
    D = {}
    if len(dce_var) > 0:
        data = get_dce_rank_table(date, dce_var)
        if data is False:
            return False
        D.update(data)
    if len(shfe_var) > 0:
        data = get_shfe_rank_table(date, shfe_var)
        if data is False:
            return False
        D.update(data)
    if len(czce_var) > 0:
        data = get_czce_rank_table(date, czce_var)
        if data is False:
            return False
        D.update(data)
    if len(cffex_var) > 0:
        data = get_cffex_rank_table(date, cffex_var)
        if data is False:
            return False
        D.update(data)
    records = pd.DataFrame()
    # NOTE(review): the loop variable `symbol` is shadowed by the inner loop,
    # and `D` is rebound to the record dict inside the loop (the items() view
    # still iterates the original dict) — intricate but apparently intentional.
    for symbol, table in D.items():
        table = table.applymap(lambda x: 0 if x == '' else x)
        for symbol in set(table['symbol']):
            var = symbol2varietie(symbol)
            if var in vars:
                tableCut = table[table['symbol'] == symbol]
                tableCut['rank'] = tableCut['rank'].astype('float')
                # top-N slices by published rank
                tableCut_top5 = tableCut[tableCut['rank'] <= 5]
                tableCut_top10 = tableCut[tableCut['rank'] <= 10]
                tableCut_top15 = tableCut[tableCut['rank'] <= 15]
                tableCut_top20 = tableCut[tableCut['rank'] <= 20]
                D = {'symbol': symbol, 'var': var,
                     'vol_top5': tableCut_top5['vol'].sum(), 'vol_chg_top5': tableCut_top5['vol_chg'].sum(),
                     'long_openIntr_top5': tableCut_top5['long_openIntr'].sum(),
                     'long_openIntr_chg_top5': tableCut_top5['long_openIntr_chg'].sum(),
                     'short_openIntr_top5': tableCut_top5['short_openIntr'].sum(),
                     'short_openIntr_chg_top5': tableCut_top5['short_openIntr_chg'].sum(),
                     'vol_top10': tableCut_top10['vol'].sum(), 'vol_chg_top10': tableCut_top10['vol_chg'].sum(),
                     'long_openIntr_top10': tableCut_top10['long_openIntr'].sum(),
                     'long_openIntr_chg_top10': tableCut_top10['long_openIntr_chg'].sum(),
                     'short_openIntr_top10': tableCut_top10['short_openIntr'].sum(),
                     'short_openIntr_chg_top10': tableCut_top10['short_openIntr_chg'].sum(),
                     'vol_top15': tableCut_top15['vol'].sum(), 'vol_chg_top15': tableCut_top15['vol_chg'].sum(),
                     'long_openIntr_top15': tableCut_top15['long_openIntr'].sum(),
                     'long_openIntr_chg_top15': tableCut_top15['long_openIntr_chg'].sum(),
                     'short_openIntr_top15': tableCut_top15['short_openIntr'].sum(),
                     'short_openIntr_chg_top15': tableCut_top15['short_openIntr_chg'].sum(),
                     'vol_top20': tableCut_top20['vol'].sum(), 'vol_chg_top20': tableCut_top20['vol_chg'].sum(),
                     'long_openIntr_top20': tableCut_top20['long_openIntr'].sum(),
                     'long_openIntr_chg_top20': tableCut_top20['long_openIntr_chg'].sum(),
                     'short_openIntr_top20': tableCut_top20['short_openIntr'].sum(),
                     'short_openIntr_chg_top20': tableCut_top20['short_openIntr_chg'].sum(),
                     'date': date.strftime('%Y%m%d')
                     }
                records = records.append(pd.DataFrame(D, index=[0]))
    if len(D.items()) > 0:
        # SHFE/CFFEX only publish per-contract rows; synthesize per-commodity
        # totals by summing each commodity's contract rows.
        add_vars = [i for i in cons.market_var['shfe'] + cons.market_var['cffex'] if i in records['var'].tolist()]
        for var in add_vars:
            recordsCut = records[records['var'] == var]
            var_record = pd.DataFrame(recordsCut.sum()).T
            var_record['date'] = date.strftime('%Y%m%d')
            var_record.loc[:, ['var', 'symbol']] = var
            records = records.append(var_record)
    return records.reset_index(drop=True)
def get_shfe_rank_table(date = None, vars = cons.vars):
    """
    Fetch the top-20 member position-ranking detail from the Shanghai Futures
    Exchange (SHFE).  The exchange only publishes per-contract rankings, not
    per-commodity totals.  Data starts 20020107, updated ~16:30 each trading day.

    Parameters
    ------
        date: date, YYYY-MM-DD / YYYYMMDD / datetime.date; defaults to today
        vars: list of commodity codes such as RB, AL; defaults to all
    Return
    -------
        dict mapping contract symbol -> DataFrame with columns:
            rank, vol_party_name, vol, vol_chg, long_party_name, long_openIntr,
            long_openIntr_chg, short_party_name, short_openIntr,
            short_openIntr_chg, symbol, var
        ({} on non-trading days or fetch/parse failure).
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    if date < datetime.date(2002, 1, 7):
        print("shfe数据源开始日期为20020107,跳过")
        return {}
    if date.strftime('%Y%m%d') not in calendar:
        print('%s非交易日' % date.strftime('%Y%m%d'))
        return {}
    url = cons.SHFE_VOLRANK_URL % (date.strftime('%Y%m%d'))
    r = requests_link(url, 'utf-8')
    try:
        context = json.loads(r.text)
    except:
        # bare except is deliberate here: it also covers requests_link
        # returning None (r.text -> AttributeError), not only bad JSON
        return {}
    df = pd.DataFrame(context['o_cursor'])
    # map the exchange's cryptic field names to the shared rank_columns names
    df = df.rename(columns={'CJ1': 'vol', 'CJ1_CHG': 'vol_chg', 'CJ2': 'long_openIntr', 'CJ2_CHG': 'long_openIntr_chg',
                            'CJ3': 'short_openIntr',
                            'CJ3_CHG': 'short_openIntr_chg', 'PARTICIPANTABBR1': 'vol_party_name',
                            'PARTICIPANTABBR2': 'long_party_name',
                            'PARTICIPANTABBR3': 'short_party_name', 'PRODUCTNAME': 'product1', 'RANK': 'rank',
                            'INSTRUMENTID': 'symbol', 'PRODUCTSORTNO': 'product2'})
    if len(df.columns) < 3:
        return {}
    df = df.applymap(lambda x: x.strip() if type(x) == type('') else x)
    df = df.applymap(lambda x: None if x == '' else x)
    df['var'] = df['symbol'].apply(lambda x: symbol2varietie(x))
    # rank <= 0 rows are header/summary artifacts
    df = df[df['rank'] > 0]
    for col in ['PARTICIPANTID1', 'PARTICIPANTID2', 'PARTICIPANTID3', 'product1', 'product2']:
        try:
            del df[col]
        except:
            pass
    get_vars = [var for var in vars if var in df['var'].tolist()]
    D = {}
    for var in get_vars:
        df_var = df[df['var'] == var]
        for symbol in set(df_var['symbol']):
            df_symbol = df_var[df_var['symbol'] == symbol]
            D[symbol] = df_symbol.reset_index(drop=True)
    return D
def _czce_df_read(url, skiprow, encode='utf-8'):
    """
    Fetch a CZCE web page and parse its HTML tables.

    Parameters
    ------
        url:     page URL, string
        skiprow: number of leading rows to skip, int
        encode:  response encoding passed to requests_link
    Return
    -------
        list of DataFrames (one per HTML table on the page)
    """
    r = requests_link(url, encode)
    # NOTE(review): `tupleize_cols` was removed from pandas.read_html in
    # pandas 0.25 — this call requires an older pandas; confirm pinned version.
    data = pd.read_html(r.text, match='.+', flavor=None, header=0, index_col=0, skiprows=skiprow, attrs=None,
                        parse_dates=False, tupleize_cols=False, thousands=', ', encoding="gbk", decimal='.',
                        converters=None, na_values=None, keep_default_na=True)
    return data
def get_czce_rank_table(date = None, vars = cons.vars):
    """
    Fetch the top-20 member position-ranking detail from the Zhengzhou
    Commodity Exchange (CZCE).  The exchange publishes both per-commodity and
    per-contract rankings.  Data starts 20050509, updated ~16:30 each trading
    day.  Three page layouts exist depending on the date (pre-20100825,
    20100826–20151111, post-20151111).

    Parameters
    ------
        date: date, YYYY-MM-DD / YYYYMMDD / datetime.date; defaults to today
        vars: list of commodity codes such as RB, AL; defaults to all
    Return
    -------
        dict mapping symbol -> DataFrame with columns:
            rank, vol_party_name, vol, vol_chg, long_party_name, long_openIntr,
            long_openIntr_chg, short_party_name, short_openIntr,
            short_openIntr_chg, symbol, var
        ({} on non-trading days or when the page yields too few columns).
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    if date < datetime.date(2005, 5, 9):
        print("czce数据源开始日期为20050509,跳过")
        return {}
    if date.strftime('%Y%m%d') not in calendar:
        print('%s非交易日' % date.strftime('%Y%m%d'))
        return {}
    if date <= datetime.date(2010, 8, 25):
        # oldest layout: one table per symbol, symbol names scraped from <b> tags
        url = cons.CZCE_VOLRANK_URL_1 % (date.strftime('%Y%m%d'))
        data = _czce_df_read(url, skiprow=0)
        r = requests_link(url, 'utf-8')
        r.encoding = 'utf-8'
        soup = BeautifulSoup(r.text, 'lxml', from_encoding="gb2312")
        symbols = []
        for link in soup.find_all('b'):
            strings = (str(link).split(' '))
            if len(strings) > 5:
                try:
                    symbol = chinese_to_english(strings[4])
                except:
                    symbol = strings[4]
                symbols.append(symbol)
        D = {}
        for i in range(len(symbols)):
            symbol = symbols[i]
            # tables 0/1 are page chrome; symbol i lives at table i+2
            tableCut = data[i + 2]
            tableCut.columns = rank_columns
            tableCut = tableCut.iloc[:-1, :]
            tableCut.loc[:, 'rank'] = tableCut.index
            # the '合计' (total) row gets the sentinel rank 999
            tableCut.loc['合计', 'rank'] = 999
            tableCut.loc['合计', ['vol_party_name', 'long_party_name', 'short_party_name']] = None
            tableCut.loc[:, 'symbol'] = symbol
            tableCut.loc[:, 'var'] = symbol2varietie(symbol)
            # clean numeric columns: NaN->0, strip thousands commas, '-'->0
            tableCut[intColumns] = tableCut[intColumns].fillna(0)
            tableCut[intColumns] = tableCut[intColumns].astype(str)
            tableCut[intColumns] = tableCut[intColumns].applymap(lambda x: x.replace(',', ''))
            tableCut = tableCut.applymap(lambda x: 0 if x == '-' else x)
            tableCut[intColumns] = tableCut[intColumns].astype(float)
            tableCut[intColumns] = tableCut[intColumns].astype(int)
            D[symbol] = tableCut.reset_index(drop=True)
        return D
    elif date <= datetime.date(2015, 11, 11):
        url = cons.CZCE_VOLRANK_URL_2 % (date.year, date.strftime('%Y%m%d'))
        data = _czce_df_read(url, skiprow=1)[1]
    elif date < datetime.date(2017, 12, 28):
        url = cons.CZCE_VOLRANK_URL_3 % (date.year, date.strftime('%Y%m%d'))
        data = _czce_df_read(url, skiprow=1)[0]
    else:
        url = cons.CZCE_VOLRANK_URL_3 % (date.year, date.strftime('%Y%m%d'))
        data = _czce_df_read(url, skiprow=0)[0]
    if len(data.columns) < 6:
        return {}
    # newer layouts: one big table with per-symbol sections marked by
    # '合约'/'品种' header rows
    table = data.iloc[:, :9]
    table.columns = rank_columns
    table.loc[:, 'rank'] = table.index
    table[intColumns] = table[intColumns].astype(str)
    table[intColumns] = table[intColumns].applymap(lambda x: x.replace(',', ''))
    table = table.applymap(lambda x: 0 if x == '-' else x)
    indexs = [i for i in table.index if '合约' in i or '品种' in i]
    indexs.insert(0, 0)
    D = {}
    for i in range(len(indexs)):
        if indexs[i] == 0:
            tableCut = table.loc[:indexs[i + 1], :]
            string = tableCut.index.name
        elif i < len(indexs) - 1:
            tableCut = table.loc[indexs[i]:indexs[i + 1], :]
            string = tableCut.index[0]
        else:
            tableCut = table.loc[indexs[i]:, :]
            string = tableCut.index[0]
        if 'PTA' in string:
            symbol = 'TA'
        else:
            try:
                symbol = chinese_to_english(find_chinese(re.compile(':(.*) ').findall(string)[0]))
            except:
                symbol = re.compile(':(.*) ').findall(string)[0]
        var = symbol2varietie(symbol)
        if var in vars:
            # keep only the 20 numbered rank rows of this section
            tableCut = tableCut.dropna(how='any').iloc[1:, :]
            tableCut = tableCut.loc[[x for x in tableCut.index if x in [str(i) for i in range(21)]], :]
            tableCut = _tableCut_cal(tableCut, symbol)
            D[symbol] = tableCut.reset_index(drop=True)
    return D
def get_dce_rank_table(date = None, vars = cons.vars):
    """
    Fetch the top-20 member position-ranking detail from the Dalian Commodity
    Exchange (DCE).  Data starts 20060104, updated ~16:30 each trading day.

    Parameters
    ------
        date: date, YYYY-MM-DD / YYYYMMDD / datetime.date; defaults to today
        vars: list of commodity codes; defaults to all
    Return
    -------
        dict mapping commodity code -> DataFrame with columns:
            rank, vol_party_name, vol, vol_chg, long_party_name, long_openIntr,
            long_openIntr_chg, short_party_name, short_openIntr,
            short_openIntr_chg, symbol, var
        ({} on non-trading days, False when the connection failed).
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    if date < datetime.date(2006, 1, 4):
        print(Exception("dce数据源开始日期为20060104,跳过"))
        return {}
    if date.strftime('%Y%m%d') not in calendar:
        print('%s非交易日' % date.strftime('%Y%m%d'))
        return {}
    vars = [i for i in vars if i in cons.market_var['dce']]
    D = {}
    for var in vars:
        # NOTE: the DCE URL uses a zero-based month, hence `date.month - 1`
        url = cons.DCE_VOLRANK_URL % (var.lower(), var.lower(), date.year, date.month - 1, date.day)
        list_60_name = []
        list_60 = []
        list_60_chg = []
        rank = []
        texts = urllib_request_link(url)
        # IDIOM FIX: `texts == None` -> `texts is None`
        if texts is None:
            return False
        if len(texts) > 30:
            # the page is a whitespace-separated table: rows 1-20 are volume,
            # 21-40 long open interest, 41-60 short open interest
            for text in texts:
                line = text.decode('utf8')
                stringlist = line.split()
                try:
                    if int(stringlist[0]) <= 20:
                        list_60_name.append(stringlist[1])
                        list_60.append(stringlist[2])
                        list_60_chg.append(stringlist[3])
                        rank.append(stringlist[0])
                except:
                    # non-data lines (headers/footers) fail int() and are skipped
                    pass
            tableCut = pd.DataFrame({'rank': rank[0:20],
                                     'vol_party_name': list_60_name[0:20],
                                     'vol': list_60[0:20],
                                     'vol_chg': list_60_chg[0:20],
                                     'long_party_name': list_60_name[20:40],
                                     'long_openIntr': list_60[20:40],
                                     'long_openIntr_chg': list_60_chg[20:40],
                                     'short_party_name': list_60_name[40:60],
                                     'short_openIntr': list_60[40:60],
                                     'short_openIntr_chg': list_60_chg[40:60]
                                     })
            tableCut = tableCut.applymap(lambda x: x.replace(',', ''))
            tableCut = _tableCut_cal(tableCut, var)
            D[var] = tableCut.reset_index(drop=True)
    return D
def get_cffex_rank_table(date = None,vars = cons.vars):
    """
    Fetch the top-20 member position-ranking detail from CFFEX
    (China Financial Futures Exchange).

    (The original docstring wrongly said Zhengzhou Commodity Exchange;
    the URL and market filter below are CFFEX.)

    Parameters
    ------
        date: date as 'YYYY-MM-DD' / 'YYYYMMDD' string or datetime.date;
              defaults to today. Data start 2010-04-16 and refresh
              around 16:30 on each trading day.
        vars: list of variety codes; all varieties when left at default.
    Return
    -------
        dict mapping contract symbol -> DataFrame with columns:
            rank                  int
            vol_party_name        member ranked by volume (Chinese)
            vol / vol_chg         volume and its change, int
            long_party_name       member ranked by long open interest
            long_openIntr(_chg)   long open interest and change, int
            short_party_name      member ranked by short open interest
            short_openIntr(_chg)  short open interest and change, int
            symbol                contract symbol, string
            var                   variety code, string
            date                  'YYYYMMDD'
        Returns {} for non-trading days or too-early dates, and False
        when the download fails (kept for backward compatibility).
    """
    vars = [i for i in vars if i in cons.market_var['cffex']]
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    if date < datetime.date(2010, 4, 16):
        print(Exception("cffex数据源开始日期为20100416,跳过"))
        return {}
    if date.strftime('%Y%m%d') not in calendar:
        print('%s非交易日' % date.strftime('%Y%m%d'))
        return {}
    D = {}
    for var in vars:
        url = cons.CFFEX_VOLRANK_URL % (date.strftime('%Y%m'), date.strftime('%d'), var)
        r = requests_link(url, encoding='gbk')
        if r is None:  # download failed after all retries
            return False
        if '网页错误' not in r.text:
            # The CSV payload begins after the '交易日,' header marker.
            table = pd.read_csv(StringIO(r.text.split('\n交易日,')[1]))
            table = table.dropna(how='any')
            table = table.applymap(lambda x: x.strip() if type(x) == type('') else x)
            # CFFEX returns all contracts of the variety in one file;
            # split into one ranking table per contract symbol.
            for symbol in set(table['合约']):
                tableCut = table[table['合约'] == symbol]
                tableCut.columns = ['symbol', 'rank'] + rank_columns
                tableCut = _tableCut_cal(tableCut, symbol)
                D[symbol] = tableCut.reset_index(drop=True)
    return D
def _tableCut_cal(tableCut, symbol):
    """Append a rank-999 totals row and tag the frame with symbol/variety.

    Parameters: tableCut — ranking DataFrame with intColumns + 'rank';
    symbol — contract symbol, mapped to its variety via symbol2varietie.
    Returns the frame with one extra summary row and 'symbol'/'var' columns.
    """
    var = symbol2varietie(symbol)
    # Work on a copy: callers (e.g. the CFFEX path) pass in a slice of a
    # larger frame, and mutating it would trigger SettingWithCopy issues.
    tableCut = tableCut.copy()
    tableCut[intColumns + ['rank']] = tableCut[intColumns + ['rank']].astype(int)
    tableCut_sum = tableCut.sum()
    tableCut_sum['rank'] = 999  # sentinel rank marking the totals row
    for col in ['vol_party_name', 'long_party_name', 'short_party_name']:
        tableCut_sum[col] = None  # member names are meaningless in a sum
    # DataFrame.append was removed in pandas 2.0; pd.concat is equivalent.
    tableCut = pd.concat([tableCut, pd.DataFrame(tableCut_sum).T])
    tableCut['symbol'] = symbol
    tableCut['var'] = var
    return tableCut
if __name__ == '__main__':
    # Manual smoke test: pull the CZCE member ranking for one early date.
    df = get_czce_rank_table('20080221')
    print(df)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,118
|
18505161903/fwshare
|
refs/heads/master
|
/futures/数据库查询删除操作.py
|
# encoding: utf-8
import pymongo,json
import pandas as pd
from pandas import Series,DataFrame
import csv
from scipy.stats import pearsonr
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import time,datetime
# from datetime import datetime
pd.set_option('display.width', None) # 设置字符显示宽度
pd.set_option('display.max_rows', None) # 设置显示最大行
pd.set_option('display.max_columns', None) # 设置显示最大行
# Connect to the local MongoDB and report the date of the newest
# document in futures2.position (sorted by _id descending).
client = pymongo.MongoClient('localhost', 27017)
futures = client.futures2
market = futures.position
# market = DataFrame(list(market.find({'date': {'$gte': '20190601'}})))
# # delete data (disabled)
begin = DataFrame(list(market.find({}).sort([('_id', -1)]).limit(1)))
print(begin.head())
# The single-row frame's 'date' column holds the latest stored date string.
begin = begin['date'][0]
print("lastdate: "+begin)
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
from datetime import datetime
import time
# dr=['2001-1-1','2030-1-1']
# d=pd.to_datetime(dr)
# d=pd.DataFram(d)
# d=pd.date_range("20010101","20200101").strftime('%Y%m%d')
# d=pd.Series(d)
#
# d.replace(',',' ,',inplace=True)
# d.to_json(r'e:\2020.json',orient='records')
#
# print(d)
# df = DataFrame(list(market.find({'date': {'$gte': '20190618'}, 'variety': 'EG'})))
# print(df)
# market.delete_many({'date': {'$gte': '20200208'}})
#
# begin = DataFrame(list(market.find({}).sort([('_id', -1)]).limit(1)))
# begin = begin['date'][0]
# print("lastdate: "+begin)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,119
|
18505161903/fwshare
|
refs/heads/master
|
/example/slopedata.py
|
# encoding: utf-8
import datetime
import pandas as pd
import json
from pymongo import MongoClient
import fushare
pd.set_option('display.width', None) # 设置字符显示宽度
pd.set_option('display.max_rows', None) # 设置显示最大行
pd.set_option('display.max_columns', None) # 设置显示最大行
def get_trade_rank(market = 'SHF', date = None):
    """Dispatch to the fushare rank-table fetcher for *market*.

    market: one of 'SHF', 'DCE', 'CZC', 'CFE'.
    date:   'YYYY-MM-DD'; defaults to yesterday via get_target_date.
    Returns the fetcher's result, or (None, error message) for an
    unsupported market.
    """
    if date is None:
        date = get_target_date(-1, "%Y-%m-%d")
    fetchers = {
        'SHF': fushare.get_shfe_rank_table,
        'DCE': fushare.get_dce_rank_table,
        'CZC': fushare.get_czce_rank_table,
        'CFE': fushare.get_cffex_rank_table,
    }
    fetcher = fetchers.get(market)
    if fetcher is None:
        return None, '不支持的市场类型'
    return fetcher(date)
if __name__ == '__main__':
    # NOTE(review): markets / client / db / position are set up but never
    # used below — looks like leftovers from a sibling script; confirm.
    markets = ['SHF','DCE']#, 'SHF','CZC', 'DCE', 'CFE'
    # connect to the database
    client = MongoClient('localhost', 27017)
    db = client.futures
    position = db.position
    # Iterate every calendar day in [begin, end] and print the per-symbol
    # roll-yield slope table for that day.
    begin = datetime.date(2018, 11, 1)
    end = datetime.date(2018, 11, 2)
    for i in range((end - begin).days + 1):
        day = begin + datetime.timedelta(days=i)
        date = day.strftime('%Y%m%d')
        slope = fushare.get_rollYield_bar(type='symbol', date=date)
        slope['date']=date
        print(slope)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,120
|
18505161903/fwshare
|
refs/heads/master
|
/example/fushare1.1.11/__init__.py
|
# -*- coding:utf-8 -*-
"""
版本改动记录:
1.1.7:
郑商所的仓单数据有些输出的格式是文本,改成int型;
郑商所有一些时间段得到的仓单是仓单变化量,修正此bug
1.1.8:
上期所网站丢失了两个交易日20100416、20130821的数据,在调取此数据时返回None
1.1.9:
基差数据、会员持仓数据、仓单数据,在爬取过早日期时,出现交易所/生意社网还未发布源数据时,跳过并提示用户数据起始日期;
修正了基差数据第二次爬取时,由于用LATEST网址格式,出现日期不匹配跳过的问题;
修改了郑商所会员持仓数据在2010年8月25日前爬取失败的问题
在爬取基差数据和会员持仓数据时,如果出现连续爬取失败超过限制,直接返回已爬过的数据
1.1.10:
增加了原油的中文名称
1.1.11:
上期所抓取合约日线价格时,排除了‘合计’项
1.1.12:
大商所拿到持仓排名的DataFrame的index有重复值,增加reset_index
"""
__version__ = '1.1.12'
__author__ = 'Lowin'
"""
大宗商品现货价格及基差
"""
from fushare.basis import (get_spotPrice_daily,
get_spotPrice)
"""
期货持仓成交排名数据
"""
from fushare.cot import (get_rank_sum_daily,
get_rank_sum,
get_shfe_rank_table,
get_czce_rank_table,
get_dce_rank_table,
get_cffex_rank_table)
"""
大宗商品仓单数据
"""
from fushare.receipt import (get_reciept)
"""
大宗商品仓单数据
"""
from fushare.rollYield import (get_rollYield_bar, get_rollYield)
"""
交易所行情数据日线
"""
from fushare.dailyBar import (get_cffex_daily,
get_czce_daily,
get_shfe_vwap,
get_shfe_daily,
get_dce_daily,
get_future_daily)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,121
|
18505161903/fwshare
|
refs/heads/master
|
/futures/market.py
|
# encoding: utf-8
import pandas as pd
import datetime,time
import json
from pymongo import MongoClient
import fushare as ak
# import fushare as ak
from pandas import DataFrame
pd.set_option('display.width', None) # 设置字符显示宽度
pd.set_option('display.max_rows', None) # 设置显示最大行
pd.set_option('display.max_columns', None) # 设置显示最大行
if __name__ == '__main__':
    # connect to the database
    client = MongoClient('localhost', 27017)
    db = client.futures2
    market = db.market3
    # choose the date range (the auto-resume variant is disabled below)
    # today = datetime.date.today()
    # end=today
    # begin = DataFrame(list(market.find({}).sort([('_id', -1)]).limit(1)))
    # begin = begin['date'][0]
    # begin = time.strptime(begin, "%Y%m%d")
    # year, month, day = begin[:3]
    # begin = datetime.date(year, month, day)
    # begin = begin + datetime.timedelta(days=1)
    begin = datetime.date(2013, 4, 9)
    end = datetime.date(2020, 8, 11)
    for i in range((end - begin).days + 1):
        day = begin + datetime.timedelta(days=i)
        days = day.strftime('%Y%m%d')
        # daily bars of the four exchanges (only DCE enabled here)
        dce =ak.get_dce_daily(days)
        # shf =ak.get_shfe_daily(days)
        # zce =ak.get_czce_daily(days)
        # cff = ak.get_cffex_daily(days)
        frames = [dce]#dce,shf,zce,cff
        try:
            # merge the exchange tables, clean, and store one doc per row
            df2 = pd.concat(frames)
            df2 = df2.dropna(axis=0, how='any')
            df2 = df2.apply(pd.to_numeric, errors="ignore")
            df2 = df2.reset_index()
            df2['date'] = days
            # print(df2.info())
            del df2['index']
            market.insert_many(json.loads(df2.T.to_json()).values())
            print(json.loads(df2.T.to_json()).values())
        # NOTE(review): the bare except treats every failure (network,
        # schema, Mongo) as "no data for this day" — errors are hidden.
        except:
            print(days, '数据异常')
            continue
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,122
|
18505161903/fwshare
|
refs/heads/master
|
/example/futures_position.py
|
# encoding: utf-8
import datetime
import pandas as pd
import json
from pymongo import MongoClient
import fushare
pd.set_option('display.width', None) # 设置字符显示宽度
pd.set_option('display.max_rows', None) # 设置显示最大行
pd.set_option('display.max_columns', None) # 设置显示最大行
def get_trade_rank(market = 'SHF', date = None):
    """Return the member position-ranking tables for one market/day.

    market: 'SHF', 'DCE', 'CZC' or 'CFE'.
    date:   'YYYY-MM-DD'; defaults to yesterday via get_target_date.
    Unsupported markets yield the tuple (None, error message).
    """
    if date is None:
        date = get_target_date(-1, "%Y-%m-%d")
    dispatch = {
        'SHF': fushare.get_shfe_rank_table,
        'DCE': fushare.get_dce_rank_table,
        'CZC': fushare.get_czce_rank_table,
        'CFE': fushare.get_cffex_rank_table,
    }
    handler = dispatch.get(market)
    if handler is None:
        return None, '不支持的市场类型'
    return handler(date)
if __name__ == '__main__':
    markets = ['SHF']#, 'CZC', 'SHF','CFE','DCE'
    # connect to the database
    client = MongoClient('localhost', 27017)
    db = client.futures5
    position = db.position
    for market in markets:
        # NOTE(review): end is BEFORE begin here, so (end - begin).days + 1
        # is negative and the inner loop never runs — confirm intent.
        begin = datetime.date(2019,6,19)
        end = datetime.date(2019, 1, 8)
        for i in range((end - begin).days + 1):
            day = begin + datetime.timedelta(days=i)
            days=day.strftime('%Y%m%d')
            try:
                df = get_trade_rank(market, date=days)
                print(days, market)
                # df maps symbol/variety -> ranking DataFrame
                for key, value in df.items():
                    value['date'] = days
                    if market != 'CZC':
                        print('insert into',key)
                        position.insert(json.loads(value.T.to_json()).values())
                    else:
                        # CZCE publishes both variety and contract rows;
                        # keep only the variety-level aggregate rows.
                        value=value[value['symbol']==value['variety']]
                        print('insert into',key)
                        position.insert(json.loads(value.T.to_json()).values())
            except:
                print(days,market,'数据异常')
                continue
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,123
|
18505161903/fwshare
|
refs/heads/master
|
/futures/净持仓拉数据.py
|
import tushare as ts
import pandas as pd
from pymongo import MongoClient
import json
import datetime
import matplotlib.pyplot as plt
# Pull the JD2006 member-holding ranking from tushare-pro, drop the
# aggregate "期货公司会员" rows, and store the rest in futures3.jd.
client = MongoClient('localhost', 27017)
db = client.futures3
jd = db.jd
pro = ts.pro_api('c0cad8f56caba4e70702d606290d04f88514a6bef046f60d13144151')
# BUG FIX: the original line was missing its closing parenthesis,
# which made the whole script a SyntaxError.
df = pro.fut_holding(symbol='JD2006', exchange='DCE')
df2 = df.fillna(0)
df2 = df2.loc[df2['broker'] != '期货公司会员']
print(df2)
jd.insert_many(json.loads(df2.T.to_json()).values())
print(json.loads(df2.T.to_json()).values())
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,124
|
18505161903/fwshare
|
refs/heads/master
|
/22/pro.py
|
# encoding:utf-8
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plot
import math
#target_url = ("https://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/sonar/sonar.all-data")
#rockVMines = pd.read_csv(target_url ,header=None,prefix="V") #prefix前缀
# Hand-rolled Pearson correlation between rows 1 and 2 of the sonar
# data set, then the full correlation matrix as a heat map.
rockVMines = pd.read_csv('../rockdata.txt',header=None,prefix="V") #prefix: auto-generated column-name prefix
row2 = rockVMines.iloc[1,0:60]
row3 = rockVMines.iloc[2,0:60]
n = len(row2)
mean2 = row2.mean()
mean3 = row3.mean()
# t2 / t3 accumulate the population variance of each row (divide by n);
# NOTE(review): t1 is initialised but never used.
t2=0 ; t3=0;t1=0
for i in range(n):
    t2 += (row2[i] - mean2) * (row2[i] - mean2) / n
    t3 += (row3[i] - mean3) * (row3[i] - mean3) / n
# r23 = covariance / (std2 * std3), summed term by term.
r23=0
for i in range(n):
    r23 += (row2[i] - mean2)*(row3[i] - mean3)/(n* math.sqrt(t2 * t3))
print(r23)
corMat = DataFrame(rockVMines.corr()) #corr: pairwise correlation matrix
print(corMat)
plot.pcolor(corMat)
plot.show()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,125
|
18505161903/fwshare
|
refs/heads/master
|
/futures/newsingnal.py
|
import json
import pandas as pd
import pymongo
# Load a locally exported signal CSV (GBK-encoded), de-duplicate it,
# and bulk-insert the rows into futures.mainSignal.
client = pymongo.MongoClient('localhost', 27017)
futures = client.futures
df2=pd.read_csv(r'E:\fit.csv',',',encoding='gbk')
df = pd.DataFrame(df2)
df=df.dropna().drop_duplicates()
# One Mongo document per row via the transpose-to-json trick.
futures.mainSignal.insert_many(json.loads(df.T.to_json()).values())
print(json.loads(df.T.to_json()).values())
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,126
|
18505161903/fwshare
|
refs/heads/master
|
/example/test4.py
|
# encoding: utf-8
import pandas as pd
import datetime
import json
from pymongo import MongoClient
import tushare as fushare
print(fushare.__version__)
pd.set_option('display.width', None) # 设置字符显示宽度
pd.set_option('display.max_rows', None) # 设置显示最大行
pd.set_option('display.max_columns', None) # 设置显示最大行
if __name__ == '__main__':
    # connect to the database
    client = MongoClient('localhost', 27017)
    db = client.futures
    market = db.market
    # choose the date range
    begin = datetime.date(2019,6, 10)
    end = datetime.date(2019,6, 10)
    pro = fushare.pro_api('c0cad8f56caba4e70702d606290d04f88514a6bef046f60d13144151')
    for i in range((end - begin).days + 1):
        day = begin + datetime.timedelta(days=i)
        days = day.strftime('%Y%m%d')
        # df = pro.fut_daily(trade_date=days, exchange='',
        #                    fields='ts_code,trade_date,pre_close,pre_settle,open,high,low,close,settle,vol')
        df = pro.fut_holding(trade_date=days)
        frames = [df]
        print(df)
        # NOTE(review): the code below expects close/open/volume columns,
        # which fut_daily provides — fut_holding returns holdings data, so
        # this likely always falls into the except branch; confirm.
        try:
            # merge exchange tables
            df2 = pd.concat(frames)
            # compute a volume-weighted variety index close/open
            df2 = df2.dropna(axis=0, how='any')
            # df2 = df2.convert_objects(convert_numeric=True)
            df2 = df2.apply(pd.to_numeric, errors="ignore")
            df2['closev'] = df2['close'] * df2['volume']
            df2['openv'] = df2['open'] * df2['volume']
            df2 = df2.groupby('variety')['volume', 'closev','openv'].sum()
            df2['set_close'] = round(df2['closev'] / df2['volume'])
            df2['set_open'] = round(df2['openv'] / df2['volume'])
            df2['change'] = df2['set_close'] - df2['set_open']
            df2['date'] = days
            df2 = df2.dropna(axis=0, how='any')
            df2 = df2.reset_index()
            df2 = df2[['date', 'variety', 'set_close','set_open','change']]
            # print(df2)
            market.insert(json.loads(df2.T.to_json()).values())
            print(json.loads(df2.T.to_json()).values())
        except:
            print(days, '数据异常')
            continue
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,127
|
18505161903/fwshare
|
refs/heads/master
|
/example/futuresPositionCsv.py
|
# encoding: utf-8
from opendatatools import futures
import datetime, os
import pandas as pd
import time
if __name__ == '__main__':
    # Download member position rankings for a date window, accumulate them
    # into one CSV, then compute per-(date, variety) net positions and the
    # net-position change between the first and last day of the window.
    begin_year = 2018
    end_year = 2018
    begin_month = 7
    end_month = 7
    begin_day = 5
    end_day = 10
    time_sleep = 3
    # markets = ['SHF']
    markets = ['SHF', 'CZC', 'DCE', 'CFE'] # , 'SHF','CZC', 'DCE', 'CFE'
    # start from a clean output file (drive letter case is irrelevant on Windows)
    if os.path.exists(r"c:\FuturesPosition.csv"):
        os.remove(r"c:\FuturesPosition.csv")
    for market in markets:
        begin = datetime.date(begin_year, begin_month, begin_day)
        end = datetime.date(end_year, end_month, end_day)
        for i in range((end - begin).days + 1):
            day = begin + datetime.timedelta(days=i)
            days = day.strftime('%Y-%m-%d')
            print(days)
            # df, msg = futures.get_trade_rank(market, date=days)
            try:
                # df, msg = futures.get_trade_rank(market, date='2014-06-05')
                df, msg = futures.get_trade_rank(market, date=days)
            except:
                print(days, market, '数据异常')
                continue
            # append to the running CSV; write the header only once
            if os.path.exists(r"C:/FuturesPosition.csv"):
                df.to_csv(r"C:/FuturesPosition.csv", mode='a', encoding='ANSI', header=False)
            else:
                df.to_csv(r"C:/FuturesPosition.csv", encoding='ANSI')
            time.sleep(time_sleep)
    df = pd.read_csv('C:/FuturesPosition.csv', encoding='ANSI')
    df = df[['date', 'variety', '持买仓量', '持卖仓量']]
    a = []
    b = []
    aa = []
    bb = []
    cc = []
    # Sanitize long-position counts: strip thousands separators and '-',
    # treat the resulting empty string as zero.
    for i in df['持买仓量']:
        i = str(i)
        i = i.replace(',', '')
        i = i.replace('-', '')
        if i == '':
            a.append(0)
        else:
            i = float(i)
            a.append(i)
    # Same sanitation for short-position counts.
    for i in df['持卖仓量']:
        i = str(i)
        i = i.replace(',', '')
        i = i.replace('-', '')
        if i == '':
            b.append(0)
        else:
            i = float(i)
            b.append(i)
    df['持买仓量'] = a
    df['持卖仓量'] = b
    # net position = total long - total short per (date, variety)
    df = df.groupby(['date', 'variety']).sum()
    df['净持仓'] = df['持买仓量'] - df['持卖仓量']
    df.to_csv('C:/FuturesPosition.csv', encoding='ANSI')
    time.sleep(time_sleep)
    a = []
    b = []
    c = []
    df = pd.read_csv('C:/FuturesPosition.csv', encoding='ANSI')
    # Zero-pad month/day to rebuild 'YYYY/MM/DD' keys.
    # NOTE(review): assumes the CSV 'date' column uses the 'YYYY/MM/DD'
    # format after the Excel/CSV round-trip — confirm against real output.
    if len(str(begin_month)) == 1:
        begin_month = '0' + str(begin_month)
    if len(str(end_month)) == 1:
        end_month = '0' + str(end_month)
    if len(str(begin_day)) == 1:
        begin_day = '0' + str(begin_day)
    if len(str(end_day)) == 1:
        end_day = '0' + str(end_day)
    df1 = df[df['date'] == str(begin_year) + '/' + str(begin_month) + '/' + str(begin_day)]['净持仓']
    df2 = df[df['date'] == str(end_year) + '/' + str(end_month) + '/' + str(end_day)]['净持仓']
    for i in df1:
        a.append(i)
    for i in df2:
        b.append(i)
    # element-wise net-position change between last and first day;
    # assumes both days contain the same varieties in the same order
    for i in range(len(a)):
        c.append(b[i] - a[i])
    df = df[df['date'] == str(end_year) + '/' + str(end_month) + '/' + str(end_day)]
    df['净持仓变化量'] = c
    df.to_csv('C:/FuturesPosition.csv', encoding='ANSI', index=False)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,128
|
18505161903/fwshare
|
refs/heads/master
|
/example/fushare1.1.11/requests_fun.py
|
# -*- coding:utf-8 -*-
"""
Created on 2018年07月18日
@author: lowin
@contact: li783170560@126.com
用requests包爬取网站内容,在链接失败后可重复爬取
"""
import requests
import time
import pandas as pd
import urllib
def requests_link(url, encoding='utf-8'):
    """Fetch *url* with requests, retrying on any failure.

    Parameters
    ------
        url: target URL, string
        encoding: response encoding to set, e.g. 'utf-8', 'gbk'
    Return
    -------
        the requests Response on success, or None after 21 failed
        attempts (5-second pause between retries).
    """
    for attempt in range(1, 22):
        try:
            r = requests.get(url, timeout=5)
            r.encoding = encoding
            return r
        except Exception:
            print('第%s次链接失败最多20次' % str(attempt))
            # Fix: the original slept 5s even before giving up; only
            # sleep when another attempt will actually follow.
            if attempt > 20:
                return None
            time.sleep(5)
def pandas_readHtml_link(url, encoding='utf-8'):
    """Read HTML tables from *url* via pandas.read_html, retrying on failure.

    Parameters
    ------
        url: target URL, string
        encoding: page encoding, e.g. 'utf-8', 'gbk'
    Return
    -------
        the list of DataFrames parsed from the page on success, or None
        after 21 failed attempts (5-second pause between retries).
    """
    for attempt in range(1, 22):
        try:
            return pd.read_html(url, encoding=encoding)
        except Exception:
            print('第%s次链接失败最多20次' % str(attempt))
            # Fix: skip the pointless 5-second sleep on the final failure.
            if attempt > 20:
                return None
            time.sleep(5)
def urllib_request_link(url, encoding='utf-8'):
    """Fetch *url* with urllib and return its raw lines, retrying on failure.

    Parameters
    ------
        url: target URL, string
        encoding: accepted for signature compatibility with the sibling
                  helpers but UNUSED — lines are returned as raw bytes
                  for the caller to decode.
    Return
    -------
        list of bytes lines on success, or None after 21 failed attempts
        (5-second pause between retries).
    """
    for attempt in range(1, 22):
        try:
            return urllib.request.urlopen(url).readlines()
        except Exception:
            print('第%s次链接失败最多20次' % str(attempt))
            # Fix: skip the pointless 5-second sleep on the final failure.
            if attempt > 20:
                return None
            time.sleep(5)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,129
|
18505161903/fwshare
|
refs/heads/master
|
/example/ft2backtest.py
|
import ft2
# Run the ft2 backtest over a locally annotated trade workbook.
file=r'E:\FENG临摹\RB2009临摹1.xlsx'
ft2.backtesting(file)
# fee-aware variant (fill in contract unit / tick / fee before use):
# ft2.backtestingfee(file,unit=,minprice=,fee=)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,130
|
18505161903/fwshare
|
refs/heads/master
|
/22/1.py
|
#-*- coding:utf-8 -*-
import pymongo
from multiprocessing import Pool
client = pymongo.MongoClient('localhost')
# client = MongoClient('localhost', 27017)
db = client.futures
market = db.market  # NOTE(review): shadowed by save_to_mongo's parameter below
# Remove duplicates by upserting host_info_old docs into host_info_new.
def save_to_mongo(market):
    """Upsert one host_info_old document into db['host_info_new'].

    Deduplicates on (variety, date): an existing document with the same
    key is overwritten, otherwise a new one is inserted.
    """
    # BUG FIX: the original indexed the *document* itself
    # (market['host_info_new']); the target collection lives on the
    # module-level `db` handle. Also: legacy Collection.update(spec, doc,
    # True) is deprecated — update_one(..., upsert=True) is equivalent.
    db['host_info_new'].update_one(
        {'variety': market['variety'], 'date': market['date']},
        {'$set': market},
        upsert=True,
    )
    # print(market)
if __name__ == '__main__':
    # Fan the upserts out over a worker-process pool.
    pool = Pool()
    s=pool.map(save_to_mongo, [market for market in db['host_info_old'].find()])
    # serial variant kept for debugging:
    # s=[market for market in db['host_info_old'].find()]
    print(s)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,131
|
18505161903/fwshare
|
refs/heads/master
|
/futures/测试2.py
|
import os
# Print this script's own file name without its extension.
print(os.path.basename(__file__).split('.')[0])
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,132
|
18505161903/fwshare
|
refs/heads/master
|
/example/fushare1.1.11/symbolVar.py
|
# -*- coding:utf-8 -*-
import re
from fushare import cons
def symbol2varietie(symbol):
    """Extract the upper-case variety code from a contract symbol.

    Drops every digit (e.g. 'rb1808' -> 'RB'); the legacy name 'PTA'
    is normalised to its exchange code 'TA'.
    """
    # Fix: use a raw string for the regex — '\D' in a plain string is an
    # invalid escape sequence (SyntaxWarning on modern Python).
    var = ''.join(re.findall(r'\D', symbol)).upper().strip()
    if var == 'PTA':
        var = 'TA'
    return var
def symbolMarket(symbol):
    """Return the exchange key whose variety list contains *symbol*'s variety.

    Returns None implicitly when no exchange matches.
    """
    variety = symbol2varietie(symbol)
    for market_name, variety_list in cons.market_var.items():
        if variety in variety_list:
            return market_name
def find_chinese(string):
    """Return only the Chinese (CJK U+4E00..U+9FA5) characters of *string*."""
    return ''.join(ch for ch in string if u'\u4e00' <= ch <= u'\u9fa5')
def chinese_to_english(var):
    """Translate a Chinese futures-variety name to its exchange code.

    Parameters: var — Chinese variety name (a few codes such as 'PTA'
    appear verbatim in the table).
    Returns the code string; raises ValueError for an unknown name
    (same exception type as the original list.index lookup).
    """
    chineseList=['橡胶','天然橡胶','石油沥青','沥青','沥青仓库','沥青(仓库)','沥青厂库','沥青(厂库)','热轧卷板','热轧板卷','燃料油',
                 '白银','线材','螺纹钢','铅','铜','铝','锌','黄金','钯金','锡','镍',
                 '豆一','大豆','豆二','胶合板','玉米','玉米淀粉','聚乙烯','LLDPE','LDPE','豆粕','豆油','大豆油',
                 '棕榈油','纤维板','鸡蛋','聚氯乙烯','PVC','聚丙烯','PP','焦炭','焦煤','铁矿石',
                 '强麦','强筋小麦',' 强筋小麦','硬冬白麦','普麦','硬白小麦','硬白小麦()','皮棉','棉花','一号棉','白糖','PTA','菜籽油','菜油','早籼稻','早籼','甲醇','柴油','玻璃',
                 '油菜籽','菜籽','菜籽粕','菜粕','动力煤','粳稻','晚籼稻','晚籼','硅铁','锰硅','硬麦','棉纱','苹果',
                 '原油','中质含硫原油']
    englishList=['RU','RU','BU','BU','BU','BU','BU2','BU2','HC','HC','FU','AG','WR','RB','PB','CU','AL','ZN','AU','AU','SN','NI',
                 'A','A','B','BB','C','CS','L','L','L','M','Y','Y',
                 'P','FB','JD','V','V','PP','PP','J','JM','I',
                 'WH','WH','WH','PM','PM','PM','PM','CF','CF','CF','SR','TA','OI','OI','RI','ER','MA','MA','FG',
                 'RS','RS','RM','RM','ZC','JR','LR','LR','SF','SM','WT','CY','AP',
                 'SC','SC']
    # Build a first-wins mapping (setdefault) so any duplicated Chinese
    # name resolves exactly as list.index did, then do an O(1) lookup
    # instead of the original O(n) scan.
    mapping = {}
    for chinese, code in zip(chineseList, englishList):
        mapping.setdefault(chinese, code)
    try:
        return mapping[var]
    except KeyError:
        raise ValueError('%r is not a known variety' % var)
if __name__ == '__main__':
    # Manual smoke tests of the symbol helpers.
    print(chinese_to_english('原油'))
    symbol = 'rb1801'
    var = symbol2varietie('rb1808')
    print(var)
    market = symbolMarket(symbol)
    print(market)
    chi = find_chinese('a对方水电费dc大V')
    print(chi)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,133
|
18505161903/fwshare
|
refs/heads/master
|
/opendatatools/stock/stock_agent.py
|
# encoding: utf-8
from opendatatools.common import RestAgent
from opendatatools.common import date_convert
import json
import pandas as pd
import io
class SHExAgent(RestAgent):
    """REST agent for the Shanghai Stock Exchange (sse.com.cn) query APIs."""
    def __init__(self):
        RestAgent.__init__(self)
        # Referer/User-Agent are required or the SSE endpoints reject requests.
        headers = {
            "Accept": '*/*',
            'Referer': 'http://www.sse.com.cn/market/sseindex/indexlist/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
        }
        self.add_headers(headers)
    def get_index_list(self):
        """Return the SSE index catalogue as a DataFrame, or None on failure."""
        url = 'http://query.sse.com.cn/commonSoaQuery.do'
        data = {
            'sqlId': 'DB_SZZSLB_ZSLB',
        }
        response = self.do_request(url, data)
        rsp = json.loads(response)
        if 'pageHelp' in rsp:
            data = rsp['pageHelp']['data']
            return pd.DataFrame(data)
        else:
            return None
    def get_index_component(self, index):
        """Return the constituent list of one index code, or None on failure."""
        url = 'http://query.sse.com.cn/commonSoaQuery.do'
        data = {
            'sqlId': 'DB_SZZSLB_CFGLB',
            'indexCode' : index,
        }
        response = self.do_request(url, data)
        rsp = json.loads(response)
        if 'pageHelp' in rsp:
            data = rsp['pageHelp']['data']
            return pd.DataFrame(data)
        else:
            return None
    def get_dividend(self, code):
        """Return A-share dividend records for one security code, or None."""
        url = 'http://query.sse.com.cn/commonQuery.do'
        data = {
            'sqlId' : 'COMMON_SSE_GP_SJTJ_FHSG_AGFH_L_NEW',
            'security_code_a' : code,
        }
        response = self.do_request(url, data)
        rsp = json.loads(response)
        if 'result' in rsp:
            data = rsp['result']
            return pd.DataFrame(data)
        else:
            return None
    def get_rzrq_info(self, date):
        """Download the margin-trading (融资融券) xls for *date* (YYYYMMDD).

        Returns (summary DataFrame, detail DataFrame), each tagged with a
        'date' column, or (None, None) when the download fails.
        """
        url = 'http://www.sse.com.cn/market/dealingdata/overview/margin/a/rzrqjygk%s.xls' % (date)
        response = self.do_request(url, None, method='GET', type='binary')
        if response is not None:
            excel = pd.ExcelFile(io.BytesIO(response))
            # The workbook carries one summary sheet and one detail sheet.
            df_total = excel.parse('汇总信息').dropna()
            df_detail = excel.parse('明细信息').dropna()
            df_total['date'] = date
            df_detail['date'] = date
            return df_total, df_detail
        else:
            return None, None
class SZExAgent(RestAgent):
    """REST agent for the Shenzhen Stock Exchange (szse.cn) report endpoints."""
    def __init__(self):
        RestAgent.__init__(self)
    def get_index_list(self):
        """Return the SZSE index catalogue (xls report 1812) as a DataFrame."""
        url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
        data = {
            'SHOWTYPE' : 'xls',
            'CATALOGID' : '1812',
        }
        response = self.do_request(url, data, method='GET', type='binary')
        df = pd.read_excel(io.BytesIO(response))
        return df
    def get_index_component(self, index):
        """Return the constituent list of one index code, or None on failure."""
        url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
        data = {
            'SHOWTYPE': 'xls',
            'CATALOGID': '1747',
            'ZSDM' : index
        }
        response = self.do_request(url, data, method='GET', type='binary')
        if response is not None:
            df = pd.read_excel(io.BytesIO(response))
            return df
        else:
            return None
    def get_rzrq_info(self, date):
        """Return margin-trading (summary, detail) DataFrames for *date*.

        *date* is accepted as YYYYMMDD and converted to the YYYY-MM-DD
        form the SZSE endpoint expects; each non-None frame is tagged
        with a 'date' column.
        """
        date = date_convert(date, '%Y%m%d', '%Y-%m-%d')
        df_total = self._get_rzrq_total(date)
        df_detail = self._get_rzrq_detail(date)
        if df_total is not None:
            df_total['date'] = date
        if df_detail is not None:
            df_detail['date'] = date
        return df_total, df_detail
    def _get_rzrq_total(self, date):
        # Summary tab (tab1) of report 1837_xxpl; None on empty response.
        url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
        data = {
            'SHOWTYPE': 'xls',
            'CATALOGID': '1837_xxpl',
            'TABKEY' : 'tab1',
            "txtDate": date,
        }
        response = self.do_request(url, data, method='GET', type='binary')
        if response is not None and len(response) > 0:
            df = pd.read_excel(io.BytesIO(response))
            return df
        else:
            return None
    def _get_rzrq_detail(self, date):
        # Detail tab (tab2) of report 1837_xxpl; None on empty response.
        url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
        data = {
            'SHOWTYPE': 'xls',
            'CATALOGID': '1837_xxpl',
            'TABKEY': 'tab2',
            "txtDate" : date,
        }
        response = self.do_request(url, data, method='GET', type='binary')
        if response is not None and len(response) > 0:
            df = pd.read_excel(io.BytesIO(response))
            return df
        else:
            return None
class CSIAgent(RestAgent):
    """Agent for the China Securities Index (csindex.com.cn) website."""

    def __init__(self):
        RestAgent.__init__(self)

    def get_index_list(self):
        """Page through the CSI index catalogue and return it as a DataFrame.

        Fetches JSON pages until an empty 'list' is returned.  Returns None
        if any page response lacks the 'list' field entirely.
        """
        url = 'http://www.csindex.com.cn/zh-CN/indices/index'
        page = 1
        result_data = []
        while True:
            data = {
                "data_type" : "json",
                "page" : page,
            }
            # BUG FIX: the original incremented `page` before printing, so the
            # log always reported one page ahead of the page actually fetched.
            print("fetching data at page %d" % (page))
            response = self.do_request(url, data, method='GET')
            rsp = json.loads(response)
            page = page + 1
            if "list" in rsp:
                result_data.extend(rsp['list'])
                # An empty page marks the end of the catalogue.
                if len(rsp['list']) == 0:
                    break
            else:
                return None
        return pd.DataFrame(result_data)

    def get_index_component(self, index):
        """Download the constituent xls for *index*; None when the request fails."""
        url = 'http://www.csindex.com.cn/uploads/file/autofile/cons/%scons.xls' % (index)
        response = self.do_request(url, None, method='GET', type='binary')
        if response is not None:
            df = pd.read_excel(io.BytesIO(response))
            return df
        else:
            return None
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,134
|
18505161903/fwshare
|
refs/heads/master
|
/example/LSTM/kerasDemoStock.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 7 17:55:28 2019
@author: lg

Experimental script: pulls OHLC history for stock 601857 via tushare,
builds sliding-window training samples (120 inputs -> 20 outputs), trains
a dense Keras network on them, predicts the next 5 days of OHLC values,
and plots the predictions as a candlestick chart.
NOTE(review): uses deprecated APIs (`DataFrame.as_matrix`, Keras
`output_dim`/`nb_epoch`) — runs only on old pandas/Keras versions.
"""
from matplotlib.dates import DateFormatter, WeekdayLocator, DayLocator, MONDAY, YEARLY
# from matplotlib.finance import quotes_historical_yahoo_ohlc, candlestick_ohlc
# import matplotlib
import tushare as ts
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pylab import date2num
import datetime
import numpy as np
from pandas import DataFrame
from numpy import row_stack, column_stack
from mpl_finance import candlestick_ochl
# Daily OHLC history for PetroChina (601857).
df = ts.get_hist_data('601857', start='2019-01-15', end='2019-05-07')
dd = df[['open', 'high', 'low', 'close']]
from mpl_finance import candlestick_ochl, candlestick_ohlc  # NOTE(review): duplicate import
# print(dd.values.shape[0])
# Flatten to one value stream: [o,h,l,c, o,h,l,c, ...] in date order.
dd1 = dd.sort_index()
dd2 = dd1.values.flatten()
# Take the most recent 120 values (30 days * 4 columns) as the prediction input.
g1 = dd2[::-1]
g2 = g1[0:120]
g3 = g2[::-1]
gg = DataFrame(g3)
gg.T.to_excel('gg.xls')
# dd3=pd.DataFrame(dd2)
# dd3.T.to_excel('d8.xls')
# Sliding window over the flattened stream: each sample is 140 values
# (120 features + 20 labels), stepping 4 values (= 1 day) at a time.
g = dd2[0:140]
for i in range(dd.values.shape[0] - 34):
    s = dd2[i * 4:i * 4 + 140]
    g = row_stack((g, s))
fg = DataFrame(g)
print(fg)
fg.to_excel('fg.xls')
# -*- coding: utf-8 -*-
# Build and train a multi-layer neural network, then validate the model.
# from __future__ import print_function
import pandas as pd
inputfile1 = 'fg.xls'  # training data (written above)
testoutputfile = 'test_output_data.xls'  # model output file for test data
data_train = pd.read_excel(inputfile1)  # read training data back in
data_mean = data_train.mean()
data_std = data_train.std()
data_train1 = (data_train - data_mean) / 5  # normalisation (fixed divisor 5)
y_train = data_train1.iloc[:, 120:140].as_matrix()  # label columns (next 5 days OHLC)
x_train = data_train1.iloc[:, 0:120].as_matrix()  # feature columns (prior 30 days OHLC)
# y_test = data_test.iloc[:,4].as_matrix() #test label column
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
model = Sequential()  # build the model
model.add(Dense(input_dim=120, output_dim=240))  # input -> hidden layer
model.add(Activation('relu'))  # ReLU activation
model.add(Dense(input_dim=240, output_dim=120))  # hidden -> hidden layer
model.add(Activation('relu'))  # ReLU activation
model.add(Dense(input_dim=120, output_dim=120))  # hidden -> hidden layer
model.add(Activation('relu'))  # ReLU activation
model.add(Dense(input_dim=120, output_dim=20))  # hidden -> output layer
model.add(Activation('sigmoid'))  # sigmoid activation on the output
# Compile the model: mean-squared-error loss, Adam optimiser.
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train, nb_epoch=100, batch_size=8)  # train
model.save_weights('net.model')  # persist weights
inputfile2 = 'gg.xls'  # prediction input (latest 120 values)
pre = pd.read_excel(inputfile2)
pre_mean = data_mean[0:120]
pre_std = pre.std()
pre1 = (pre - pre_mean) / 10  # normalisation — NOTE(review): divisor 10 here vs 5 in training
# pre1 = (pre-pre_mean)/pre.std() #normalisation
pre2 = pre1.iloc[:, 0:120].as_matrix()  # prediction features
r = pd.DataFrame(model.predict(pre2))
# De-normalise the prediction back to price scale.
rt = r * 10 + data_mean[120:140].as_matrix()
print(rt.round(2))
rt.to_excel('rt.xls')
# print(r.values@data_train.iloc[:,116:120].std().values+data_mean[116:120].as_matrix())
# Build matplotlib date numbers for the 5 predicted days after the last bar.
a = list(df.index[0:-1])
b = a[0]
c = datetime.datetime.strptime(b, '%Y-%m-%d')
d = date2num(c)
c1 = [d + i + 1 for i in range(5)]
c2 = np.array([c1])
# Reshape the flat 20-value prediction into 5 rows of 4 (OHLC per day).
r1 = rt.values.flatten()
r2 = r1[0:4]
for i in range(4):
    r3 = r1[i * 4 + 4:i * 4 + 8]
    r2 = row_stack((r2, r3))
c3 = column_stack((c2.T, r2))
r5 = DataFrame(c3)
if len(c3) == 0:
    raise SystemExit
# Candlestick plot of the predicted days.
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2)
# ax.xaxis.set_major_locator(mondays)
# ax.xaxis.set_minor_locator(alldays)
# ax.xaxis.set_major_formatter(mondayFormatter)
# ax.xaxis.set_minor_formatter(dayFormatter)
# plot_day_summary(ax, quotes, ticksize=3)
# candlestick_ochl(ax, c3, width=0.6, colorup='r', colordown='g')
candlestick_ohlc(ax, c3, width=0.5, colorup='r', colordown='g')
ax.xaxis_date()
ax.autoscale_view()
plt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')
ax.grid(True)
# plt.title('000002')
plt.show()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,135
|
18505161903/fwshare
|
refs/heads/master
|
/futures/test.py
|
# encoding: utf-8
# Determine the next date to fetch: reads the most recent document from the
# futures2.position collection, parses its 'date' field, and prints the day
# after it.  Importing ft2 at the end presumably kicks off the actual fetch
# — TODO confirm (ft2 is not visible here).
import pandas as pd
import datetime,time
import json
from pymongo import MongoClient
import fushare as ak
# import fushare as ak
from pandas import DataFrame
pd.set_option('display.width', None)  # unlimited display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
if __name__ == '__main__':
    # Connect to the local MongoDB instance.
    client = MongoClient('localhost', 27017)
    db = client.futures2
    market = db.position
    # Date range: from the day after the latest stored date up to today.
    today = datetime.date.today()
    end=today
    # Latest document by _id (insertion order).
    begin = DataFrame(list(market.find({}).sort([('_id', -1)]).limit(1)))
    begin = begin['date'][0]
    # 'date' is stored as a YYYYMMDD string; convert to a date object.
    begin = time.strptime(begin, "%Y%m%d")
    year, month, day = begin[:3]
    begin = datetime.date(year, month, day)
    begin = begin + datetime.timedelta(days=1)
    print(begin)
    import ft2
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,136
|
18505161903/fwshare
|
refs/heads/master
|
/example/ocr_demo.py
|
# encoding: utf-8
# OCR demo: extract simplified-Chinese text from report.jpg with Tesseract
# and print it with spaces stripped.
import tesserocr
from tesserocr import PyTessBaseAPI
from PIL import Image
img1 = Image.open('report.jpg')
# lang="chi_sim" selects the simplified-Chinese trained data.
code = tesserocr.image_to_text(img1, lang="chi_sim")
print(code.replace(" ", ""))
import fushare  # NOTE(review): unused import at end of file — candidate for removal
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,137
|
18505161903/fwshare
|
refs/heads/master
|
/example/futures_test.py
|
# Compute and plot the net position value of one futures broker ('永安期货')
# per variety on a fixed date, from MongoDB collections market/unit/position.
import pymongo
import pandas as pd
import matplotlib as plt  # NOTE(review): matplotlib aliased as plt (not pyplot); only rcParams is used below
from pandas import Series,DataFrame
pd.set_option('display.width', None)  # unlimited display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
# Connect to MongoDB.
client = pymongo.MongoClient('localhost',27017)
futures = client.futures
market = futures.market
unit = futures.unit
position = futures.position
# Load full collections into DataFrames (names shadow the collection handles).
market = DataFrame(list(market.find()))
unit = DataFrame(list(unit.find()))
position = DataFrame(list(position.find()))
# #type conversion
# market['set_close'] = market['set_close'].astype(float)
# unit['unit'] = unit['unit'].astype(float)
# Normalise variety codes to upper case.
position['variety']=position['variety'].str.upper()
# Index by rank column.
position=position.set_index('名次')
# Keep only the columns needed downstream.
data1=market[['date','variety','set_close']]
data2=unit[[ 'variety','unit']]
position=position[['date','variety','symbol','持买仓量期货公司','持买仓量', '持买仓量增减','持卖仓量期货公司','持卖仓量', '持卖仓量增减']]
# print(position.head())
# Broker to analyse.
members='永安期货'
data3=position[(position['持买仓量期货公司'] == members)]
# Aggregate long open interest per date/variety for this broker.
data3=data3[['date','variety','持买仓量期货公司','持买仓量']]
data3=data3.groupby(['date','variety','持买仓量期货公司'])[['持买仓量']].sum()
data4=position[(position['持卖仓量期货公司'] == members)]
# print(data4.head())
# Aggregate short open interest per date/variety for this broker.
data4=data4[['date','variety','持卖仓量期货公司','持卖仓量']]
data4=data4.groupby(['date','variety','持卖仓量期货公司'])[['持卖仓量']].sum()
# print(data3)
# print(data4)
# Outer-join long and short sides.
data5=pd.merge(data3,data4, on=['date','variety'],how='outer')
data5['会员简称']=data5.apply(lambda x: members,axis=1)
# Fill NaN (missing side) with 0.
data5=data5.fillna(0)
# Net position = long - short.
data5['净持仓']=data5.apply(lambda x: x['持买仓量']-x['持卖仓量'],axis=1)
# Keep display columns.
data5=data5[['会员简称','持买仓量','持卖仓量','净持仓']]
data5=data5.reset_index(['variety','date'])
# print(data5)
# Contract value = settlement close * contract unit.
contractValue=pd.merge(data1,data2,how='left',sort=False).drop_duplicates()
contractValue['contractValue'] = contractValue.apply(lambda x: x['set_close']*x['unit'],axis=1)
contractValue=contractValue[['date','variety','contractValue']]
# value replacement with replace()
# contractValue=contractValue.replace(['TA'],'PTA')
print(contractValue)
# contractValue.set_index(['date','variety'], inplace = True)
sz=pd.merge(data5,contractValue,on=['date','variety'],how='left')
# Net position value = net position * contract value.
sz['净持仓价值']=sz.apply(lambda x: x['净持仓']*x['contractValue'],axis=1)
sz=sz[['date','variety','会员简称','净持仓价值']]
sz=sz[sz['date']=='2018-10-23']  # single hard-coded analysis date
# print(sz)
sz=sz.sort_values(by='净持仓价值')
# print(sz)
sz.plot.bar(x='variety',y='净持仓价值',figsize=(20,16),label=members)
# print(sz['净持仓价值'].sum())
# Varieties with large net long / net short value (> 1 billion).
long=sz[sz['净持仓价值']>1e+09]  # NOTE(review): shadows builtin `long` name
short=sz[sz['净持仓价值']<-1e+09]
# print(long,short)
# Two lines to fix garbled Chinese characters in plots:
plt.rcParams['font.sans-serif']=['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus']=False  # render minus signs correctly
# sz=sz.set_index(['date','variety'])
# print(sz.rank(method="max",ascending=True))
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,138
|
18505161903/fwshare
|
refs/heads/master
|
/example/main_wh.py
|
# encoding: utf-8
# Back-test bookkeeping: joins the `main` collection (daily bars with a
# trading signal) against contract units, computes daily P&L from the
# previous day's signal, accumulates it, and appends the result to a CSV.
import pymongo,json
import pandas as pd
from pandas import Series,DataFrame
import csv
from scipy.stats import pearsonr
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import time,datetime
# from datetime import datetime
pd.set_option('display.width', None)  # unlimited display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
client = pymongo.MongoClient('localhost', 27017)
futures = client.futures
unit = futures.unit
main = futures.main
yk=futures.yk
start='20190101'
# var='A'
unit = DataFrame(list(unit.find()))
main= DataFrame(list(main.find({'date': {'$gte': start}})))
unit1=unit[['variety','unit']]
# del main['_id']
# Yesterday's signal per variety: shift the signal column by one row.
main['昨日交易信号']=main.groupby('variety')['交易信号'].shift(1)
main=main.dropna()
del main['_id']
# print(main.head())
all_h=pd.merge(main,unit1,on=['variety'])#.drop_duplicates()
# print(all_h1)
# Daily P&L = price change * yesterday's signal * contract unit.
all_h['盈亏计算']= all_h.apply(lambda x: x['change'] * x['昨日交易信号'] * x['unit'],axis=1)#.drop_duplicates()
# print(all_h.head())
all_h= all_h.drop_duplicates()
all_h['pnl'] = all_h['盈亏计算'].cumsum()  # cumulative P&L curve
# Appends on every run (mode='a') — repeated runs duplicate rows. TODO confirm intended.
all_h.to_csv(r"e:\signal.csv", mode='a', encoding='ANSI', header=True)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,139
|
18505161903/fwshare
|
refs/heads/master
|
/opendatatools/hkex/__init__.py
|
# encoding: utf-8
# Package entry point for the hkex module: re-export the public interface.
from .hkex_interface import *
# Public API of this package.
__all__ = ['set_proxies', 'get_lgt_share']
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,140
|
18505161903/fwshare
|
refs/heads/master
|
/example/LSTM/LSTMRNN.py
|
# Minimal demo: load column 1 of http-request.csv and plot it.
import pandas
import matplotlib.pyplot as plt
# usecols=[1]: only the second column of the CSV is read.
dataset = pandas.read_csv('http-request.csv', usecols=[1], engine='python')
plt.plot(dataset)
plt.show()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,141
|
18505161903/fwshare
|
refs/heads/master
|
/futures/position.py
|
# encoding: utf-8
# Daily broker position-rank importer: fetches rank tables via fushare and
# (when un-commented) writes them into MongoDB futures2.position.
# import tensorflow as tf
import datetime
import pandas as pd
import json
from pymongo import MongoClient
import fushare as ak
pd.set_option('display.width', None)  # unlimited display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
def get_trade_rank(market = 'SHF', date = None):
    """Fetch the daily broker position-rank tables for one exchange.

    Parameters
    ----------
    market : str
        Exchange code: 'SHF' (SHFE), 'DCE', 'CZC' (CZCE) or 'CFE' (CFFEX).
    date : str or None
        Trading day, passed straight through to the fushare fetcher.
        When None, defaults to yesterday formatted as '%Y-%m-%d'.

    Returns
    -------
    The fushare rank-table result (a dict of DataFrames keyed by symbol,
    per the callers below) for a supported market, or the pair
    (None, '不支持的市场类型') for an unknown market code.
    """
    if date is None:
        # BUG FIX: the original called get_target_date(-1, "%Y-%m-%d"),
        # which is never defined or imported in this file and raised
        # NameError.  Compute "yesterday" directly with datetime instead.
        date = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
    if market == 'SHF':
        return ak.get_shfe_rank_table(date)
    if market == 'DCE':
        return ak.get_dce_rank_table(date)
    if market == 'CZC':
        return ak.get_czce_rank_table(date)
    if market == "CFE":
        return ak.get_cffex_rank_table(date)
    return None, '不支持的市场类型'
if __name__ == '__main__':
    # Exchanges to import; currently only CZCE is active.
    markets = ['CZC']#, 'CZC', 'SHF','CFE','DCE'
    # Connect to MongoDB.
    client = MongoClient('localhost', 27017)
    db = client.futures2
    position = db.position
    for market in markets:
        # Single-day window (inserts are commented out — dry run).
        begin = datetime.date(2020,10,27)
        end = datetime.date(2020,10,27)
        # end = datetime.date.today()
        for i in range((end - begin).days + 1):
            day = begin + datetime.timedelta(days=i)
            days=day.strftime('%Y%m%d')
            try:
                df = get_trade_rank(market, date=days)
                # print(days, market)
                # df is a dict of per-symbol rank DataFrames.
                for key, value in df.items():
                    value['date'] = days
                    value['symbol'] = value['symbol'].str.upper()
                    print(value)
                    # vars = position[position['variety'] == var]
                    # position.insert(json.loads(value.T.to_json()).values())
                    # print(value)
                    # Drop per-contract rows for CZCE because its aggregated
                    # positions are unreliable (original author's note).
                    if market != 'CZC':
                        print('insert into',key)
                        # position.insert_many(json.loads(value.T.to_json()).values())
                    else:
                        # Keep only variety-level rows (symbol == variety).
                        value=value[value['symbol'] ==value['variety']]
                        print('insert into',key)
                        # position.insert_many(json.loads(value.T.to_json()).values())
                        # print(json.loads(value.T.to_json()).values())
            except:
                # NOTE(review): bare except hides all errors as "data anomaly".
                print(days,market,'数据异常')
                continue
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,142
|
18505161903/fwshare
|
refs/heads/master
|
/example/5555.py
|
# encoding: utf-8
# Scratch script: loads index market data and contract units from MongoDB.
import pandas as pd
from pandas import *
import datetime
import json
from pymongo import MongoClient
from collections import defaultdict
pd.set_option('display.width', None)  # unlimited display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
client = MongoClient('localhost', 27017)
db = client.futures
indexMarket = db.indexMarket
peak = db.peak
unit=db.unit
start='20190601'
# var='JD'
# Load index market rows since `start` and the full unit table.
indexMarket = DataFrame(list(indexMarket.find({'date': {'$gte': start}})))
unit = DataFrame(list(unit.find()))
dd=unit['variety']
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,143
|
18505161903/fwshare
|
refs/heads/master
|
/example/Slope_test.py
|
# encoding: utf-8
# Historical broker position-rank importer: fetches rank tables via fushare
# for a date range and inserts them into MongoDB futures.position.
import datetime
import pandas as pd
import json
from pymongo import MongoClient
import fushare
pd.set_option('display.width', None)  # unlimited display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
def get_trade_rank(market = 'SHF', date = None):
    """Fetch the daily broker position-rank tables for one exchange.

    Parameters
    ----------
    market : str
        Exchange code: 'SHF' (SHFE), 'DCE', 'CZC' (CZCE) or 'CFE' (CFFEX).
    date : str or None
        Trading day, passed straight through to the fushare fetcher.
        When None, defaults to yesterday formatted as '%Y-%m-%d'.

    Returns
    -------
    The fushare rank-table result for a supported market, or the pair
    (None, '不支持的市场类型') for an unknown market code.
    """
    if date is None:
        # BUG FIX: the original called get_target_date(-1, "%Y-%m-%d"),
        # which is never defined or imported in this file and raised
        # NameError.  Compute "yesterday" directly with datetime instead.
        date = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
    if market == 'SHF':
        return fushare.get_shfe_rank_table(date)
    if market == 'DCE':
        return fushare.get_dce_rank_table(date)
    if market == 'CZC':
        return fushare.get_czce_rank_table(date)
    if market == "CFE":
        return fushare.get_cffex_rank_table(date)
    return None, '不支持的市场类型'
if __name__ == '__main__':
    markets = ['CZC', 'SHF','CFE','DCE']#, 'CZC', 'SHF','CFE','DCE'
    # Connect to MongoDB.
    client = MongoClient('localhost', 27017)
    db = client.futures
    position = db.position
    for market in markets:
        # Import window: 2018-10-01 .. 2018-10-12 inclusive.
        begin = datetime.date(2018, 10, 1)
        end = datetime.date(2018, 10, 12)
        for i in range((end - begin).days + 1):
            day = begin + datetime.timedelta(days=i)
            days=day.strftime('%Y%m%d')
            try:
                df = get_trade_rank(market, date=days)
                print(days, market)
                # df is a dict of per-symbol rank DataFrames.
                for key, value in df.items():
                    value['date'] = days
                    if market != 'CZC':
                        print('insert into',market)
                        position.insert(json.loads(value.T.to_json()).values())
                    else:
                        # CZCE: keep only variety-level rows.
                        # NOTE(review): other scripts filter on 'variety',
                        # this one uses 'var' — confirm the column name.
                        value=value[value['symbol']==value['var']]
                        print('insert into',market)
                        position.insert(json.loads(value.T.to_json()).values())
            except:
                # NOTE(review): bare except hides all errors as "data anomaly".
                print(days,market,'数据异常')
                continue
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,144
|
18505161903/fwshare
|
refs/heads/master
|
/tests/测试.py
|
import pandas as pd

# Load the 2017 workbook from the local D: drive and dump it to stdout.
frame = pd.read_excel('d:/2017.xlsx')
print(frame)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,145
|
18505161903/fwshare
|
refs/heads/master
|
/example/hkex_lgt_demo.py
|
# encoding: utf-8
# Demo: query Stock Connect northbound shareholding via opendatatools.
from opendatatools import hkex
import pandas as pd
if __name__ == '__main__':
    # Fetch Stock Connect northbound shareholding.
    # Parameters: market SH/SZ, date YYYY-MM-DD.
    df = hkex.get_lgt_share(market = 'SZ', date = '2018-05-28')
    print(df)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,146
|
18505161903/fwshare
|
refs/heads/master
|
/futures/market-fushare.py
|
# encoding: utf-8
# Daily-bar fetcher: pulls daily futures bars for four exchanges via
# fushare and prints them (MongoDB writes are commented out).
import pandas as pd
import datetime,time
import json
from fushare.dailyBar import get_future_daily
# from pymongo import MongoClient
import fushare as ak
# import fushare as ak
from pandas import DataFrame
pd.set_option('display.width', None)  # unlimited display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
if __name__ == '__main__':
    # Connect to MongoDB (disabled — dry run).
    # client = MongoClient('localhost', 27017)
    # db = client.futures2
    # market = db.market3
    # Date window to fetch.
    begin = datetime.date(2020,10,20)
    end = datetime.date(2020,10,20)
    for i in range((end - begin).days + 1):
        day = begin + datetime.timedelta(days=i)
        days = day.strftime('%Y%m%d')
        df = pd.DataFrame()
        # NOTE(review): get_future_daily is called with the FULL begin..end
        # range inside the per-day loop, so multi-day windows would fetch
        # and accumulate duplicate rows — confirm intended.
        for market in ['dce', 'cffex', 'shfe', 'czce']:
            try:
                df = df.append(get_future_daily(start=begin, end=end, market=market))
                print(df)
                # write to db
                # market.insert_many(json.loads(df2.T.to_json()).values())
                # print(json.loads(df2.T.to_json()).values())
            except:
                # NOTE(review): bare except hides all errors as "data anomaly".
                print(days, '数据异常')
                continue
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,147
|
18505161903/fwshare
|
refs/heads/master
|
/example/futures_unit.py
|
# Load the contract-unit table from an Excel file and insert every row
# into the MongoDB futures2.unit collection.
import json
from pymongo import MongoClient
import pandas as pd
if __name__=='__main__':
    # Connect to MongoDB.
    client = MongoClient('localhost', 27017)
    futures = client.futures2
    # NOTE(review): `input_col` is not a pandas read_excel parameter —
    # presumably `index_col=0` was intended; confirm against pandas version.
    data = pd.read_excel(r'e:\unit.xlsx',input_col=0)
    df = pd.DataFrame(data)
    # Transpose + to_json yields one JSON object per row; insert them all.
    futures.unit.insert(json.loads(df.T.to_json()).values())
    print(json.loads(df.T.to_json()).values())
# # encoding: utf-8
#
# from opendatatools import futures
# import datetime
# import os
# import json
# from pymongo import MongoClient
# import pandas as pd
# if __name__ == '__main__':
#
# markets = ['SHF','CZC', 'DCE']#, 'SHF','CZC', 'DCE', 'CFE'
# # 连接数据库
# client = MongoClient('localhost', 27017)
# db = client.futures
# position = db.position
#
# if os.path.exists(r"c:\FuturesPosition.csv"):
# os.remove(r"c:\FuturesPosition.csv")
# for market in markets:
# begin = datetime.date(2018, 11, 8)
# end = datetime.date(2018, 11, 9)
# for i in range((end - begin).days + 1):
# day = begin + datetime.timedelta(days=i)
# days=day.strftime('%Y-%m-%d')
# print(days)
# try:
# df, msg = futures.get_trade_rank(market, date=days)
# print(days,market)
# #position.insert(json.loads(df.T.to_json()).values())
# # print(json.loads(df.T.to_json()).values())
# except:
# print(days,market,'数据异常')
# continue
# if os.path.exists(r"c:\FuturesPosition.csv"):
# df.to_csv(r"c:\FuturesPosition.csv",mode='a',encoding='ANSI',header=False)
# else:
# df.to_csv(r"c:\FuturesPosition.csv",encoding='ANSI')
# data = pd.read_csv(r'c:\FuturesPosition.csv',encoding = "ANSI")
# df = pd.DataFrame(data)
# position.insert(json.loads(df.T.to_json()).values())
# print(json.loads(df.T.to_json()).values())
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,148
|
18505161903/fwshare
|
refs/heads/master
|
/futures/AllNetPosition.py
|
# encoding: utf-8
# Build a per-variety net-position report for a fixed set of brokers from
# the exchanges' daily rank tables and write it to an xlsx file.
# import tensorflow as tf
import datetime
from time import time
import pandas as pd
import fushare as ak
pd.set_option('display.width', None)  # unlimited display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
def get_trade_rank(market = 'SHF', date = None):
    """Fetch the daily broker position-rank tables for one exchange.

    Parameters
    ----------
    market : str
        Exchange code: 'SHF' (SHFE), 'DCE', 'CZC' (CZCE) or 'CFE' (CFFEX).
    date : str or None
        Trading day, passed straight through to the fushare fetcher.
        When None, defaults to yesterday formatted as '%Y-%m-%d'.

    Returns
    -------
    The fushare rank-table result (a dict of DataFrames, per handle() below)
    for a supported market, or the pair (None, '不支持的市场类型') for an
    unknown market code.
    """
    if date is None:
        # BUG FIX: the original called get_target_date(-1, "%Y-%m-%d"),
        # which is never defined or imported in this file and raised
        # NameError.  Compute "yesterday" directly with datetime instead.
        date = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
    if market == 'SHF':
        return ak.get_shfe_rank_table(date)
    if market == 'DCE':
        return ak.get_dce_rank_table(date)
    if market == 'CZC':
        return ak.get_czce_rank_table(date)
    if market == "CFE":
        return ak.get_cffex_rank_table(date)
    return None, '不支持的市场类型'
def handle(path):
    """Build the per-variety net-position report and write it to *path* (xlsx).

    Pulls the daily rank tables for CZCE/SHFE/DCE over the hard-coded date
    window, aggregates long and short open interest per (date, variety,
    broker), computes net = long - short for a fixed list of brokers, and
    writes the pivoted result to an Excel file.

    Returns the resulting DataFrame.
    """
    markets = ['CZC','SHF','DCE']#'CZC', 'SHF',
    df = pd.DataFrame()
    for market in markets:
        # Hard-coded single-day window.
        begin = datetime.date(2020, 10, 27)
        end = datetime.date(2020, 10, 27)
        # begin = datetime.date.today()
        # end = begin
        print(str(begin)+' 正在拉取'+market+'...')
        for i in range((end - begin).days + 1):
            day = begin + datetime.timedelta(days=i)
            days = day.strftime('%Y%m%d')
            try:
                # Dict of per-symbol rank DataFrames.
                df1 = get_trade_rank(market, date=days)
                for key, value in df1.items():
                    value['date'] = days
                    # CZCE: keep only the variety-level aggregate rows.
                    if market == 'CZC':
                        value = value[value['symbol'] == value['variety']]
                        # print(value)
                        # value = value.applymap(lambda x: x.replace(',', '').replace("-", ""))
                        df = df.append(value)
                        # print(days+' 拉取了' + market+' '+df['symbol'].iloc[-1])
                    # Other exchanges: drop the rank-999 total rows.
                    if market == 'SHF':
                        value = value[-value['rank'].isin([999])]
                        df = df.append(value)
                        # print(days+' 拉取了' + market + ' ' + df['symbol'].iloc[-1])
                    if market == 'DCE':
                        value = value[-value['rank'].isin([999])]
                        # value = value.applymap(lambda x: x.replace(',', '').replace("-", ""))
                        df = df.append(value)
                        # print(days+' 拉取了' + market+' '+df['symbol'].iloc[-1])
                    if market == 'CFE':
                        value = value[-value['rank'].isin([999])]
                        df = df.append(value)
                        # print(days+' 拉取了' + market+' '+df['symbol'].iloc[-1])
                    # NOTE(review): this appends `value` a SECOND time after the
                    # per-market branch already appended it — rows are duplicated.
                    # Confirm whether intentional before removing.
                    df = df.append(value)
            except:
                # NOTE(review): bare except hides all errors as "data anomaly".
                print(days, market, '数据异常')
                continue
    df = df.apply(pd.to_numeric, errors="ignore")
    # Net positions: sum long/short open interest per (date, variety, broker).
    long = df.groupby(['date', 'variety', 'long_party_name'])[
        ['long_openIntr']].sum()  # NOTE(review): shadows builtin `long` name
    short = df.groupby(['date', 'variety', 'short_party_name'])[
        ['short_openIntr']].sum()
    # Merge long and short sides side by side.
    frames = [long, short]
    position = pd.concat(frames, axis=1, sort=True).fillna(0).reset_index()
    # Rename the flattened index levels.
    position = position.rename(columns={'level_0': 'date', 'level_1': 'variety', 'level_2': 'BrokerID'})
    position['net'] = position.apply(lambda x: x['long_openIntr'] - x['short_openIntr'], axis=1)
    # Brokers included in the report.
    party_names = ['永安期货', '海通期货', '中信期货', '银河期货', '国泰君安']
    df = pd.DataFrame()
    for i in party_names:
        try:
            mem = position[position['BrokerID'] == i]
            df1 = pd.DataFrame(mem)
            df = df.append(df1)
        except:
            print('?')
            continue
    # Pivot: one column of net position per broker.
    two_level_index_series = df.set_index(['date','variety', 'BrokerID'])['net']
    net_df = two_level_index_series.unstack()
    net_df['合计'] = net_df.apply(lambda x: x.sum(), axis=1)
    net_df = net_df.rename_axis(columns=None).reset_index()
    net_df = net_df[['date','variety', '永安期货', '海通期货', '中信期货', '银河期货', '国泰君安', '合计']]
    # print(net_df)
    net_df.to_excel(path, index=False)
    print(net_df)
    print('写入文件成功,保存路径: '+path)
    return net_df
if __name__ == '__main__':
    # Wall-clock timing around the report build.
    start = time()
    print("Start: " + str(start))
    filePath='D:/PyFile/AllNetPosition.xlsx'
    handle(filePath)
    # NOTE(review): busy-wait of 100M iterations — pointless CPU burn that
    # inflates the reported elapsed time; candidate for removal.
    for i in range(1, 100000000):
        pass
    stop = time()
    print("Stop: " + str(stop))
    print('Time spent:'+str(stop - start) + "秒")
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,149
|
18505161903/fwshare
|
refs/heads/master
|
/futures/读取CfdBasis.py
|
# For each day in a window: fetch daily bars for all four exchanges, query
# the main/sub-main contract spread per variety, and collect the bars of
# the far-month (symbol2) contracts.
import pandas as pd
import datetime
from fushare.dailyBar import get_future_daily
from futures.CfdBasis import get_mainSubmainMarket
from futures.CfdBasis import get_mainSubmainMarket_bar
import numpy as np
begin = datetime.date(2020, 10, 20)
end = datetime.date(2020, 10, 30)
for i in range((end - begin).days + 1):
    day = begin + datetime.timedelta(days=i)
    days = day.strftime('%Y%m%d')
    df= pd.DataFrame()
    # NOTE(review): the full begin..end range is fetched on EVERY day of the
    # loop — the same bars are re-downloaded each iteration; confirm intended.
    for market in ['dce', 'cffex', 'shfe', 'czce']:
        df = df.append(get_future_daily(start=begin, end=end, market=market))
    varList = list(set(df['variety']))
    dfL = pd.DataFrame()
    # Spread between main and sub-main contract for each variety.
    for var in varList:
        try:
            ry = get_mainSubmainMarket(days, var)
            # print(ry)
            if ry:
                dfL = dfL.append(pd.DataFrame([ry], index=[var], columns=['差价', 'basicPrice(%)', 'symbol1', 'symbol2', 'M-differ', 'Slope(%)']))
                # print(dfL)
        except:
            # NOTE(review): bare except silently drops varieties that fail.
            pass
    dfL['date'] = days
    symbolList = list(set(dfL['symbol2']))# far-month contracts
    # print(symbolList)
    dfl=pd.DataFrame()
    # Collect the daily bars of every far-month contract.
    for symbol in symbolList:
        # print(symbol)
        df=df[['date', 'variety', 'symbol', 'open','close']]
        if symbol:
            df1= df[df['symbol'] == symbol]
            # print(df1)
            dfl=dfl.append(df1)
    print(dfl)
# df2=pd.DataFrame()
#
# mainSubmainMarket = get_mainSubmainMarket_bar(date=days, type='var')
# # mainSubmainMarket['date']=days
# mainSubmainMarket=df2.append(mainSubmainMarket)
# mainSubmainMarket['date']=days
# mainSubmainMarket = mainSubmainMarket.reset_index()
# mainSubmainMarket = mainSubmainMarket.rename(columns={'index': 'variety', 'symbol2': 'symbol'})
#
# data=pd.merge(dfl,mainSubmainMarket,on=['date','variety','symbol'], how='outer').fillna(0)
#
# data=data.to_excel(r'D:\PyFile\CfdBasis.xlsx',index=False)
# print(data)
# df3 = pd.DataFrame()
# df3['close'] = data['close'] # 收盘价
# df3['change'] = df3['close'] - df3['close'].shift(1) # 当日涨跌
# # df3=df3.dropna()#抛弃nan数据
# print(df3)
#计算持仓
# df3['pos']=0
# df3['pos'][np.sign(df3['change'])]=100000???
#计算每日盈亏和手续费
# df3['pnl']=df3['change']+df['pos']#盈亏
# df['fee']=0#手续费
# df['fee'][df['pos']!=df['pos'].shift(1)]=df['close']*20000*0.0003
# df['netpnl']=df['pnl']-df['fee']#净盈亏
#汇总求和盈亏,绘制资金曲线
# df['cumpnl']=df['netpnl'].cumsum()
# df['cumpnl'].plot()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,150
|
18505161903/fwshare
|
refs/heads/master
|
/example/cftc.py
|
# Pull CFTC commitment-of-traders data from Quandl for every code listed in
# a local Excel sheet, derive per-category net positions and COT index
# values, and print the latest rows.  (Mongo collections are opened but not
# written to in this version of the script.)
import quandl
import pandas as pd
import json
from pymongo import MongoClient
import datetime

pd.set_option('display.width', None)        # console display width
pd.set_option('display.max_rows', None)     # show all rows
pd.set_option('display.max_columns', None)  # show all columns

client = MongoClient('localhost', 27017)
db = client.futures
market = db.market
position = db.position
main = db.cftc
quandl.ApiConfig.api_key = '-GGCYDJNb2cxMLTvqTho'

# BUG FIX: pandas.read_excel has no `input_col` argument -- `index_col` was
# clearly intended (use the first column as the index).
d = pd.read_excel(r'E:\code.xlsx', index_col=0)
ds = d[['品种名称', 'code']]
for temp in d['code']:
    try:
        data = quandl.get('CFTC/' + temp + '_F_L_ALL', paginate=True)
        data['code'] = temp
        # Net position per trader category: longs minus shorts.
        data['大户净持仓'] = data.apply(lambda x: x['Noncommercial Long'] - x['Noncommercial Short'], axis=1)
        data['套保净持仓'] = data.apply(lambda x: x['Commercial Long'] - x['Commercial Short'], axis=1)
        data['散户净持仓'] = data.apply(lambda x: x['Nonreportable Positions Long'] - x['Nonreportable Positions Short'],
                                   axis=1)
        # Rolling 156-week (three-year) extremes of each net position.
        chg = data[['大户净持仓', '套保净持仓', '散户净持仓']]
        roll_max = chg.rolling(window=156).max().dropna()  # renamed: don't shadow builtin max()
        roll_min = chg.rolling(window=156).min().dropna()  # renamed: don't shadow builtin min()
        hb = pd.merge(roll_max, roll_min, on=['Date'], how='outer')
        hb1 = pd.merge(data, hb, on=['Date'], how='outer')
        # COT index: where today's net position sits inside its rolling
        # min/max band, expressed in percent ( _x = rolling max, _y = min ).
        data['大户cot指标(%)'] = round(
            hb1.apply(lambda x: ((x['大户净持仓'] - x['大户净持仓_y']) / (x['大户净持仓_x'] - x['大户净持仓_y'])) * 100, axis=1), 2)
        data['套保cot指标(%)'] = round(
            hb1.apply(lambda x: ((x['套保净持仓'] - x['套保净持仓_y']) / (x['套保净持仓_x'] - x['套保净持仓_y'])) * 100, axis=1), 2)
        data['散户cot指标(%)'] = round(
            hb1.apply(lambda x: ((x['散户净持仓'] - x['散户净持仓_y']) / (x['散户净持仓_x'] - x['散户净持仓_y'])) * 100, axis=1), 2)
        print(data.tail(10))
    except Exception:
        # Best effort: skip codes whose Quandl dataset is missing/malformed.
        continue
print('完成')
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,151
|
18505161903/fwshare
|
refs/heads/master
|
/futures/主次合约测试.py
|
# -*- coding:utf-8 -*-
"""
Created on 2018年07月11日
@author: lowin
@contact: li783170560@126.com
获取各合约展期收益率,日线数据从dailyBar脚本获取
"""
import numpy as np
import matplotlib.pyplot as plt
from fushare.symbolVar import *
from fushare.dailyBar import *
calendar = cons.get_calendar()
def _plot_bar(values, xtick):
    """Render *values* as a green bar chart labelled with *xtick*, then show it."""
    axes = plt.figure(1).add_subplot(111)
    axes.bar(range(len(values)), values, color="green")
    axes.set_xticks(range(len(xtick)))
    axes.set_xticklabels(xtick, fontsize=6)
    plt.show()
def _plot(values, xtick):
    """Line-plot *values* against *xtick* and display the figure."""
    plot_args = (values, xtick)
    plt.plot(*plot_args)
    plt.show()
def get_rollYield_bar(type = 'symbol', var = 'RB',date= None, start = None, end = None, plot = False):
    """
    Fetch roll-yield data in one of three shapes.

    Parameters
    ----------
    type : 'symbol' -- closing prices of every delivery-month contract of
               one variety on one day;
           'var'    -- roll yield of the two main contracts for every
               variety on one day (cross-section);
           'date'   -- roll yield of one variety's two main contracts for
               each day in [start, end] (time series).
    start : str YYYYMMDD, start date (used by type='date')
    end   : str YYYYMMDD, end date (used by type='date')
    date  : str YYYYMMDD, single date
    var   : str, variety code such as RB or AL
    plot  : bool, also draw a chart of the result

    Returns
    -------
    DataFrame of roll-yield data indexed by date or variety.
    """
    # Normalise date arguments; defaults are today / the latest data date.
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    start = cons.convert_date(start) if start is not None else datetime.date.today()
    end = cons.convert_date(end) if end is not None else cons.convert_date(cons.get_latestDataDate(datetime.datetime.now()))
    if type == 'symbol':
        # All delivery-month contracts of one variety on `date`.
        df = get_future_daily(start=date, end=date, market=symbolMarket(var))
        df = df[df['variety'] == var]
        if plot:
            _plot_bar(df['close'].tolist(), df['symbol'].tolist())
        return df
    if type == 'var':
        # Cross-section over all four exchanges on `date`.
        df = pd.DataFrame()
        for market in ['dce','cffex','shfe','czce']:
            df = df.append(get_future_daily(start=date, end=date, market=market))
        varList = list(set(df['variety']))
        dfL=pd.DataFrame()
        for var in varList:
            ry = get_rollYield(date, var, df = df)
            if ry:
                dfL = dfL.append(pd.DataFrame([ry], index=[var], columns=['rollYield','nearBy','deferred']))
        dfL['date'] = date
        dfL = dfL.sort_values('rollYield')
        if plot:
            _plot_bar(dfL['rollYield'].tolist(), dfL.index)
        return dfL
    if type == 'date':
        # Time series: one variety, one row per day in [start, end].
        dfL=pd.DataFrame()
        while start <= end:
            try:
                ry = get_rollYield(start, var)
                if ry:
                    dfL = dfL.append(pd.DataFrame([ry], index=[start], columns=['rollYield','nearBy','deferred']))
            except:
                pass  # non-trading days / missing data are skipped silently
            start += datetime.timedelta(days=1)
        if plot:
            _plot(pd.to_datetime(dfL.index), dfL['rollYield'].tolist())
        return dfL
def get_rollYield(date = None, var = 'IF',symbol1 = None, symbol2 = None, df = None):
    """
    Annualised roll yield between a variety's main and sub-main contracts
    (or between two explicitly given contracts) on one day.

    Parameters
    ----------
    date    : str YYYYMMDD; defaults to today
    var     : str, variety code such as RB or AL
    symbol1 : str, explicit contract, e.g. rb1810 (optional)
    symbol2 : str, explicit contract, e.g. rb1812 (optional)
    df      : DataFrame of daily bars; pass one to avoid re-downloading

    Returns
    -------
    (rollYield, nearBy, deferred) tuple, or None on a non-trading day,
    or False when either close price is zero.
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s非交易日' % date.strftime('%Y%m%d'))
        return None
    if symbol1:
        var = symbol2varietie(symbol1)
    if type(df) != type(pd.DataFrame()):
        # No bars supplied -- fetch the day's bars for this variety's market.
        market = symbolMarket(var)
        df = get_future_daily(start=date, end=date, market=market)
    if var:
        # Pick the two most-held contracts as main / sub-main.
        df = df[df['variety'] == var].sort_values('open_interest',ascending=False)
        df['close']=df['close'].astype('float')
        symbol1 = df['symbol'].tolist()[0]
        symbol2 = df['symbol'].tolist()[1]
    close1 = df['close'][df['symbol'] == symbol1.upper()].tolist()[0]
    close2 = df['close'][df['symbol'] == symbol2.upper()].tolist()[0]
    # Signed month distance between the two contract codes (YYMM digits).
    A = re.sub(r'\D', '', symbol1)
    A1 = int(A[:-2])
    A2 = int(A[-2:])
    B = re.sub(r'\D', '', symbol2)
    B1 = int(B[:-2])
    B2 = int(B[-2:])
    c = (A1 - B1) * 12 + (A2 - B2)
    if close1 == 0 or close2 == 0:
        return False
    # Annualise: log price ratio per month of gap, times 12.  The returned
    # symbols are ordered (nearBy, deferred).
    if c > 0:
        return np.log(close2/close1)/c*12, symbol2,symbol1
    else:
        return np.log(close2/close1)/c*12, symbol1,symbol2
if __name__ == '__main__':
    # Example: cross-sectional roll yields for all varieties on 2018-12-14.
    d = get_rollYield_bar(type = 'var', date = '20181214',plot = True)
    print(d)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,152
|
18505161903/fwshare
|
refs/heads/master
|
/example/LSTM/Keras.py
|
# Minimal multilayer-perceptron example: train a binary classifier on the
# mainSignal.csv feature matrix and report its training accuracy.
from keras.models import Sequential
from keras.layers import Dense
from pandas import DataFrame
from pymongo import MongoClient
import pandas as pd
import numpy

# Load the dataset: the first 8 columns are features, column 8 is the
# 0/1 label.
dataset = numpy.loadtxt("mainSignal.csv", delimiter=",")
X = dataset[:,0:8]
Y = dataset[:,8]

# 1. Define the model.
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# 2. Compile the model.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# 3. Train.  BUG FIX: the `nb_epoch` keyword was removed from Keras -- the
# parameter is named `epochs`.
history = model.fit(X, Y, epochs=100, batch_size=10)

# 4. Evaluate on the training data.
loss, accuracy = model.evaluate(X, Y)
print("\nLoss: %.2f, Accuracy: %.2f%%" % (loss, accuracy*100))

# 5. Predict and measure raw prediction accuracy.
probabilities = model.predict(X)
predictions = [float(round(x)) for x in probabilities]
accuracy = numpy.mean(predictions == Y)
print("Prediction Accuracy: %.2f%%" % (accuracy*100))
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,153
|
18505161903/fwshare
|
refs/heads/master
|
/22.py
|
# Demonstrate float-to-int conversion: int() truncates toward zero.
x = 5.3
y = int(x)
print(y)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,154
|
18505161903/fwshare
|
refs/heads/master
|
/example/futures_demo.py
|
# encoding: utf-8
# Download daily trade-rank (top-broker position) tables for the four Chinese
# futures exchanges and append them to c:\FuturesPosition.csv.
from opendatatools import futures
import datetime
import os
import pandas as pd
import math
import json
from pymongo import MongoClient

if __name__ == '__main__':
    markets = ['SHF','CZC', 'DCE', 'CFE']#, 'SHF','CZC', 'DCE', 'CFE'
    # Database connection (inserts below are commented out in this version).
    client = MongoClient('localhost', 27017)
    db = client.futures  # database handle
    position = db.position  # collection handle
    # Start from a fresh CSV on every run.
    if os.path.exists(r"c:\FuturesPosition.csv"):
        os.remove(r"c:\FuturesPosition.csv")
    for market in markets:
        begin = datetime.date(2018, 10, 10)
        end = datetime.date(2018, 10, 10)
        for i in range((end - begin).days + 1):
            day = begin + datetime.timedelta(days=i)
            days=day.strftime('%Y-%m-%d')
            print(days)
            try:
                df, msg = futures.get_trade_rank(market, date=days)
                print(days,market)
                #position.insert(json.loads(df.T.to_json()).values())
            except:
                print(days,market,'数据异常')
                continue
            # Append after the first write so the CSV header appears once.
            if os.path.exists(r"c:\FuturesPosition.csv"):
                df.to_csv(r"c:\FuturesPosition.csv",mode='a',encoding='ANSI',header=False)
            else:
                df.to_csv(r"c:\FuturesPosition.csv",encoding='ANSI')
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,155
|
18505161903/fwshare
|
refs/heads/master
|
/example/futures_market.py
|
# encoding: utf-8
# Fetch daily bars from the CFFEX and DCE exchanges for a date range and
# store them in the local MongoDB `futures5.market` collection.
import pandas as pd
import datetime
import json
from pymongo import MongoClient
import fushare
import time

print(fushare.__version__)
pd.set_option('display.width', None)        # console display width
pd.set_option('display.max_rows', None)     # show all rows
pd.set_option('display.max_columns', None)  # show all columns

if __name__ == '__main__':
    # Database connection.
    client = MongoClient('localhost', 27017)
    db = client.futures5
    market = db.market
    # Date range to load (inclusive).
    begin = datetime.date(2019, 9, 12)
    end = datetime.date(2019, 9, 12)
    for i in range((end - begin).days + 1):
        day = begin + datetime.timedelta(days=i)
        days = day.strftime('%Y%m%d')
        # Daily bars per exchange (SHFE line kept disabled as before).
        # NOTE(review): zce is fetched but not included in `frames` below --
        # confirm whether CZCE data was meant to be stored.
        # shf = fushare.get_shfe_daily(days)
        zce = fushare.get_czce_daily(days)
        cff = fushare.get_cffex_daily(days)
        dce = fushare.get_dce_daily(days)
        frames = [cff, dce]
        try:
            # Merge the exchange frames, tag each row with the date, insert.
            df2 = pd.concat(frames)
            df2 = df2.dropna(axis=0, how='any')
            # BUG FIX: the original had `df2 = 1` here, clobbering the
            # DataFrame; the next line then raised on every iteration, every
            # day was reported as 数据异常, and nothing was ever inserted.
            df2['date'] = days
            df2 = df2.dropna(axis=0, how='any')
            df2 = df2.reset_index()
            market.insert_many(json.loads(df2.T.to_json()).values())
            print(json.loads(df2.T.to_json()).values())
        except Exception:
            print(days, '数据异常')
            continue
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,156
|
18505161903/fwshare
|
refs/heads/master
|
/futures/日行情波动表.py
|
# encoding: utf-8
# Fetch the daily bar history of one futures contract (JD2006 on DCE) via
# the tushare pro API and print it in chronological order.  The commented
# section that follows is an earlier broker net-position analysis kept for
# reference.
import pandas as pd
import tushare as ts
import matplotlib.pyplot as plt

pd.set_option('display.width', None)        # console display width
pd.set_option('display.max_rows', None)     # show all rows
pd.set_option('display.max_columns', None)  # show all columns

pro = ts.pro_api('c0cad8f56caba4e70702d606290d04f88514a6bef046f60d13144151')
df = pro.fut_daily( symbol='JD2006', exchange='DCE')
df=df.sort_values(['trade_date'],ascending=True)  # oldest first
print(df)
# df2=df.fillna(0)
# jd=df2.loc[df2['broker']!='期货公司会员']
# jd=jd.copy()
# broker2=['宏源期货','方正中期','英大期货','美尔雅期货','格林大华']
# jd['净持仓']=jd.apply(lambda x: x['long_hld'] - x['short_hld'], axis=1)
# jd=jd[jd['trade_date']>='20200310']
#
# net_position = jd.groupby(['trade_date', 'symbol'])['净持仓'].sum().reset_index(name='净空汇总')
# sums = jd.copy()
# # sums = sums[sums['symbol']=='JD2005']
# times = sums[sums['trade_date'] == sums['trade_date'].iloc[-1]]
# # print(times)
# brokers = times['broker']
# df = pd.DataFrame()
# for i in brokers:
# broker = [sums[sums['broker'] == i]]
# df = df.append(broker)
#
# df['净变动'] = df['净持仓'] - df['净持仓'].shift(1).fillna(0)
# # print(df)
# df = df[df['trade_date'] == df['trade_date'].iloc[-1]]
# sums = df.sort_values('净持仓')
# sums = sums[sums['净持仓'] != 0]
# sums =sums[['trade_date', 'symbol','broker','净持仓','净变动','vol','vol_chg','long_hld','long_chg','short_hld','short_chg']]
# print(sums)
#
#
#
# sort=sums.sort_values('净持仓',inplace=False)
# plt.bar(range(len(sort['净持仓'])),sort['净持仓'])
# plt.xticks(range(len(sort['broker'])),sort['broker'],rotation='vertical')
# # plt.xlabel('品种')
# plt.ylabel('净持仓')
# plt.title(' 净空排名 '+sort['trade_date'].iloc[0])
# plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签
# plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号
# plt.show()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,157
|
18505161903/fwshare
|
refs/heads/master
|
/futures/mainSubmain(1).py
|
# -*- coding:utf-8 -*-
import numpy as np
from fushare.symbolVar import *
from fushare.dailyBar import *
from fushare.cot import *
from fushare.cons import *
import tushare as ts
import math
import matplotlib as plot
pro = ts.pro_api('c0cad8f56caba4e70702d606290d04f88514a6bef046f60d13144151')
calendar = cons.get_calendar()
pd.set_option('display.width', None) # 设置字符显示宽度
pd.set_option('display.max_rows', None) # 设置显示最大行
pd.set_option('display.max_columns', None) # 设置显示最大行
def get_mainSubmainMarket_bar(type = 'symbol', var = 'RB',date= None, start = None, end = None):
    """
    Main/sub-main contract spread table.

    type='symbol': daily bars for every contract of `var` on `date`.
    type='var'   : for every variety on `date`, spread metrics between the
                   main and sub-main contracts, filtered to |basis| >= 2 and
                   non-positive month distance.

    Returns a DataFrame (columns: 差价, basicPrice, symbol1, symbol2,
    M-differ, plus date).
    """
    # Normalise the date arguments (defaults: today / latest data date).
    # FIX: the original duplicated the `date = cons.convert_date(...)` line.
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    start = cons.convert_date(start) if start is not None else datetime.date.today()
    end = cons.convert_date(end) if end is not None else cons.convert_date(cons.get_latestDataDate(datetime.datetime.now()))
    if type == 'symbol':
        df = get_future_daily(start=date, end=date, market=symbolMarket(var))
        df = df[df['variety'] == var]
        return df
    if type == 'var':
        # Collect the day's bars from three exchanges, then compute the
        # main/sub-main spread for each variety present.
        df = pd.DataFrame()
        for market in ['dce','shfe','czce']:
            df = df.append(get_future_daily(start=date, end=date, market=market))
        varList = list(set(df['variety']))
        dfL=pd.DataFrame()
        for var in varList:
            try:
                ry = get_mainSubmainMarket(date, var, df = df)
                if ry:
                    dfL = dfL.append(pd.DataFrame([ry], index=[var], columns=['差价','basicPrice','symbol1','symbol2','M-differ']))
            except:
                pass  # varieties without two liquid contracts are skipped
        dfL['date'] = date
        dfL = dfL.sort_values('basicPrice')
        # Keep pronounced basis (>= 2% either direction) with non-positive
        # month distance only.
        dfL = dfL[(dfL['basicPrice'] >= 2) | (dfL['basicPrice'] <= -2)]
        dfL=dfL[dfL['M-differ']<=0]
        return dfL
def get_mainSubmainMarket(date = None, var = 'IF',symbol1 = None, symbol2 = None,c=None,df = None):
    """
    Spread metrics between a variety's main and sub-main contracts on `date`.

    Returns (price spread, basis % per month, symbol1, symbol2, month gap),
    or None on a non-trading day, or False when either close is zero.
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s非交易日' % date.strftime('%Y%m%d'))
        return None
    if symbol1:
        var = symbol2varietie(symbol1)
    if type(df) != type(pd.DataFrame()):
        # No bars supplied -- fetch the day's bars for this variety's market.
        market = symbolMarket(var)
        df = get_future_daily(start=date, end=date, market=market)
    if var:
        # The two most-held contracts are taken as main / sub-main.
        df = df[df['variety'] == var].sort_values('open_interest', ascending=False)
        df['close'] = df['close'].astype('float')
        symbol1 = df['symbol'].tolist()[0]
        symbol2 = df['symbol'].tolist()[1]
    close1 = df['close'][df['symbol'] == symbol1.upper()].tolist()[0]
    close2 = df['close'][df['symbol'] == symbol2.upper()].tolist()[0]
    # Signed month distance between the two contract codes (YYMM digits).
    A = re.sub(r'\D', '', symbol1)
    A1 = int(A[:-2])
    A2 = int(A[-2:])
    B = re.sub(r'\D', '', symbol2)
    B1 = int(B[:-2])
    B2 = int(B[-2:])
    c = (A1 - B1) * 12 + (A2 - B2)
    if close1 == 0 or close2 == 0:
        return False
    if c > 0:
        return close1-close2,round((close1-close2)/close1/c*100,2), symbol1,symbol2,c
    else:
        return close2-close1,round((close2-close1)/close2/c*100,2), symbol1,symbol2,c
def get_mainSubmainPosition(date = None, var = 'IF',symbol1 = None, symbol2 = None,c=None,df = None):
    """
    Per-broker arbitrage positioning between a variety's main and sub-main
    contracts: brokers holding opposite net positions in the two legs, with
    the overlapping lot count reported as 'ArbitrageFund'.

    Returns a DataFrame (date, broker, symbol1, netPosition1, symbol2,
    netPosition2, ArbitrageFund), or None on a non-trading day.
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s非交易日' % date.strftime('%Y%m%d'))
        return None
    if symbol1:
        var = symbol2varietie(symbol1)
    if type(df) != type(pd.DataFrame()):
        market = symbolMarket(var)
        df = get_future_daily(start=date, end=date, market=market)
    varList = list(set(df['variety']))
    if var:
        # Main / sub-main = the two contracts with the largest open interest.
        df = df[df['variety'] == var].sort_values('open_interest',ascending=False)
        df['close'] = df['close'].astype('float')
        symbol1 = df['symbol'].tolist()[0]
        symbol2 = df['symbol'].tolist()[1]
    # Broker holdings per leg from tushare; net = long - short.
    fut1= pro.fut_holding(trade_date=date.strftime('%Y%m%d'),symbol=symbol1).dropna(axis=0, how='any')
    fut2 = pro.fut_holding(trade_date=date.strftime('%Y%m%d'), symbol=symbol2).dropna(axis=0, how='any')
    fut1['netPosition1'] = fut1.apply(lambda x: x['long_hld'] - x['short_hld'], axis=1)
    fut2['netPosition2'] = fut2.apply(lambda x: x['long_hld'] - x['short_hld'], axis=1)
    df2=pd.merge(fut1,fut2,on=['trade_date','broker'],how='outer').dropna()
    df2=df2[['trade_date','broker','symbol_x','netPosition1','symbol_y','netPosition2']]
    df2=df2.rename(columns={'trade_date':'date','symbol_x':'symbol1','symbol_y':'symbol2'})
    # Keep only brokers net-long one leg and net-short the other.
    df3=df2.loc[(df2['netPosition1']>0) & (df2['netPosition2']<0)]
    df4 = df2.loc[(df2['netPosition1']<0) & (df2['netPosition2']>0)]
    df2=pd.concat([df3,df4])
    def valabs1(row):
        # Absolute net position on leg 1.
        row['absvalue1'] = row['netPosition1'].abs()
        return row
    valabs11= df2.groupby(['date','symbol1']).apply(valabs1)
    def valabs2(row):
        # Absolute net position on leg 2.
        row['absvalue2'] = row['netPosition2'].abs()
        return row
    valabs22= df2.groupby(['date', 'symbol2']).apply(valabs2)
    df2=pd.merge(valabs11,valabs22,how='left')
    # Arbitrage size = the smaller of the two absolute legs (0 when equal).
    df2['ArbitrageFund']= df2.apply(lambda x: x['absvalue1']if x['absvalue1']<x['absvalue2'] else x['absvalue2'] if x['absvalue1']>x['absvalue2']else 0 ,axis=1)
    df2=df2.sort_values(['ArbitrageFund'])
    df=df2[['date','broker','symbol1','netPosition1','symbol2','netPosition2','ArbitrageFund']]
    df=df[df['broker']!='期货公司会员']
    return df
# print(data)
if __name__ == '__main__':
    # Rank broker arbitrage positioning across every variety for one day.
    date = '20200929'
    df=pd.DataFrame()
    for i in cons.vars:
        try:
            d = get_mainSubmainPosition(date,i)
            df=df.append(d)
        except:
            pass  # varieties with missing holdings data are skipped
    valsort=df.sort_values('ArbitrageFund',ascending=False)
    print(valsort)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,158
|
18505161903/fwshare
|
refs/heads/master
|
/opendatatools/stock/stock_interface.py
|
# encoding: utf-8
from .stock_agent import SHExAgent, SZExAgent, CSIAgent
from opendatatools.common import get_current_day
shex_agent = SHExAgent()
szex_agent = SZExAgent()
csi_agent = CSIAgent()
def get_index_list(market='SH'):
    """Return the index list for the given market ('SH', 'SZ' or 'CSI')."""
    dispatch = {'SH': shex_agent, 'SZ': szex_agent, 'CSI': csi_agent}
    agent = dispatch.get(market)
    if agent is not None:
        return agent.get_index_list()
def get_index_component(symbol):
    """Return constituents for a 'code.MARKET' symbol, or None if malformed
    or the market suffix is unknown."""
    parts = symbol.split(".")
    if len(parts) != 2:
        return None
    index, market = parts
    if market == 'SH':
        return shex_agent.get_index_component(index)
    if market == 'SZ':
        return szex_agent.get_index_component(index)
    if market == 'CSI':
        return csi_agent.get_index_component(index)
    return None
def get_rzrq_info(market='SH', date = None):
    """Margin-trading (rzrq) info for one market on one date (default: today,
    formatted YYYYMMDD).  Unknown markets yield (None, None)."""
    day = get_current_day(format = '%Y%m%d') if date is None else date
    if market == 'SH':
        return shex_agent.get_rzrq_info(day)
    if market == 'SZ':
        return szex_agent.get_rzrq_info(day)
    return None, None
def get_dividend(symbol):
    """Dividend history for a 'code.MARKET' symbol.

    NOTE(review): only the 'SH' branch is implemented here; any other input
    falls through and returns None implicitly.
    """
    temp = symbol.split(".")
    if len(temp) == 2:
        market = temp[1]
        code = temp[0]
        if market == 'SH':
            return shex_agent.get_dividend(code)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,159
|
18505161903/fwshare
|
refs/heads/master
|
/example/aqi_demo.py
|
# encoding: utf-8
# Demo of the opendatatools AQI (air-quality index) API.
from opendatatools import aqi

# Daily AQI for all major cities on a given date:
#aqi.get_daily_aqi('2018-01-01')
# Daily AQI history for a single city:
#aqi.get_daily_aqi_onecity('北京市')
# Hourly AQI for one city on one day:
#aqi_hour = aqi.get_hour_aqi_onecity('北京市', '2018-05-26')
#aqi_hour.set_index('time', inplace=True)
#print(aqi_hour)
# Real-time hourly AQI for all cities:
aqi_hour = aqi.get_hour_aqi()
print(aqi_hour)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,160
|
18505161903/fwshare
|
refs/heads/master
|
/futures/数据库去重操作.py
|
// Remove duplicate documents from the `position` collection: group by the
// logical record key (date, variety, symbol, long_party_name), collect the
// _ids of each duplicate group, keep the first document and delete the rest.
db.getCollection('position').aggregate([
{
    // One bucket per logical record; `dups` gathers every _id in the bucket.
    $group: { _id: {date: '$date',variety: '$variety',symbol:'$symbol',long_party_name:'$long_party_name'},count: {$sum: 1},dups: {$addToSet: '$_id'}}
},
{
    // Keep only groups that actually contain duplicates.
    $match: {count: {$gt: 1}}
}
],{allowDiskUse:true}).forEach(function(doc){
doc.dups.shift(); // keep the first _id of the group
db.position.remove({_id: {$in: doc.dups}}); // delete the remaining duplicates
})
# position.aggregate([
# {
# $group: { _id: {date: '$date',variety: '$variety'},count: {$sum: 1}}},{$match:{count:{$gt:1}}}])
# ]).forEach(function(doc){
# doc.dups.shift();
# db.userInfo.remove({_id: {$in: doc.dups}});
# })
#
#
# # futures.getCollection('market').aggregate([
# # {
# # $group: { _id: {userName: '$userName',age: '$age'},count: {$sum: 1},dups: {$addToSet: '$_id'}}
# # },
# # {
# # $match: {count: {$gt: 1}}
# # }
# # ]).forEach(function(doc){
# # doc.dups.shift();
# # db.userInfo.remove({_id: {$in: doc.dups}});
# # })
#
#
#
# db.getCollection('market').aggregate([
# {
# $group: { _id: {date: '$date',variety: '$variety'},count: {$sum: 1},dups: {$addToSet: '$_id'}}
# },
# {
# $match: {count: {$gt: 1}}
# }
# ]).forEach(function(doc){
# doc.dups.shift();
# db.userInfo.remove({_id: {$in: doc.dups}});
# })
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,161
|
18505161903/fwshare
|
refs/heads/master
|
/futures/Cfd.py
|
# -*- coding:utf-8 -*-
import numpy as np
from fushare.symbolVar import *
from fushare.dailyBar import *
from fushare.cot import *
from fushare.cons import *
import pandas as pd
# import matplotlib as plot
from pymongo import MongoClient
calendar = cons.get_calendar()
pd.set_option('display.width', None) # 设置字符显示宽度
pd.set_option('display.max_rows', None) # 设置显示最大行
pd.set_option('display.max_columns', None) # 设置显示最大行
def get_mainSubmainMarket_bar(type = 'symbol', var = 'RB',date= None, start = None, end = None):
    """
    type='symbol': daily bars for every contract of `var` on `date`.
    type='var'   : per-variety main/sub-main spread table for `date`,
                   filtered to non-positive month distance, then cut to the
                   top third of positive slopes (long leg) plus the bottom
                   third of negative slopes (short leg).
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    # start = cons.convert_date(start) if start is not None else datetime.date.today()
    # end = cons.convert_date(end) if end is not None else cons.convert_date(cons.get_latestDataDate(datetime.datetime.now()))
    if type == 'symbol':
        df = get_future_daily(start=date, end=date, market=symbolMarket(var))
        df = df[df['variety'] == var]
        return df
    if type == 'var':
        # Collect the day's bars from all four exchanges.
        df = pd.DataFrame()
        for market in ['dce','cffex','shfe','czce']:
            df = df.append(get_future_daily(start=date, end=date, market=market))
            df['market'] = market.upper()
        varList = list(set(df['variety']))
        dfL=pd.DataFrame()
        for var in varList:
            try:
                ry = get_mainSubmainMarket(date, var,df = df)
                if ry:
                    dfL = dfL.append(pd.DataFrame([ry], index=[var],columns=['差价','basicPrice(%)','symbol1','symbol2','M-differ','Slope(%)']))
            except:
                pass  # varieties lacking two liquid contracts are skipped
        dfL['date'] = date
        for i in dfL['symbol2'].drop_duplicates():
            df2=df[df['symbol']==i]['market']
            # NOTE(review): the append result is discarded (DataFrame.append
            # is not in-place), so this loop has no effect -- confirm intent.
            dfL.append(df2)
        dfL = dfL[dfL['M-differ']<=0]
        dfL['signal'] = np.sign(dfL['Slope(%)'])
        dfL = dfL.sort_values('Slope(%)')
        # Long leg: top third of positive slopes.
        long = dfL[dfL['signal'] > 0]
        long1 = int(long['signal'].count()/3)
        long = long.tail(long1)
        # Short leg: bottom third of negative slopes.
        short = dfL[dfL['signal'] < 0]
        short1 = int(short['signal'].count() / 3)
        short = short.head(short1)
        dfL = long.append(short)
        return dfL
def get_mainSubmainMarket(date = None, var = 'IF',symbol1 = None, symbol2 = None,c=None,df = None):
    """
    Spread metrics between a variety's main and sub-main contracts on `date`.

    Returns (price spread, basis % per month, symbol1, symbol2, month gap,
    annualised slope %), or None on a non-trading day, or False when either
    close price is zero.
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s非交易日' % date.strftime('%Y%m%d'))
        return None
    if symbol1:
        var = symbol2varietie(symbol1)
    if type(df) != type(pd.DataFrame()):
        # No bars supplied -- fetch the day's bars for this variety's market.
        market = symbolMarket(var)
        df = get_future_daily(start=date, end=date, market=market)
    if var:
        # The two most-held contracts are taken as main / sub-main.
        df = df[df['variety'] == var].sort_values('open_interest', ascending=False)
        df['close'] = df['close'].astype('float')
        symbol1 = df['symbol'].tolist()[0]
        symbol2 = df['symbol'].tolist()[1]
    close1 = df['close'][df['symbol'] == symbol1.upper()].tolist()[0]
    close2 = df['close'][df['symbol'] == symbol2.upper()].tolist()[0]
    # Signed month distance between the two contract codes (YYMM digits).
    A = re.sub(r'\D', '', symbol1)
    A1 = int(A[:-2])
    A2 = int(A[-2:])
    B = re.sub(r'\D', '', symbol2)
    B1 = int(B[:-2])
    B2 = int(B[-2:])
    c = (A1 - B1) * 12 + (A2 - B2)
    if close1 == 0 or close2 == 0:
        return False
    if c > 0:
        # Long case: (near - far) / far / month gap * 100%.
        return close1-close2,round((close1-close2)/close2/c*100,2), symbol1,symbol2,c,round((close2-close1)/((close2+close1)/2)/c*12,2)
    else:
        # Short case: (far - near) / far / month gap * 100%.
        return close2-close1,round((close2-close1)/close2/c*100,2), symbol1,symbol2,c,round((close2-close1)/((close2+close1)/2)/c*12,2)
#
#月差接近3%两月资金炒翻翻,商品保证金一般6%左右,远月向近月靠拢2月资金翻倍
if __name__ == '__main__':
    # Build the daily basis table over a date range; the insert into the
    # MongoDB `futures34.basicPrice` collection is currently disabled.
    client = MongoClient('localhost', 27017)
    db = client.futures34
    cfd = db.basicPrice
    begin = datetime.date(2020, 10, 20)
    end = datetime.date(2020, 10, 20)
    for i in range((end - begin).days + 1):
        day = begin + datetime.timedelta(days=i)
        days = day.strftime('%Y%m%d')
        try:
            data =get_mainSubmainMarket_bar(date=days, type='var')
            data['date'] = days
            print(data)
            # cfd.insert_many(json.loads(data.T.to_json()).values())
        except:
            print(days, '数据异常')
            continue
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,162
|
18505161903/fwshare
|
refs/heads/master
|
/example/test7.py
|
# encoding: utf-8
# Tag every record in `positionSignal` since `date` with a long/short flow
# signal and copy the result into the `lsPositionSignal` collection.
import pymongo,json
import pandas as pd
from pandas import Series,DataFrame
import csv
from scipy.stats import pearsonr
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import time,datetime

pd.set_option('display.width', None)        # console display width
pd.set_option('display.max_rows', None)     # show all rows
pd.set_option('display.max_columns', None)  # show all columns

client = pymongo.MongoClient('localhost', 27017)
futures = client.futures
signal = futures.position
positionSignal =futures.positionSignal
lsPositionSignal =futures.lsPositionSignal

date = '20150101'
df = DataFrame(list(positionSignal.find({'date':{'$gte':date}})))
print(df.head(5))
# Signal: +1 when longs added and shorts cut, -1 for the opposite, else 0.
df['signal'] = df.apply(lambda x: 1 if x['long_openIntr_chg']>0 and x['short_openIntr_chg']<0 else -1 if x['long_openIntr_chg']<0 and x['short_openIntr_chg']>0 else 0 ,axis=1)
del df["_id"]  # Mongo ObjectIds are not JSON-serialisable
lsPositionSignal.insert_many(json.loads(df.T.to_json()).values())
print(json.loads(df.T.to_json()).values())
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,163
|
18505161903/fwshare
|
refs/heads/master
|
/futures/mainSubmainMarket.py
|
# encoding: utf-8
# Fetch the daily main/sub-main contract table from fushare for each day in a
# range and (optionally, currently commented out) store it into MongoDB.
import pandas as pd
import datetime,time
import json
from pymongo import MongoClient
import fushare as f
from pandas import DataFrame
# from datetime import datetime
pd.set_option('display.width', None) # unlimited display width
pd.set_option('display.max_rows', None) # show all rows
pd.set_option('display.max_columns', None) # show all columns
# connect to the database
client = MongoClient('localhost', 27017)
db = client.futures34
mainSubmainMarket = db.mainSubmainMarket
# date range to fetch
begin = datetime.date(2020, 3, 1)
# end = datetime.date(2010,8,24)
end = datetime.date.today()
for i in range((end - begin).days + 1):
    day = begin + datetime.timedelta(days=i)
    days = day.strftime('%Y%m%d')
    try:
        df=f.get_mainSubmain_bar(type = 'var', date = days)
        # convert numeric-looking columns; leave the rest untouched
        df = df.apply(pd.to_numeric, errors="ignore")
        print(df)
        # mainSubmainMarket.insert_many(df.to_dict('records'))
        # print(df.to_dict('records'))
    # NOTE(review): bare except treats every failure as a non-trading day
    except:
        print(days, '数据异常')
        continue
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,164
|
18505161903/fwshare
|
refs/heads/master
|
/example/LSTM/csvtxt.py
|
# csvtxt.py -- read a comma-separated CSV file with pandas and print it.
#
# Fixes vs. the original:
#  * 'E:\rbindex2019.csv' contained an escaped carriage return (\r is an
#    escape sequence), so the path never matched the file on disk; a raw
#    string keeps the backslash literal.
#  * pandas.read_csv has no `index` parameter (that keyword belongs to
#    DataFrame.to_csv), so index=False raised TypeError; it was removed.
import pandas as pd

# first row is the header (header='infer' is the read_csv default, kept for clarity)
df = pd.read_csv(r'E:\rbindex2019.csv', header='infer', sep=',')
print(df)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,165
|
18505161903/fwshare
|
refs/heads/master
|
/futures/涨跌排名_short.py
|
# Rank all symbols by today's drop from the period high and plot the worst
# performers (today's amplitude < -25%) as a bar chart.
import pymongo
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import json
import datetime
import time
from pandas import Series,DataFrame
pd.set_option('display.width', None) # unlimited display width
pd.set_option('display.max_rows', None) # show all rows
pd.set_option('display.max_columns', None) # show all columns
# two lines to fix garbled Chinese characters in matplotlib plots
plt.rcParams['font.sans-serif']=['SimHei'] # render Chinese labels correctly
plt.rcParams['axes.unicode_minus']=False # render the minus sign correctly
from IPython.core.display import display, HTML
# connect to the database
client = pymongo.MongoClient('localhost',27017)
futures = client.futures2
market = futures.market
# latest stored trading date (newest document by _id)
begin = DataFrame(list(market.find({}).sort([('_id', -1)]).limit(1)))
begin = begin['date'][0]
print(begin)
# every symbol quoted on that date
symbol = DataFrame(list(market.find({'date': begin})))
symbol = symbol['symbol']
df = pd.DataFrame()
for var in symbol:
    try:
        market_symbol = DataFrame(list(market.find({'symbol': var, 'date': {'$gte': '20190501'}})))
        # print(market_symbol)
        # market_symbol = market_symbol[market_symbol['date']<'20190101']
        high = market_symbol['high'].max()
        low = market_symbol['low'].min()
        close = market_symbol['close'].iloc[-1]
        # symbols longer than 8 chars end the whole scan
        # NOTE(review): `break` (not `continue`) stops at the first long
        # symbol -- confirm this is intended
        if len(var) > 8:
            break
        今日幅度 = 0
        # if close>low and low>0:
        #     今日幅度 = (close/low-1)*100
        # amplitude = percent distance of the close below the period high
        if high > close and low > 0:
            今日幅度 = (close / high - 1) * 100
        elif close == high:
            今日幅度 = 0
        # NOTE(review): this branch duplicates the first formula; the
        # commented-out code above suggests a low-based calc was intended
        elif low > 0:
            今日幅度 = (close / high - 1) * 100
        print(close, var)
        dict_symbol = {'date': [begin], 'symbol': [var], 'High': [high], 'Low': [low], 'close': [close], '今日幅度': 今日幅度}
        df1 = pd.DataFrame(dict_symbol)
        df = df.append(df1)
    except:
        continue
# keep only symbols that dropped more than 25% from their high
df = df[df['今日幅度']<-25].sort_values('今日幅度')
print(df)
df=df.sort_values('今日幅度',inplace=False)
plt.bar(range(len(df['今日幅度'])),df['今日幅度'])
plt.xticks(range(len(df['symbol'])),df['symbol'])
# plt.xlabel('品种')
plt.ylabel('今日幅度')
plt.title(' 涨跌排名 '+df['date'].iloc[0])
plt.rcParams['font.sans-serif'] = ['SimHei'] # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False # render the minus sign correctly
plt.show()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,166
|
18505161903/fwshare
|
refs/heads/master
|
/example/tets.py
|
# Scratch script: connect to the local futures MongoDB and fetch a single
# position document; the net-position signal pipeline below is disabled.
import pymongo
import pandas as pd
from pandas import Series,DataFrame
import matplotlib as plt
import matplotlib.dates as mdate
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# two lines to fix garbled Chinese characters in matplotlib plots
plt.rcParams['font.sans-serif']=['SimHei'] # render Chinese labels correctly
plt.rcParams['axes.unicode_minus']=False # render the minus sign correctly
pd.set_option('display.width', None) # unlimited display width
pd.set_option('display.max_rows', None) # show all rows
pd.set_option('display.max_columns', None) # show all columns
# connect to the database
client = pymongo.MongoClient('localhost',27017)
print(client)
futures = client.futures
marketdata = futures.marketdata
# unit = futures.unit
position = futures.position
# df = DataFrame(list(position.find({'date': {'$gte': '20070523'}})))
# sample lookup: volume for one variety on one day
trade = position.find_one({'date': '20020107', 'variety': 'CU'})['vol']
print(trade)
# date='20180101'
# #加载数据
# position = DataFrame(list(position.find({'date': {'$gte': date}})))
# marketdata = DataFrame(list(marketdata.find({'date': {'$gte': date}})))
# #主力合约
# #替换空值为0
# # clean_z = marketdata['open_interest'].fillna(0)
# # clean_z[clean_z==''] = '0'
# # marketdata['open_interest'] = clean_z
# # 字段类型转换
# marketdata['open_interest'] = marketdata['open_interest'].astype('int')
# #选取条件
# marketdata=marketdata.loc[marketdata['open_interest']>1000]
# #以日期和持仓量2个字段分组筛选唯一主力合约
# marketdata=marketdata.groupby(['date','variety']).apply(lambda x: x[x.open_interest==x.open_interest.max()])
# #去重交易合约
# marketdata=marketdata.drop_duplicates()
# #价格变化量
# # marketdata['change'] = marketdata['close'] - marketdata['open']
# marketdata = marketdata.copy()
# print(marketdata.head())
#
# #净持仓变动量
# netPosition=position.groupby(['date','variety'])[['long_openIntr','short_openIntr']].sum()
# netPosition['净持仓']=netPosition.apply(lambda x:x['long_openIntr']-x['short_openIntr'],axis=1)
# netPosition['上一日净持仓']=netPosition.groupby('variety')['净持仓'].shift(1)
# netPosition['净持仓变化量']=netPosition.apply(lambda x: x['净持仓']-x['上一日净持仓'],axis=1)
# netPosition=netPosition.dropna().reset_index()
# print(netPosition.head())
#
# # #净持仓,价格变化量合并
# df=pd.merge(netPosition,marketdata,on=['date', 'variety'],how='outer')
# df['交易信号'] = df.apply(lambda x: 0 if x['净持仓变化量']*x['change']>=0 else 1 if x['净持仓变化量']>0 else -1,axis=1)
# print(df.head())
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,167
|
18505161903/fwshare
|
refs/heads/master
|
/example/futures_signal.py
|
# encoding: utf-8
import pymongo,json
import pandas as pd
from pandas import Series,DataFrame
import csv
from scipy.stats import pearsonr
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import time,datetime
# from datetime import datetime
pd.set_option('display.width', None) # 设置字符显示宽度
pd.set_option('display.max_rows', None) # 设置显示最大行
pd.set_option('display.max_columns', None) # 设置显示最大行
client = pymongo.MongoClient('localhost', 27017)
futures = client.futures
unit = futures.unit
main = futures.main
signal = futures.signal
lots = futures.lots
start='20110101'
# var='A'
unit = DataFrame(list(unit.find()))
main= DataFrame(list(main.find({'date': {'$gte': start}})))
lots = DataFrame(list(lots.find()))
unit=unit[['variety','unit','minPrice']]
lots = lots[['variety','lots']]
# del main['_id']
main['signal']=main.groupby('variety')['交易信号'].shift(1)
main=main.dropna()
del main['_id']
# print(main.head())
df=pd.merge(main,unit,on=['variety'])#.drop_duplicates()
df1=pd.merge(df,lots,on=['variety'])#.drop_duplicates()
df=df1
# print(all_h1)
df['盈亏计算']= df.apply(lambda x: x['change'] * x['signal'] * x['unit']*x['lots'],axis=1)#.drop_duplicates()
# 交易成本
df['交易成本'] = df.apply(lambda x: x['unit']*x['minPrice']*x['lots']*abs(x['signal']),axis=1)#.drop_duplicates()
df['净利润'] = df['盈亏计算'] -df['交易成本']
# print(all_h.head())
df= df.drop_duplicates()
df['pnl'] = df['净利润'].cumsum()
df.to_csv(r"e:\signal.csv", mode='a', encoding='ANSI', header=True)
# signal.insert(json.loads(df.T.to_json()).values())
# print(json.loads(df.T.to_json()).values())
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,168
|
18505161903/fwshare
|
refs/heads/master
|
/futures/22.py
|
# encoding: utf-8
# import tensorflow as tf
import datetime
import pandas as pd
import json
from pymongo import MongoClient
import fushare as ak
pd.set_option('display.width', None) # 设置字符显示宽度
pd.set_option('display.max_rows', None) # 设置显示最大行
pd.set_option('display.max_columns', None) # 设置显示最大行
def get_trade_rank(market = 'SHF', date = None):
    """Fetch the daily broker position-ranking table for one exchange.

    Parameters
    ----------
    market : str
        Exchange code: 'SHF' (Shanghai), 'DCE' (Dalian), 'CZC' (Zhengzhou)
        or 'CFE' (CFFEX).
    date : str or None
        Trade date; defaults to yesterday in '%Y-%m-%d' form.

    Returns
    -------
    The exchange-specific rank table from fushare, or the tuple
    (None, '不支持的市场类型') for an unknown market code.
    """
    if date is None:
        # The original called get_target_date(-1, "%Y-%m-%d"), which is not
        # defined/imported in this module (NameError on the default path);
        # compute "yesterday" with the stdlib instead.
        date = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
    if market == 'SHF':
        return ak.get_shfe_rank_table(date)
    if market == 'DCE':
        return ak.get_dce_rank_table(date)
    if market == 'CZC':
        return ak.get_czce_rank_table(date)
    if market == "CFE":
        return ak.get_cffex_rank_table(date)
    return None, '不支持的市场类型'
if __name__ == '__main__':
    # Exchanges to pull broker position rankings from.
    markets = ['CZC', 'SHF','CFE','DCE']
    # connect to the database
    client = MongoClient('localhost', 27017)
    db = client.futures2
    position = db.position
    # Accumulate every fetched rank table here.  The original reused `df`
    # both for the per-day fetch result (a dict of DataFrames) and as the
    # accumulator, so `df.append(value)` raised AttributeError every day
    # and was silently swallowed by the bare except below.
    all_rows = pd.DataFrame()
    for market in markets:
        begin = datetime.date(2020,7,22)
        end = datetime.date(2020,10,12)
        # end = datetime.date.today()
        for i in range((end - begin).days + 1):
            day = begin + datetime.timedelta(days=i)
            days = day.strftime('%Y%m%d')
            try:
                rank_tables = get_trade_rank(market, date=days)
                for key, value in rank_tables.items():
                    value['date'] = days
                    value['symbol'] = value['symbol'].str.upper()
                    # For CZC keep only the variety-level aggregate rows,
                    # because its per-contract holdings are unreliable
                    # (per the original comment); other exchanges are
                    # inserted as-is.
                    if market != 'CZC':
                        position.insert_many(json.loads(value.T.to_json()).values())
                    else:
                        value = value[value['symbol'] == value['variety']]
                    all_rows = pd.concat([all_rows, value], sort=False)
            except:
                # best-effort: non-trading days / missing pages are expected
                print(days, market, '数据异常')
                continue
    # --- summary over the last fetched table (`value`), as in the original ---
    # the original compared the string 'date' column against a datetime.date,
    # which could never match; compare against the formatted string instead
    end_str = end.strftime('%Y%m%d')
    # all brokers appearing on either the long or the short side at that date
    party_name = value[value['date'] == end_str]
    long_party_name = party_name['long_party_name']
    short_party_name = party_name['short_party_name']
    party_name = pd.concat([long_party_name, short_party_name]).drop_duplicates()
    # total long / short open interest per (date, variety, broker)
    long = value.groupby(['date', 'variety', 'long_party_name'])['long_openIntr'].sum()
    short = value.groupby(['date', 'variety', 'short_party_name'])['short_openIntr'].sum()
    frames = [long, short]
    # NOTE: rebinds `position` (previously the Mongo collection); kept from
    # the original since no further inserts happen after this point.
    position = pd.concat(frames, axis=1, sort=True).fillna(0).reset_index()
    position = position.rename(columns={'level_0': 'date', 'level_1': 'variety', 'level_2': 'BrokerID'})
    # TODO: save the summary to Excel -- the original ended with the bare
    # text line "然后保存excel" ("then save to excel"), which raised
    # NameError at runtime.
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,169
|
18505161903/fwshare
|
refs/heads/master
|
/opendatatools/fx/fx_interface.py
|
# encoding: utf-8
from .chinamoney_agent import ChinaMoneyAgent
from opendatatools.common import date_convert, get_current_day, get_target_date
chinamoney_agent = ChinaMoneyAgent()
def format_date_param(start_date, end_date):
    """Fill in missing date bounds: end defaults to today, start to 360 days back."""
    end = get_current_day() if end_date is None else end_date
    start = get_target_date(-360, '%Y-%m-%d') if start_date is None else start_date
    return start, end
def get_hist_cny_cpr(start_date = None, end_date = None):
    """Historical CNY 'cpr' series from the ChinaMoney agent.

    Dates default to the last 360 days via format_date_param.
    NOTE(review): 'cpr' is presumably the central parity rate -- confirm
    against ChinaMoneyAgent.
    """
    start_date, end_date = format_date_param(start_date, end_date)
    return chinamoney_agent.get_hist_cny_cpr(start_date, end_date)
def get_his_shibor(start_date = None, end_date = None):
    """Historical SHIBOR rates; date range defaults to the last 360 days."""
    start_date, end_date = format_date_param(start_date, end_date)
    return chinamoney_agent.get_his_shibor(start_date, end_date)
def get_realtime_shibor():
    """Current (real-time) SHIBOR quotes from the ChinaMoney agent."""
    return chinamoney_agent.get_realtime_shibor()
def get_cny_spot_price():
    """Spot CNY exchange-rate quotes from the ChinaMoney agent."""
    return chinamoney_agent.get_cny_spot_price()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,170
|
18505161903/fwshare
|
refs/heads/master
|
/example/LSTM/P.py
|
# Smoke test: connect to the local futures MongoDB and print one position
# document.
import pymongo
import pandas as pd
import matplotlib as plt
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# two lines to fix garbled Chinese characters in matplotlib plots
plt.rcParams['font.sans-serif']=['SimHei'] # render Chinese labels correctly
plt.rcParams['axes.unicode_minus']=False # render the minus sign correctly
pd.set_option('display.width', None) # unlimited display width
pd.set_option('display.max_rows', None) # show all rows
pd.set_option('display.max_columns', None) # show all columns
# connect to the database
client = pymongo.MongoClient('localhost',27017)
print(client)
futures = client.futures
position= futures.position
# fetch an arbitrary document to verify the collection is populated
data=position.find_one()
print(data)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,403,424
|
NickHarnau/WDR_Football_predictions
|
refs/heads/main
|
/Main.py
|
# Pipeline entry point: scrape the WDR2 community predictions and the
# football-data.co.uk quotes/results, merge them per fixture, compute
# hypothetical betting earnings, and run the ML comparison.
from ML_Versuche import *
from Scrap_current_Community_Bets import *
from Scrape_current_betquotes import *
from Main_function import *
from Function_Stats import *
df_crowd_prediction = scrape_current_community_bets(link="https://allegegenpistor.wdr2.de/tipptrend.php?spieltag=")  # function from Scrap_current_Community_Bets
df_quotes_result = current_betquotes()  # function from Scrape_current_betquotes
# bring both df together over Home and Away team
# get team names from df_quotes_result
Mannschaftsnamen_1 = df_quotes_result.HomeTeam.unique()
# unfortunatels th dfs are in different order - make a list manuel fitting to Mannschaftsname_1 to get the right order
# NOTE(review): this mapping relies on HomeTeam.unique() returning teams in a
# fixed order -- it silently breaks if the source CSV's fixture order changes.
Mannschaftsnamen_2 = ["FC Bayern München", "Eintracht Frankfurt", "1. FC Köln", "VfB Stuttgart"
    , "1. FC Union Berlin", "Werder Bremen", "Borussia Dortmund", "RB Leipzig"
    , "VfL Wolfsburg", "Hertha BSC", "FC Augsburg", "Arminia Bielefeld"
    , "Bayer 04 Leverkusen", "1. FSV Mainz 05", "Borussia Mönchengladbach"
    , "FC Schalke 04", "TSG 1899 Hoffenheim", "SC Freiburg"]
# fit team names in df_quotes_result
df_quotes_result = df_quotes_result.replace(dict(zip(Mannschaftsnamen_1, Mannschaftsnamen_2)))
# merge by HT and AT (the combination only once a season)
merge_df = pd.merge(df_crowd_prediction,df_quotes_result, how="left",
                    left_on=["home_team","away_team"], right_on=["HomeTeam", "AwayTeam"])
# fit syntax und types
merge_df = fix_syntax_and_types(merge_df)  # function in Main_function
# potencial wins
earning_Predictions = earning(df=merge_df)  # favorites
earning_Predictions25 = earning(df=merge_df, standard_bet=False)  # over 2,5
earning_Predictions_Doppelte = earning(df=merge_df, standard_bet=False, DoppelteChance=True)  # 1x / x2
## more Statistics in Stats
# ML
dict_wins = ML_try(df=merge_df, test_size=0.33, state=18112020, target="FTR", X=["percentage_bet_home", "percentage_bet_draw", "percentage_bet_away"])# more possibilities "B365H","B365D","B365A"
{"/Stats.py": ["/Main.py", "/Function_Stats.py"], "/Main.py": ["/ML_Versuche.py", "/Scrap_current_Community_Bets.py", "/Scrape_current_betquotes.py", "/Main_function.py", "/Function_Stats.py"], "/Scrape_current_betquotes.py": ["/Function_Scrape_current_betquotes.py"], "/Scrap_current_Community_Bets.py": ["/Objects.py"]}
|
30,403,425
|
NickHarnau/WDR_Football_predictions
|
refs/heads/main
|
/Scrape_current_betquotes.py
|
import pandas as pd
from Function_Scrape_current_betquotes import *
""" https://www.football-data.co.uk/germanym.php
link for bet quote scrape - results also inside
"""
def current_betquotes():
    """Download the current Bundesliga season's results and Bet365 quotes
    from football-data.co.uk and append derived double-chance quotes."""
    season_url = "https://www.football-data.co.uk/mmz4281/2021/D1.csv"
    # columns to keep: teams, full-time goals/result, 3-way and over/under quotes
    relevant_columns = ["HomeTeam", "AwayTeam",
                        "FTHG", "FTAG", "FTR",
                        "B365H", "B365D", "B365A",
                        "B365>2.5", "B365<2.5"]
    quotes = pd.read_csv(season_url)[relevant_columns]
    return calculate_doubleChance("B365H", "B365D", "B365A", quotes)
|
{"/Stats.py": ["/Main.py", "/Function_Stats.py"], "/Main.py": ["/ML_Versuche.py", "/Scrap_current_Community_Bets.py", "/Scrape_current_betquotes.py", "/Main_function.py", "/Function_Stats.py"], "/Scrape_current_betquotes.py": ["/Function_Scrape_current_betquotes.py"], "/Scrap_current_Community_Bets.py": ["/Objects.py"]}
|
30,403,426
|
NickHarnau/WDR_Football_predictions
|
refs/heads/main
|
/Function_Stats.py
|
import numpy as np
# If Game result is X - how often was it predicted
def right_prediction_per_outcome(df, result, outcome_close, outcome_far, FulltimeResults="FTR", PredictionResults="BR", detail = True):
    """Among games whose full-time result is `result`, return the share that
    was predicted as `result` (and, with detail=True, also the shares
    predicted as the close / far alternative outcomes)."""
    games = df.loc[df[FulltimeResults] == result]
    total = len(games)

    def predicted_share(outcome):
        # fraction of these games the crowd tipped as `outcome`
        return len(games.loc[games[PredictionResults] == outcome]) / total

    if not detail:
        return predicted_share(result)
    return predicted_share(result), predicted_share(outcome_close), predicted_share(outcome_far)
# If the prediction is X - how often was the result like that
def right_outcome_per_prediction(df, result, outcome_close, outcome_far, FulltimeResults="FTR", PredictionResults="BR", detail=True):
    """Among games predicted as `result`, return the share that actually
    ended as `result` (and, with detail=True, also the shares that ended as
    the close / far alternative outcomes)."""
    predicted = df.loc[df[PredictionResults] == result]
    total = len(predicted)

    def actual_share(outcome):
        # fraction of these predictions whose real result was `outcome`
        return len(predicted.loc[predicted[FulltimeResults] == outcome]) / total

    if not detail:
        return actual_share(result)
    return actual_share(result), actual_share(outcome_close), actual_share(outcome_far)
# earnings
def earning(df, standard_bet=True , DoppelteChance=False ):
    """Sum the hypothetical winnings of betting 1 unit per game
    (profit = quote - 1 on a hit, -1 otherwise).

    standard_bet=True                      -> back the crowd's tip ("BR")
    standard_bet=False, DoppelteChance=True -> double-chance 1x / x2 bets
    standard_bet=False otherwise           -> over/under 2.5 goals from the
                                              predicted score

    Mutates df in place by adding the helper column(s) of the chosen variant.
    """
    if standard_bet:
        # one mask per outcome: crowd tipped it AND it happened
        for outcome, quote_col in (("H", "B365H"), ("D", "B365D"), ("A", "B365A")):
            hit = (df["FTR"] == outcome) & (df["BR"] == outcome)
            df.loc[hit, "Gewinne_right_pred"] = df[quote_col] - 1
        df["Gewinne_right_pred"].fillna(-1, inplace=True)
        return df["Gewinne_right_pred"].sum()
    if DoppelteChance:
        home_favoured = df["percentage_bet_home"] > df["percentage_bet_away"]
        away_favoured = df["percentage_bet_home"] < df["percentage_bet_away"]
        # 1x pays unless away wins; x2 pays unless home wins
        df.loc[home_favoured & (df["FTR"] != "A"), "Gewinne_Doppelte"] = df["Quote 1x"] - 1
        df.loc[away_favoured & (df["FTR"] != "H"), "Gewinne_Doppelte"] = df["Quote x2"] - 1
        df["Gewinne_Doppelte"].fillna(-1, inplace=True)
        return df["Gewinne_Doppelte"].sum()
    # potential winnings on the over/under-2.5-goals market
    df["Pred_25"] = np.where(df["predict_goals_home"] + df["predict_goals_away"] > 2, "Y", "N")
    df["Result_25"] = np.where(df["FTHG"] + df["FTAG"] > 2, "Y", "N")
    df.loc[(df["Result_25"] == "Y") & (df["Pred_25"] == "Y"), "Gewinne25_right_pred"] = df["B365>2.5"] - 1
    df.loc[(df["Result_25"] == "N") & (df["Pred_25"] == "N"), "Gewinne25_right_pred"] = df["B365<2.5"] - 1
    df["Gewinne25_right_pred"].fillna(-1, inplace=True)
    return df["Gewinne25_right_pred"].sum()
|
{"/Stats.py": ["/Main.py", "/Function_Stats.py"], "/Main.py": ["/ML_Versuche.py", "/Scrap_current_Community_Bets.py", "/Scrape_current_betquotes.py", "/Main_function.py", "/Function_Stats.py"], "/Scrape_current_betquotes.py": ["/Function_Scrape_current_betquotes.py"], "/Scrap_current_Community_Bets.py": ["/Objects.py"]}
|
30,403,427
|
NickHarnau/WDR_Football_predictions
|
refs/heads/main
|
/ML_Versuche.py
|
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
def ML_try(df, test_size, state, target, X):
    """Train five off-the-shelf classifiers on the crowd-bet features and
    report the hypothetical betting profit of each.

    Parameters
    ----------
    df : DataFrame containing the target column, the feature columns and
        the Bet365 quote columns (B365H / B365D / B365A) plus "FTR".
    test_size : float -- fraction of rows used as the test set.
    state : int -- random_state for the split (reproducibility).
    target : str -- name of the label column (e.g. "FTR").
    X : list[str] -- feature column names.

    Returns
    -------
    dict mapping classifier tag ("RF", "LR", "SVC", "KNN", "GN") to the
    profit of betting 1 unit on every test game: sum of quotes on correct
    predictions minus the number of test games (the stakes).
    """
    test = df.copy()
    Y = test[target]
    X = test[X]
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=state)
    Classifier = [RandomForestClassifier(), LogisticRegression(), LinearSVC(), KNeighborsClassifier(n_neighbors=5),
                  GaussianNB()]
    Predictions = []
    for classifier in Classifier:
        cl = classifier
        cl.fit(X_train, y_train)
        Predictions.append(cl.predict(X_test))
        # quick diagnostics per model
        print(cl.score(X_test, y_test))
        print(classification_report(y_test, cl.predict(X_test)))
        print(confusion_matrix(y_test, cl.predict(X_test)))
    # attach each model's predictions to the test games; .copy() so the
    # column assignments below don't trigger pandas' SettingWithCopyWarning
    # (the original assigned into a slice of `test`)
    Vorhersage = test.loc[y_test.index].copy()
    Vorhersage["RF"]= Predictions[0]
    Vorhersage["LR"]= Predictions[1]
    Vorhersage["SVC"]= Predictions[2]
    Vorhersage["KNN"]= Predictions[3]
    Vorhersage["GN"]= Predictions[4]
    Columns = ["RF", "LR", "SVC", "KNN", "GN"]
    Gewinne = []
    for column in Columns:
        # .copy() for the same reason: the original wrote into a filtered
        # slice of Vorhersage
        Gewinn = Vorhersage.loc[Vorhersage["FTR"] == Vorhersage[column]].copy()
        Gewinn.loc[Gewinn["FTR"] == "D", "Gewinn_{}".format(column)] = Gewinn["B365D"]
        Gewinn.loc[Gewinn["FTR"] == "A", "Gewinn_{}".format(column)] = Gewinn["B365A"]
        Gewinn.loc[Gewinn["FTR"] == "H", "Gewinn_{}".format(column)] = Gewinn["B365H"]
        Gewinne.append(Gewinn["Gewinn_{}".format(column)].sum() - len(Vorhersage))
    dict_wins = dict(zip(Columns, Gewinne))
    return dict_wins
|
{"/Stats.py": ["/Main.py", "/Function_Stats.py"], "/Main.py": ["/ML_Versuche.py", "/Scrap_current_Community_Bets.py", "/Scrape_current_betquotes.py", "/Main_function.py", "/Function_Stats.py"], "/Scrape_current_betquotes.py": ["/Function_Scrape_current_betquotes.py"], "/Scrap_current_Community_Bets.py": ["/Objects.py"]}
|
30,403,428
|
NickHarnau/WDR_Football_predictions
|
refs/heads/main
|
/Function_Scrape_current_betquotes.py
|
import numpy as np
def calculate_doubleChance(homequote, drawquote, awayquote, df):
    """Derive double-chance quotes (1x, x2, 12) from the three-way quotes.

    Combined quote q = 1 / (1/qa + 1/qb), floored at 1.0 so a quote never
    pays less than the stake.
    https://www.reddit.com/r/SoccerBetting/comments/90fd4d/how_to_calculate_double_chance/
    """
    combinations = {
        "Quote 1x": (homequote, drawquote),
        "Quote x2": (awayquote, drawquote),
        "Quote 12": (awayquote, homequote),
    }
    for column, (first, second) in combinations.items():
        combined = 1 / (1 / df[first] + 1 / df[second])
        df[column] = np.where(combined < 1, 1, combined)
    return df
|
{"/Stats.py": ["/Main.py", "/Function_Stats.py"], "/Main.py": ["/ML_Versuche.py", "/Scrap_current_Community_Bets.py", "/Scrape_current_betquotes.py", "/Main_function.py", "/Function_Stats.py"], "/Scrape_current_betquotes.py": ["/Function_Scrape_current_betquotes.py"], "/Scrap_current_Community_Bets.py": ["/Objects.py"]}
|
30,403,429
|
NickHarnau/WDR_Football_predictions
|
refs/heads/main
|
/Main_function.py
|
def fix_syntax_and_types(df):
    """Strip '%' signs, map '-' (shown when 0% bet on a team) to '0', and
    cast the prediction / percentage columns to int."""
    cleaned = df.replace({'%': ''}, regex=True).replace({'-': "0"}, regex=True)
    integer_columns = {"predict_goals_home": int, "predict_goals_away": int,
                       "percentage_bet_home": int, "percentage_bet_draw": int,
                       "percentage_bet_away": int}
    return cleaned.astype(integer_columns)
|
{"/Stats.py": ["/Main.py", "/Function_Stats.py"], "/Main.py": ["/ML_Versuche.py", "/Scrap_current_Community_Bets.py", "/Scrape_current_betquotes.py", "/Main_function.py", "/Function_Stats.py"], "/Scrape_current_betquotes.py": ["/Function_Scrape_current_betquotes.py"], "/Scrap_current_Community_Bets.py": ["/Objects.py"]}
|
30,403,430
|
NickHarnau/WDR_Football_predictions
|
refs/heads/main
|
/Scrap_current_Community_Bets.py
|
import requests
from bs4 import BeautifulSoup
import re
from Objects import *
from datetime import datetime
import pandas as pd
import numpy as np
def scrape_current_community_bets(link):
    """Scrape the WDR2 "Alle gegen Pistor" community tip trend for every
    Bundesliga matchday whose date has already passed.

    Parameters
    ----------
    link : str
        Base URL; the matchday number is appended per request.

    Returns
    -------
    DataFrame with one row per game (built from match_object_predict) plus a
    "BR" column: "H"/"A"/"D" derived from the predicted score.
    """
    link = link
    Spieltag = list(range(1,35))
    # matchday -> date of its first game (hard-coded for the 2020/21 season)
    Dic_Spieltag_Datum = {1: '18.09.2020', 2: '25.09.2020', 3: '02.10.2020', 4: '17.10.2020', 5: '23.10.2020',
                          6: '30.10.2020', 7: '06.11.2020', 8: '21.11.2020', 9: '27.11.2020', 10: '04.12.2020',
                          11: '11.12.2020', 12: '15.12.2020', 13: '18.12.2020', 14: '02.01.2021', 15: '08.01.2021',
                          16: '15.01.2021', 17: '19.01.2021', 18: '22.01.2021', 19: '29.01.2021', 20: '05.02.2021',
                          21: '12.02.2021', 22: '19.02.2021', 23: '26.02.2021', 24: '05.03.2021', 25: '12.03.2021',
                          26: '19.03.2021', 27: '03.04.2021', 28: '09.04.2021', 29: '16.04.2021', 30: '20.04.2021',
                          31: '23.04.2021', 32: '07.05.2021', 33: '15.05.2021', 34: '22.05.2021'}
    matches = []
    last_matchday = 0
    for Tag in Spieltag:
        # only scrape matchdays that have already started
        if datetime.strptime(Dic_Spieltag_Datum[Tag],"%d.%m.%Y") < datetime.today():
            url = link + str(Tag)
            # browser-like User-Agent so the site serves the normal page
            headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:71.0) Gecko/20100101 Firefox/71.0"}
            page = requests.get(url, headers=headers)
            soup = BeautifulSoup(page.content, 'html.parser')
            soup.prettify()
            # NOTE(review): parsing relies on the page layout -- the third
            # "box" div holds the results table; confirm if scraping breaks.
            Relevanter_Teil = soup.find_all("div",{"class":"box"})[2]
            Ergebnisse = Relevanter_Teil.find("tbody")
            Spiele = Ergebnisse.find_all("tr")
            # NOTE(review): assumes exactly 9 games per matchday
            Spiele = Spiele[:9]
            for Spiel in Spiele:
                # cells: teams | home share | draw share | away share | tipped score
                Match = Spiel.find_all("td")
                Mannschaften = Match[0].text
                Mannschaften = Mannschaften.split(":")
                Heimteam = Mannschaften[0].strip()
                Awayteam = Mannschaften[1].strip()
                HS= Match[1].text.strip()
                U = Match[2].text.strip()
                AS = Match[3].text.strip()
                Tipp = Match[4].text.strip()
                Tipp = Tipp.split(":")
                Tipp_Tore_H = Tipp[0]
                Tipp_Tore_A = Tipp[1]
                matches.append(match_object_predict(Heimteam,Awayteam,HS,U,AS,Tipp_Tore_H,Tipp_Tore_A, Tag, Dic_Spieltag_Datum[Tag]))
            last_matchday = Tag
        else:
            print("The {}. Matchday is on the {} .".format(Tag,Dic_Spieltag_Datum[Tag] ))
    # one row per scraped game, taken from the match objects' attributes
    df_crowd_prediction = pd.DataFrame([vars(f) for f in matches])
    # crowd result ("BR"): H / A / D derived from the predicted score
    df_crowd_prediction.loc[df_crowd_prediction["predict_goals_home"]> df_crowd_prediction["predict_goals_away"], "BR"]= "H"
    df_crowd_prediction.loc[df_crowd_prediction["predict_goals_home"]< df_crowd_prediction["predict_goals_away"], "BR"]= "A"
    df_crowd_prediction.loc[df_crowd_prediction["predict_goals_home"] == df_crowd_prediction["predict_goals_away"], "BR"]= "D"
    return df_crowd_prediction
|
{"/Stats.py": ["/Main.py", "/Function_Stats.py"], "/Main.py": ["/ML_Versuche.py", "/Scrap_current_Community_Bets.py", "/Scrape_current_betquotes.py", "/Main_function.py", "/Function_Stats.py"], "/Scrape_current_betquotes.py": ["/Function_Scrape_current_betquotes.py"], "/Scrap_current_Community_Bets.py": ["/Objects.py"]}
|
30,403,431
|
NickHarnau/WDR_Football_predictions
|
refs/heads/main
|
/Stats.py
|
from Main import *
from Function_Stats import *
import numpy as np
# Ad-hoc evaluation script: `merge_df` is expected to come from the star
# import of Main, and the right_*_per_* helpers from Function_Stats.
# The bare expressions below are meant for interactive inspection (REPL/IDE).
# first check types and convert them
merge_df.dtypes
merge_df = merge_df.astype({"predict_goals_home": int, "predict_goals_away": int})
# Games so far
len(merge_df)
# Number of right predicted games
len(merge_df.loc[merge_df["FTR"]== merge_df["BR"]])
# percentage of right predicted games
(len(merge_df.loc[merge_df["FTR"]== merge_df["BR"]])/len(merge_df))
# For a prediction of X - how often was the result like this
## BR = H
H_Prediction_H_Win,H_Prediction_D,H_Prediction_A_Win = right_outcome_per_prediction(df=merge_df,result="H", outcome_close="D", outcome_far="A")
## FTR = D
D_Prediciton_D,D_Prediction_H_Win,D_Prediction_A_Win = right_outcome_per_prediction(df=merge_df,result="D", outcome_close="H", outcome_far="A")
## FTR = A
A_Prediction_A_Win,A_Prediction_D,A_Prediction_H_Win = right_outcome_per_prediction(df=merge_df,result="A", outcome_close="D", outcome_far="H")
# If the result is X - how often was it predicted
## FTR = H
H_Win_H_Prediction,H_Win_D_Prediction,H_Win_A_Prediction = right_prediction_per_outcome(df=merge_df,result="H", outcome_close="D", outcome_far="A")
## FTR = D
D_D_Prediction,D_H_Prediction,D_A_Prediction = right_prediction_per_outcome(df=merge_df,result="D", outcome_close="H", outcome_far="A")
## FTR = A
A_Win_A_Prediction,A_win_D_Prediction,A_Win_H_Prediction = right_prediction_per_outcome(df=merge_df,result="A", outcome_close="D", outcome_far="H")
# Right goal prediction (only works if main was run)
len(merge_df.loc[merge_df["Pred_25"]== merge_df["Result_25"]])
(len(merge_df.loc[merge_df["Pred_25"]== merge_df["Result_25"]])/len(merge_df))
##>2.5 = Y
pred25_over25 = right_outcome_per_prediction(df=merge_df,result="Y", outcome_close="N",
                                             outcome_far="N", FulltimeResults="Result_25",
                                             PredictionResults="Pred_25", detail=False)
pred25_under25 = 1-pred25_over25
# NOTE(review): the chained assignment below OVERWRITES pred25_over25 with the
# under-2.5 figure computed just above it — this looks like a copy-paste slip;
# confirm whether pred25_over25 should really be clobbered here.
pred_under25_under25 = pred25_over25 = right_outcome_per_prediction(df=merge_df,result="N", outcome_close="Y",
                                                                   outcome_far="Y", FulltimeResults="Result_25",
                                                                   PredictionResults="Pred_25", detail=False)
pred_under25_over25 = 1-pred_under25_under25
|
{"/Stats.py": ["/Main.py", "/Function_Stats.py"], "/Main.py": ["/ML_Versuche.py", "/Scrap_current_Community_Bets.py", "/Scrape_current_betquotes.py", "/Main_function.py", "/Function_Stats.py"], "/Scrape_current_betquotes.py": ["/Function_Scrape_current_betquotes.py"], "/Scrap_current_Community_Bets.py": ["/Objects.py"]}
|
30,403,432
|
NickHarnau/WDR_Football_predictions
|
refs/heads/main
|
/Objects.py
|
class match_object_predict:
    """Plain record of one fixture's community (crowd) prediction.

    Stores the scraped values verbatim; the scraper typically supplies
    strings for the percentages and predicted goals.  Instances are later
    flattened with ``vars(...)`` into a DataFrame row, so every attribute
    name doubles as a column name.
    """

    def __init__(self, home_team, away_team, percentage_bet_home, percentage_bet_draw, percentage_bet_away,
                 predict_goals_home, predict_goals_away, Spieltag,
                 date):
        """Store the scraped values for one match on matchday *Spieltag*."""
        self.home_team = home_team
        self.away_team = away_team
        self.percentage_bet_home = percentage_bet_home
        self.percentage_bet_draw = percentage_bet_draw
        self.percentage_bet_away = percentage_bet_away
        self.predict_goals_home = predict_goals_home
        self.predict_goals_away = predict_goals_away
        self.Spieltag = Spieltag
        self.date = date

    def __repr__(self):
        # debugging aid only; vars()-based DataFrame conversion is unaffected
        return (f"{type(self).__name__}({self.home_team!r} vs {self.away_team!r}, "
                f"tip {self.predict_goals_home}:{self.predict_goals_away}, "
                f"matchday {self.Spieltag}, {self.date})")
|
{"/Stats.py": ["/Main.py", "/Function_Stats.py"], "/Main.py": ["/ML_Versuche.py", "/Scrap_current_Community_Bets.py", "/Scrape_current_betquotes.py", "/Main_function.py", "/Function_Stats.py"], "/Scrape_current_betquotes.py": ["/Function_Scrape_current_betquotes.py"], "/Scrap_current_Community_Bets.py": ["/Objects.py"]}
|
30,464,228
|
wolfMatheus/Grafos
|
refs/heads/master
|
/main.py
|
import os
import timeit
import funcoes_de_matrizes
import funcoes_de_listas
import numpy as np
# Interactive driver: asks the user for a graph file, builds the graph either
# as an adjacency list or an adjacency matrix, then prints statistics.
# Prompt until an existing file name is given, then open it.
verificador = False
arquivo = ''
while not verificador:
    arquivo = input('\n Informe o nome do arquivo: ')
    verificador = os.path.exists(arquivo)
    if not verificador:
        espera = input("\nArquivo não encontrado!\n Verifique o nome do arquivo e tente novamente!")
print('\n Arquivo encontrado!\n')
arquivo = open(arquivo, 'r')
# Choose the list or matrix representation for the graph.
print('\tEscolha a representação do grafo desejado:\nSendo:')
cond=True
while (cond):
    selecao = input('\t1 para lista de adjacencias. \n\t2 para matriz de adjacencias: ')
    selecao = int(selecao)
    if ((selecao==1)or(selecao==2)):
        cond=False
    else:
        print('\n selecao invalida!')
if selecao == 1:
    # Adjacency list representation
    print('\n\nLista de adjacencias:')
    # measure total construction time
    tempoinicio = timeit.default_timer()
    # building the object also runs BFS/DFS and component analysis
    retorno = funcoes_de_listas.GrafoLista(arquivo)
    tempofinal = timeit.default_timer()
    # only print graphs with fewer than 12 vertices
    if retorno.vertices < 12:
        for item in retorno.grafo:
            print('\t', item)
    else:
        print('\n Grafo muito grande para ser impresso')
    print('*** Tempo de busca em Largura: ',retorno.tempo_largura_lista,'***')
    print('*** Tempo de busca em Profundidade: ',retorno.tempo_profundidade_lista,'***')
else:
    # Adjacency matrix representation
    print('\n\nMatriz de adjacencias:')
    # measure total construction time
    tempoinicio = timeit.default_timer()
    # building the object also runs BFS/DFS and component analysis
    retorno = funcoes_de_matrizes.GrafoMatriz(arquivo)
    tempofinal = timeit.default_timer()
    # only print graphs with fewer than 12 vertices
    if retorno.vertices < 12:
        for item in retorno.grafo:
            print('\t', item)
    else:
        print('\n Grafo muito grande para ser impresso')
    # traversal times
    print('*** Tempo de busca em Largura: ',retorno.tempo_largura,'***')
    print('*** Tempo de busca em Profundidade: ',retorno.tempo_profundidade,'***')
# (C) max/min degree vertices, mean degree and the empirical degree distribution
print('Maior grau:', retorno.maior_grau[1], '- vertice:', retorno.maior_grau[0])
print('Menor grau:', retorno.menor_grau[1], '- vertice:', retorno.menor_grau[0])
print('\nGrau Medio:', retorno.grau_medio)
print('Frequencia relativa:')
for (grau, freq) in retorno.frequencia:
    print('\tGrau', grau, ': ', freq)
# (E) connected components
print('Componentes conexas: ', retorno.componentes_conexas)
# total time spent building the graph object
print('-Tempo de execução:', tempofinal - tempoinicio)
input('Pressione Enter para continuar:-> ')
print('Numeros conexos: ')
for item in retorno.num_conexa:
    print('-', item, 'vertices')
input()
print('Numero de vertices:', retorno.vertices)
print('Numero de arestas:', retorno.arestas)
{"/main.py": ["/funcoes_de_matrizes.py", "/funcoes_de_listas.py"]}
|
30,464,229
|
wolfMatheus/Grafos
|
refs/heads/master
|
/funcoes_de_listas.py
|
import timeit
class GrafoLista:
    """Undirected graph stored as an adjacency list.

    The constructor reads the graph from an already-open file whose first
    line is "<vertices> <edges>" and whose remaining lines are
    "<origin> <destination> <weight>".  Building the object also:
      * computes max/min/average degree and the empirical degree
        distribution (written to 'cont.txt'),
      * runs BFS and DFS from vertex 0, writing visit levels to
        'largura.txt' / 'profundidade.txt' and recording elapsed times,
      * counts connected components.
    """

    def __init__(self, arquivo):
        (self.vertices, self.arestas, self.grafo) = self.cria_lista(arquivo)
        (self.maior_grau, self.menor_grau, self.grau_medio, self.frequencia) = self.definir_graus()
        # both traversals start from vertex 0
        self.tempo_largura_lista = self.busca_em_largura2(0)
        self.tempo_profundidade_lista = self.busca_de_profundidade2(0)
        (self.componentes_conexas, self.num_conexa) = self.conexos()

    def cria_lista(self, arquivo):
        """Parse the file into (vertices, edges, adjacency list).

        Each edge is inserted in both directions; parallel edges and
        self-loops are kept as-is.
        """
        header = arquivo.readline()
        info = header.split(' ')
        vertices = int(info[0])
        arestas = int(info[1])
        lista = [[] for _ in range(vertices)]
        for linha in arquivo:
            info = linha.split(' ')
            origem = int(info[0])
            destino = int(info[1])
            peso = int(info[2])
            lista[origem].append((destino, peso))
            lista[destino].append((origem, peso))
        return (vertices, arestas, lista)

    def definir_graus(self):
        """Return ([max_vtx, max_deg], [min_vtx, min_deg], mean_deg, rel. frequencies).

        Also writes "degree:percentage" lines to 'cont.txt'.

        Fixes two defects of the original version:
          * max and min are tracked independently — the old if/elif skipped
            the min check whenever the max was updated, so e.g. a strictly
            increasing degree sequence left menor at [0, 1000000];
          * the degree counter is sized 2*edges+1 so multi-edges/self-loops
            (degree >= number of vertices) no longer raise IndexError.
        """
        maior = [0, 0]
        menor = [0, 1000000]
        soma = 0
        # a vertex's degree cannot exceed twice the edge count
        cont = [0 for _ in range(2 * self.arestas + 1)]
        frequencia = []
        arquivo4 = open("cont.txt", 'w')
        for i in range(self.vertices):
            grau = len(self.grafo[i])
            cont[grau] += 1
            if maior[1] < grau:
                maior = [i, grau]
            if menor[1] > grau:
                menor = [i, grau]
            soma += grau
        for i in range(len(cont)):
            if cont[i] != 0:
                frequencia.append((i, cont[i] / self.vertices))
                arquivo4.write(str(i) + ':' + str(100 * cont[i] / self.vertices) + '\n')
        arquivo4.close()
        media = float(soma) / float(self.vertices)
        return (maior, menor, media, frequencia)

    def busca_em_largura2(self, s):
        """BFS from *s*; writes "vertex:level" to 'largura.txt', returns elapsed seconds."""
        tinicio = timeit.default_timer()
        arquivo2 = open('largura.txt', 'w')
        desc = [0 for _ in range(self.vertices)]
        Q = [s]
        desc[s] = 1
        ordem = [-1 for _ in range(self.vertices)]
        ordem[s] = 0
        while len(Q) != 0:
            u = Q.pop(0)
            for (v, _) in self.grafo[u]:
                if desc[v] == 0:
                    Q.append(v)
                    desc[v] = 1
                    if ordem[v] == -1:
                        ordem[v] = ordem[u] + 1
        for i in range(len(ordem)):
            if ordem[i] != -1:
                arquivo2.write(str(i) + ':' + str(ordem[i]) + '\n')
        arquivo2.close()
        return timeit.default_timer() - tinicio

    def busca_de_profundidade2(self, s):
        """Iterative DFS from *s*; writes "vertex:level" to 'profundidade.txt', returns elapsed seconds."""
        tinicio = timeit.default_timer()
        arquivo3 = open('profundidade.txt', 'w')
        desc = [0 for _ in range(self.vertices)]
        S = [s]
        desc[s] = 1
        ordem = [-1 for _ in range(self.vertices)]
        ordem[s] = 0
        while len(S) != 0:
            u = S[-1]
            desempilhar = True  # pop u once no undiscovered neighbour remains
            for (v, _) in self.grafo[u]:
                if desc[v] == 0:
                    desempilhar = False
                    S.append(v)
                    desc[v] = 1
                    if ordem[v] == -1:
                        ordem[v] = ordem[u] + 1
                    break
            if desempilhar:
                S.pop()
        for i in range(len(ordem)):
            if ordem[i] != -1:
                arquivo3.write(str(i) + ':' + str(ordem[i]) + '\n')
        arquivo3.close()
        return timeit.default_timer() - tinicio

    def busca_largura(self, comp, s):
        """BFS for component counting: marks every reached vertex comp[v]=0, returns visit order."""
        desc = [0 for _ in range(self.vertices)]
        Q = [s]
        R = [s]
        desc[s] = 1
        comp[s] = 0
        while len(Q) != 0:
            u = Q.pop(0)
            for (v, _) in self.grafo[u]:
                if desc[v] == 0:
                    Q.append(v)
                    R.append(v)
                    desc[v] = 1
                    comp[v] = 0
        return R

    def conexos(self):
        """Return (number_of_components, [component sizes]).

        Unlike the original early-return version this also handles the
        empty graph, returning (0, []) instead of None.
        """
        componente = [1 for _ in range(self.vertices)]
        t = []
        k = 0
        for i in range(self.vertices):
            if componente[i] != 0:
                busca = self.busca_largura(componente, i)
                t.append(len(busca))
                k = k + 1
        return (k, t)
|
{"/main.py": ["/funcoes_de_matrizes.py", "/funcoes_de_listas.py"]}
|
30,464,230
|
wolfMatheus/Grafos
|
refs/heads/master
|
/funcoes_de_matrizes.py
|
import numpy as np
import timeit
class GrafoMatriz:
    """Undirected graph stored as a numpy adjacency matrix.

    The constructor reads the graph from an already-open file whose first
    line is "<vertices> <edges>" and whose remaining lines are
    "<origin> <destination> <weight>".  Building the object also:
      * computes max/min/average degree and the degree distribution
        (written to 'distribuicao.txt'),
      * runs BFS and DFS from vertex 0, writing visit levels to
        'nivel_largura.txt' / 'nivel_profundidade.txt' and recording times,
      * counts connected components.
    """

    def __init__(self, arquivo):
        (self.vertices, self.arestas, self.grafo) = self.cria_adjacencia(arquivo)
        (self.maior_grau, self.menor_grau, self.grau_medio, self.frequencia) = self.definir_graus()
        # both traversals start from vertex 0
        self.tempo_largura = self.busca_em_largura(0)
        self.tempo_profundidade = self.busca_de_profundidade(0)
        (self.componentes_conexas, self.num_conexa) = self.conexo()

    def cria_adjacencia(self, arquivo):
        """Parse the file into (vertices, edges, adjacency matrix).

        The matrix is symmetric; duplicate edge lines simply overwrite
        the stored weight.
        """
        header = arquivo.readline()
        info = header.split(" ")
        vertices = int(info[0])
        arestas = int(info[1])
        matriz = np.zeros((vertices, vertices))
        for linha in arquivo:
            info = linha.split(' ')
            origem = int(info[0])
            destino = int(info[1])
            peso = int(info[2])
            matriz[origem][destino] = peso
            matriz[destino][origem] = peso
        return (vertices, arestas, matriz)

    def definir_graus(self):
        """Return ([max_vtx, max_deg], [min_vtx, min_deg], mean_deg, [(deg, count)]).

        Also writes "degree:percentage" lines to 'distribuicao.txt'.

        Fixes two defects of the original version:
          * max and min are tracked independently — the old if/elif skipped
            the min check whenever the max was updated, which could leave
            menor at [0, 1000000] or report a wrong minimum vertex;
          * the counter is sized vertices+1 so a vertex adjacent to every
            vertex (self-loop included) no longer raises IndexError.
        """
        maior = [0, 0]
        menor = [0, 1000000]
        soma = 0
        distribuicao = [0 for _ in range(self.vertices + 1)]
        frequencia = []
        arquivo4 = open("distribuicao.txt", 'w')
        for i in range(self.vertices):
            # degree = number of non-zero entries in the vertex's row
            grau = int(np.count_nonzero(self.grafo[i]))
            distribuicao[grau] += 1
            if maior[1] < grau:
                maior = [i, grau]
            if menor[1] > grau:
                menor = [i, grau]
            soma += grau
        for i in range(len(distribuicao)):
            if distribuicao[i] != 0:
                # NOTE: unlike GrafoLista, the in-memory list keeps raw counts
                # while the file gets percentages (original behaviour kept)
                frequencia.append((i, distribuicao[i]))
                arquivo4.write(str(i) + ':' + str(100 * distribuicao[i] / self.vertices) + '\n')
        arquivo4.close()
        media = float(soma) / float(self.vertices)
        return (maior, menor, media, frequencia)

    def busca_em_largura(self, s):
        """BFS from *s*; writes "vertex:level" to 'nivel_largura.txt', returns elapsed seconds."""
        tinicio = timeit.default_timer()
        arquivo2 = open('nivel_largura.txt', 'w')
        desc = [0 for _ in range(self.vertices)]
        Q = [s]
        desc[s] = 1
        ordem = [-1 for _ in range(self.vertices)]
        ordem[s] = 0
        while len(Q) != 0:
            u = Q.pop(0)
            for v in range(self.vertices):
                if self.grafo[u][v] != 0 and desc[v] == 0:
                    Q.append(v)
                    desc[v] = 1
                    if ordem[v] == -1:
                        ordem[v] = ordem[u] + 1
        for i in range(len(ordem)):
            if ordem[i] != -1:
                arquivo2.write(str(i) + ':' + str(ordem[i]) + '\n')
        arquivo2.close()
        return timeit.default_timer() - tinicio

    def busca_de_profundidade(self, s):
        """Iterative DFS from *s*; writes "vertex:level" to 'nivel_profundidade.txt', returns elapsed seconds."""
        tinicio = timeit.default_timer()
        arquivo3 = open('nivel_profundidade.txt', 'w')
        desc = [0 for _ in range(self.vertices)]
        S = [s]
        desc[s] = 1
        ordem = [-1 for _ in range(self.vertices)]
        ordem[s] = 0
        while len(S) != 0:
            u = S[-1]
            desempilhar = True  # pop u once no undiscovered neighbour remains
            for v in range(self.vertices):
                if self.grafo[u][v] != 0 and desc[v] == 0:
                    desempilhar = False
                    S.append(v)
                    desc[v] = 1
                    if ordem[v] == -1:
                        ordem[v] = ordem[u] + 1
                    break
            if desempilhar:
                S.pop()
        for i in range(len(ordem)):
            if ordem[i] != -1:
                arquivo3.write(str(i) + ':' + str(ordem[i]) + '\n')
        arquivo3.close()
        return timeit.default_timer() - tinicio

    def busca_largura(self, comp, s):
        """BFS for component counting: marks every reached vertex comp[v]=0, returns visit order."""
        desc = [0 for _ in range(self.vertices)]
        Q = [s]
        R = [s]
        desc[s] = 1
        comp[s] = 0
        while len(Q) != 0:
            u = Q.pop(0)
            for v in range(self.vertices):
                if self.grafo[u][v] != 0 and desc[v] == 0:
                    Q.append(v)
                    R.append(v)
                    desc[v] = 1
                    comp[v] = 0
        return R

    def conexo(self):
        """Return (number_of_components, [component sizes]).

        Unlike the original early-return version this also handles the
        empty graph, returning (0, []) instead of None.
        """
        componente = [1 for _ in range(self.vertices)]
        t = []
        k = 0
        for i in range(self.vertices):
            if componente[i] != 0:
                busca = self.busca_largura(componente, i)
                t.append(len(busca))
                k = k + 1
        return (k, t)
|
{"/main.py": ["/funcoes_de_matrizes.py", "/funcoes_de_listas.py"]}
|
30,465,944
|
310Passco/refrigerator
|
refs/heads/master
|
/coolboxapp/migrations/0007_auto_20210717_2134.py
|
# Generated by Django 3.2.5 on 2021-07-17 12:34
import coolboxapp.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Swap in the app's user manager, drop Food.owner, and make
    CustomUser.email / CustomUser.username unique."""

    dependencies = [
        ('coolboxapp', '0006_alter_customuser_email'),
    ]
    operations = [
        # attach the manager declared in coolboxapp.models as `objects`
        # NOTE(review): `coolboxapp.models.Manager` is unusual naming for a
        # user-model manager (Django's is UserManager) — confirm it exists
        migrations.AlterModelManagers(
            name='customuser',
            managers=[
                ('objects', coolboxapp.models.Manager()),
            ],
        ),
        # Food no longer tracks an owner
        migrations.RemoveField(
            model_name='food',
            name='owner',
        ),
        # email becomes unique
        migrations.AlterField(
            model_name='customuser',
            name='email',
            field=models.EmailField(max_length=254, unique=True, verbose_name='email address'),
        ),
        # username becomes unique
        migrations.AlterField(
            model_name='customuser',
            name='username',
            field=models.CharField(max_length=150, unique=True, verbose_name='username'),
        ),
    ]
|
{"/coolboxapp/views.py": ["/coolboxapp/models.py", "/coolboxapp/forms.py"], "/coolboxapp/urls.py": ["/coolboxapp/views.py"], "/coolboxapp/forms.py": ["/coolboxapp/models.py"], "/coolboxapp/migrations/0007_auto_20210717_2134.py": ["/coolboxapp/models.py"]}
|
30,465,945
|
310Passco/refrigerator
|
refs/heads/master
|
/coolboxapp/migrations/0002_food.py
|
# Generated by Django 3.2.5 on 2021-07-07 07:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Food table (id, owner, name, deadline)."""

    dependencies = [
        ('coolboxapp', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Food',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # free-text owner (removed again by migration 0007)
                ('owner', models.CharField(max_length=100)),
                ('name', models.CharField(max_length=250)),
                # expiry date of the stored food item
                ('deadline', models.DateField()),
            ],
        ),
    ]
|
{"/coolboxapp/views.py": ["/coolboxapp/models.py", "/coolboxapp/forms.py"], "/coolboxapp/urls.py": ["/coolboxapp/views.py"], "/coolboxapp/forms.py": ["/coolboxapp/models.py"], "/coolboxapp/migrations/0007_auto_20210717_2134.py": ["/coolboxapp/models.py"]}
|
30,474,524
|
kduy1411/Projectpythonnc
|
refs/heads/main
|
/mydrone/read_products.py
|
"""Fetch the drone list from the local API and print the first drone's name."""
import json
import urllib.request

DEFAULT_ENCODING = 'utf-8'
url = 'http://127.0.0.1:8000/api/drones/'
# Context-manage the response so the underlying socket is always released,
# even if decoding or JSON parsing fails (original leaked the connection).
with urllib.request.urlopen(url) as urlResponse:
    if hasattr(urlResponse.headers, 'get_content_charset'):
        # Python 3: headers is an email.message.Message
        encoding = urlResponse.headers.get_content_charset(DEFAULT_ENCODING)
    else:
        # legacy (Python 2) fallback kept from the original script
        encoding = urlResponse.headers.getparam('charset') or DEFAULT_ENCODING
    drones = json.loads(urlResponse.read().decode(encoding))
# assumes the endpoint returns a non-empty JSON array of objects with a 'name' key
print(drones[0]['name'])
|
{"/mydrone/views.py": ["/mydrone/serializers.py"], "/cart/views.py": ["/mydrone/models.py"], "/mydrone/admin.py": ["/mydrone/models.py"], "/orders/views.py": ["/orders/forms.py"], "/mydrone/serializers.py": ["/mydrone/models.py"]}
|
30,474,525
|
kduy1411/Projectpythonnc
|
refs/heads/main
|
/mydrone/serializers.py
|
''' Serialisers.py'''
# from django.contrib.auth.models import User, Group
from .models import Category, Drone
from rest_framework import serializers
class DroneSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Drone exposing every model field.

    `category` overrides the default relational field and renders the
    related Category's primary key (via the dotted source) as a string.
    """
    # CharField with a dotted source reads drone.category.id
    category = serializers.CharField(source='category.id')
    class Meta:
        model = Drone
        fields = '__all__'
class CategorySerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Category with its drones nested read-only."""
    drones = DroneSerializer(many=True, read_only=True)

    class Meta:
        model = Category
        # Bug fix: ('name') is just the string 'name' — DRF requires fields
        # to be a list/tuple — and every declared field ('drones') must be
        # listed here or DRF raises an assertion error.
        fields = ('name', 'drones')
|
{"/mydrone/views.py": ["/mydrone/serializers.py"], "/cart/views.py": ["/mydrone/models.py"], "/mydrone/admin.py": ["/mydrone/models.py"], "/orders/views.py": ["/orders/forms.py"], "/mydrone/serializers.py": ["/mydrone/models.py"]}
|
30,474,526
|
kduy1411/Projectpythonnc
|
refs/heads/main
|
/mydrone/admin.py
|
from django.contrib import admin
# Register your models here.
from .models import Category, Drone, Client, Customer, UserProfileInfo
# Expose every app model in the Django admin with default ModelAdmin options.
admin.site.register(Category)
admin.site.register(Drone)
admin.site.register(Client)
admin.site.register(Customer)
admin.site.register(UserProfileInfo)
|
{"/mydrone/views.py": ["/mydrone/serializers.py"], "/cart/views.py": ["/mydrone/models.py"], "/mydrone/admin.py": ["/mydrone/models.py"], "/orders/views.py": ["/orders/forms.py"], "/mydrone/serializers.py": ["/mydrone/models.py"]}
|
30,503,468
|
saikarthik007/Alarm-Clock
|
refs/heads/main
|
/alarm_clock.py
|
"""
Application to set alarm based on date and time
Developed by : Karthik C
"""
import time
from datetime import datetime
from playsound import playsound
class AlarmClock:
    """Fires a sound once at a user-supplied date (DD/MM/YYYY) and time (HH:MM:SS).

    The original implementation compared the date/time STRINGS, which is
    lexicographic and therefore wrong across months/years (e.g. "01/12/2021"
    < "25/01/2021"), and fired only on exact second-string equality, so the
    alarm was missed if a poll slipped past the target second.  Both are
    fixed by parsing into a single datetime.
    """

    def __init__(self, alarm_date, alarm_time):
        """Store the target date (DD/MM/YYYY) and time (HH:MM:SS) strings."""
        self.alarm_time = alarm_time
        self.alarm_date = alarm_date

    def _alarm_datetime(self):
        """Parse the stored strings into one datetime; raises ValueError on bad input."""
        return datetime.strptime(f"{self.alarm_date} {self.alarm_time}", "%d/%m/%Y %H:%M:%S")

    def _is_past(self):
        """Return True if the requested alarm moment is already in the past."""
        return self._alarm_datetime() < datetime.now()

    def set_alarm(self):
        """Poll once per second until the alarm moment is reached, then play the sound."""
        target = self._alarm_datetime()
        while True:
            time.sleep(1)
            # >= so a poll that lands after the exact second still fires
            if datetime.now() >= target:
                print("Time to Wake up")
                playsound('alarm.mp3')
                break

    def verify_user_input(self):
        """Refuse malformed or past date/times; otherwise arm the alarm (blocks)."""
        now = datetime.now()
        try:
            target = self._alarm_datetime()
        except ValueError:
            print("Invalid date/time format!")
            return
        if target.date() < now.date():
            print("Entered date is in past cant set alarm!")
        elif target < now:
            print("Entered time is in past cant set alarm!")
        else:
            print("Setting up alarm...")
            self.set_alarm()
if __name__ == "__main__":
    # get date from user to set alarm on particular date
    ALARM_DAY = input("Enter the date to set alarm : DD/MM/YYYY\n")
    # get time from user to set alarm on time
    ALARM_TIME = input("Enter the time to set alarm : HH:MM:SS\n")
    # verify the given date/time is not in the past; when valid this arms
    # the alarm and BLOCKS in a 1-second polling loop until it fires
    AlarmClock(ALARM_DAY, ALARM_TIME).verify_user_input()
|
{"/test_alarm_clock.py": ["/alarm_clock.py"]}
|
30,519,157
|
RogerXu1048/VandyHack
|
refs/heads/main
|
/body_template.py
|
#body.py: Body class for gravitational simulation
from codinglib import *
class Body:
    """A point mass with position, velocity and drawing attributes for the
    gravity simulation.

    Positions/velocities are in metres and metres/second; r, g, b and
    pixel_radius are in the units expected by codinglib's drawing helpers.
    (The leftover instructional string statements of the template have been
    removed — they were no-op expression statements.)
    """

    def __init__(self, mass, x, y, vx, vy, pixel_radius, r, g, b):
        self.mass = mass
        self.x = x
        self.y = y
        self.vx = vx
        self.vy = vy
        self.pixel_radius = pixel_radius
        self.r = r
        self.g = g
        self.b = b

    def get_mass(self):
        """Return the body's mass."""
        return self.mass

    def get_x(self):
        """Return the current x position."""
        return self.x

    def get_y(self):
        """Return the current y position."""
        return self.y

    def update_position(self, timestep):
        """Advance the position by one Euler step of *timestep* seconds."""
        self.x = self.x + self.vx * timestep
        self.y = self.y + self.vy * timestep

    def update_velocity(self, ax, ay, timestep):
        """Apply acceleration (ax, ay) for *timestep* seconds."""
        self.vx = self.vx + ax * timestep
        self.vy = self.vy + ay * timestep

    def draw(self, cx, cy, pixels_per_meter):
        """Draw the body as a filled circle offset from screen centre (cx, cy).

        Relies on codinglib's set_fill_color/disable_stroke/enable_fill/
        draw_circle being star-imported at module level.
        """
        set_fill_color(self.r, self.g, self.b)
        disable_stroke()
        enable_fill()
        draw_circle(pixels_per_meter * self.x + cx, pixels_per_meter * self.y + cy, self.pixel_radius)
|
{"/solar_template.py": ["/body_template.py", "/system_template.py"], "/system_template.py": ["/body_template.py"]}
|
30,519,158
|
RogerXu1048/VandyHack
|
refs/heads/main
|
/system_template.py
|
#system.py
#A driver that allows different bodies to interact, defines System class
from math import sqrt
from body_template import Body
UNIVERSAL_GRAVITATIONAL_CONSTANT = 6.67384e-11  # G in m^3 kg^-1 s^-2


class System:
    """A collection of gravitating bodies advanced with explicit Euler steps.

    Bodies are duck-typed: they must provide get_mass/get_x/get_y,
    update_position(timestep) and update_velocity(ax, ay, timestep)
    (and draw(cx, cy, ppm) if the system is drawn).
    """

    def __init__(self, body_list):
        self.body_list = body_list

    def add(self, body):
        """Add *body* to the simulation."""
        self.body_list.append(body)

    def delete(self, body):
        """Remove *body* from the simulation."""
        self.body_list.remove(body)

    def draw(self, cx, cy, pixels_per_meter):
        """Draw every body relative to screen centre (cx, cy)."""
        for body in self.body_list:
            body.draw(cx, cy, pixels_per_meter)

    def dist(self, n1, n2):
        """Euclidean distance between the bodies at indices n1 and n2."""
        dx = self.body_list[n2].get_x() - self.body_list[n1].get_x()
        dy = self.body_list[n2].get_y() - self.body_list[n1].get_y()
        return sqrt(dx * dx + dy * dy)

    def compute_acceleration(self, n):
        """Total (ax, ay) on body n from all other bodies (Newtonian gravity).

        Note: coincident bodies (r == 0) raise ZeroDivisionError.
        """
        n_x = self.body_list[n].get_x()
        n_y = self.body_list[n].get_y()
        total_ax = 0
        total_ay = 0
        for i in range(len(self.body_list)):
            if n != i:
                r = self.dist(i, n)
                # a = G * m / r^2, projected onto x/y via (d / r)
                a = UNIVERSAL_GRAVITATIONAL_CONSTANT * self.body_list[i].get_mass() / (r * r)
                total_ax += a * (self.body_list[i].get_x() - n_x) / r
                total_ay += a * (self.body_list[i].get_y() - n_y) / r
        return (total_ax, total_ay)

    def update(self, timestep):
        """Advance one Euler step: move all bodies first, then update velocities."""
        for n in range(len(self.body_list)):
            self.body_list[n].update_position(timestep)
        for n in range(len(self.body_list)):
            (ax, ay) = self.compute_acceleration(n)
            self.body_list[n].update_velocity(ax, ay, timestep)
|
{"/solar_template.py": ["/body_template.py", "/system_template.py"], "/system_template.py": ["/body_template.py"]}
|
30,519,159
|
RogerXu1048/VandyHack
|
refs/heads/main
|
/solar_template.py
|
#solar.py
#This uses the first four planets of the solar system and the sun
#NOTE(review): codinglib supplies the drawing/Button API used below; its
#semantics are not visible from this file.
from codinglib import*
from body_template import Body
from system_template import System
from math import sqrt
'''import all of codinglib, the System class from system program, and the Body class from your body program (3 lines)
the window width and height should both be 600, but you just need to specify something (2 lines)'''
WINDOW_WIDTH = 800
WINDOW_HEIGHT = 800
AU = 1.49598e11 #number of meters per astronomical unit
EM = 5.9736e24 #mass of the Earth in kilograms
TIME_SCALE = 3.0e6 #how many real seconds for each second of simulation
PIXELS_PER_METER = 120 / AU /3 #distance scale for the simulation
FRAME_RATE = 30
TIMESTEP = 1.0 / FRAME_RATE #time between drawing each frame
#Solar system data from http://hyperphysics.phy-astr.gsu.edu/hbase/solar/soldata2.html
'''
initialize the sun, mercury, earth, and mars each as a instance of the Body class (5 lines)
the parameters are listed in the Body class, of course
the yellow one's the sun, mercury is orange, venus purple, earth blue-green, mars red
mo' planets mo' credit
initialize the solar system as a instance of the System class (1 line)
the lone parameter it takes is the list of bodies
'''
#Body(mass_kg, x_m, y_m, vx_m_s, vy_m_s, pixel_radius, r, g, b)
Sun=Body(1.9891e30,0,0,0,0,25/2,1,1,0)
Mercury=Body(3.3e23,5.79e10,0,0,47400,4/2,1,0.8,0)
Earth=Body(5.976e24,1.496e11,0,0,29800,12/2,0,1,1)
Mars=Body(6.42e23,2.279e11,0,0,24100,6/2,1,0,0)
#these three are created here but only join the system via the buttons in main()
Venus=Body(4.87e24,1.082e11,0,0,35000,11/2,1,0,1)
Jupiter=Body(1.9e27,7.786e11,0,0,13100,18/2,1,1,1)
Saturn=Body(5.69e26,1.433e12,0,0,9600,13/2,0.8,0.6,1)
'''Sun1=Body(1.9891e30,-2e11,0,0,0,16,1,1,0)
Sun2=Body(1.9891e30,2e11,0,0,0,16,1,0,0)
Sun3=Body(1.9891e30,0,sqrt(3)*2e11,0,0,16,0,1,0)'''
solar_system=System([Sun,Mercury,Earth,Mars])
'''solar_system=System([Sun1, Sun2, Sun3])'''
'''def clicked():
    global drawn
    drawn = True'''
'''drawn = False
def clicked(mx, my):
    global drawn
    drawn=True'''
def main():
    #give it a black background and enable smoothing (2 lines)
    #clear the screen #
    # (1 line)
    set_clear_color(0,0,0)
    enable_smoothing()
    clear()
    solar_system.draw(WINDOW_WIDTH/2 , WINDOW_HEIGHT/2 , PIXELS_PER_METER) #draw the system
    solar_system.update(TIME_SCALE * TIMESTEP) #and update the relevant numbers
    #three buttons that add Venus, Jupiter or Saturn to the simulation;
    #NOTE(review): the local Body assignments below shadow the module-level
    #planets, and a new body appears to be appended on every frame the
    #button reports clicked - confirm Button.is_clicked semantics.
    bt1 = Button(100, 600, 100, 100, False)
    bt2 = Button(100, 350, 100, 100, False)
    bt3 = Button(100, 100, 100, 100, False)
    bt1.draw_rec_button()
    bt2.draw_rec_button()
    bt3.draw_rec_button()
    if bt1.is_clicked():
        # coding_quit()
        # start_graphics(main2, "Sn", width=WINDOW_WIDTH, height=WINDOW_HEIGHT, framerate=FRAME_RATE)
        Venus = Body(4.87e24, 1.082e11, 0, 0, 35000, 11/2, 1, 0, 1)
        solar_system.add(Venus)
    if bt2.is_clicked():
        # coding_quit()
        # start_graphics(main2, "Sn", width=WINDOW_WIDTH, height=WINDOW_HEIGHT, framerate=FRAME_RATE)
        Jupiter = Body(1.9e27, 7.786e11, 0, 0, 13100, 18 / 2, 1, 1, 1)
        solar_system.add(Jupiter)
    if bt3.is_clicked():
        # coding_quit() # start_graphics(main2, "Sn", width=WINDOW_WIDTH, height=WINDOW_HEIGHT, framerate=FRAME_RATE)
        Saturn = Body(5.69e26, 1.433e12, 0, 0, 9600, 13 / 2, 0.8, 0.6, 1)
        solar_system.add(Saturn)
'''start the graphics software with main as a parameter, then a title, width is your window width,
height is your window height, and window height (1 line)'''
start_graphics(main, width = WINDOW_WIDTH, height = WINDOW_HEIGHT, framerate = 60)
|
{"/solar_template.py": ["/body_template.py", "/system_template.py"], "/system_template.py": ["/body_template.py"]}
|
30,579,467
|
DanielKusyDev/proton
|
refs/heads/develop
|
/utils.py
|
import base64
import functools
import os
import secrets
import sqlite3
import ssl
import string
from datetime import datetime

import settings
from core import models, messages
class ProtonError(Exception):
    """Base class for errors raised by the Proton protocol layer."""
def generate_token(length=40):
    """Return a cryptographically random token of *length* ASCII letters."""
    chars = [secrets.choice(string.ascii_letters) for _ in range(length)]
    return "".join(chars)
def validate_auth(fn):
    """Decorator requiring a valid auth token before running a handler.

    The wrapped callable must be invoked as fn(controller, message, ...),
    where the controller exposes `auth_token` and `db_name`.  Raises
    PermissionError when the token is missing or invalid.

    Improvements over the original: `functools.wraps` preserves the
    handler's name/docstring, and the `assert`-based checks (stripped
    under `python -O`) are replaced by explicit raises.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        controller, message = args
        try:
            token = controller.auth_token
            if token is None:
                raise ProtonError("missing auth token")
            token_model = models.AuthToken(controller.db_name)
            if not token_model.is_valid(token=token):
                raise ProtonError("invalid auth token")
        except (KeyError, AssertionError, ProtonError):
            raise PermissionError("Permission denied. Authorization required.")
        return fn(*args, **kwargs)
    return wrapper
def create_conn(db_name=settings.DATABASE):
    """Open and return a sqlite3 connection to *db_name*.

    On failure the error is printed and None is returned (best-effort,
    matching the original contract).
    """
    try:
        return sqlite3.connect(db_name)
    except sqlite3.Error as exc:
        print(exc)
def create_db(db_name=settings.DATABASE):
    """Create the schema by running core/db/create_db.sql against *db_name*.

    Fixes a resource leak in the original: the connection is now committed
    and closed instead of being abandoned after the script runs.
    """
    conn = create_conn(db_name)
    try:
        with open("core/db/create_db.sql", "r") as script:
            conn.cursor().executescript(script.read())
        conn.commit()
    finally:
        conn.close()
def get_image_base64(path):
    """Read the binary file at *path* and return its base64-encoded text."""
    with open(path, "rb") as file:
        raw = file.read()
    return base64.b64encode(raw).decode()
class Logger(object):
    """Minimal size-rotating file logger that mirrors every record to stdout.

    Records go to '<log_dir>/proton_std*.log'; once the newest file exceeds
    `max_log_dir_size` bytes, a new numbered file is started.

    Fixes over the original:
      * the timestamp is formatted separately, so '%' sequences inside the
        message can no longer be mis-read as strftime directives;
      * rotation increments the file number instead of reusing the number
        of the already-full file;
      * the extension is stripped with os.path.splitext (the old
        split(".") unpack broke on paths containing extra dots).
    Known limitation (unchanged): only single-digit suffixes rotate
    cleanly; lexicographic sorting would misorder 10+ files.
    """

    def __init__(self, log_dir="logs", max_log_dir_size=5 * 10 ** 6):
        self.log_dir = log_dir
        # kept for backward compatibility; formatting now happens in _get_log_body
        self.log_template = "[%d/%b/%Y %H:%M:%S] {message}"
        self.max_log_dir_size = max_log_dir_size
        self.filename_prefix = "proton_std"

    def get_log_filename(self):
        """Return the log file to append to, creating the directory if needed."""
        if not os.path.exists(self.log_dir):
            os.mkdir(self.log_dir)
        all_log_files = sorted(filter(lambda path: self.filename_prefix in path, os.listdir(self.log_dir)))
        if not all_log_files:
            return f"{self.log_dir}/{self.filename_prefix}.log"
        last_file = f"{self.log_dir}/{all_log_files[-1]}"
        if os.stat(last_file).st_size < self.max_log_dir_size:
            return last_file
        # newest file is full: roll over to the next numbered file
        stem, _ = os.path.splitext(last_file)
        try:
            file_number = int(stem[-1]) + 1
        except ValueError:
            # un-numbered '<prefix>.log' was full -> start numbering at 1
            file_number = 1
        return f"{self.log_dir}/{self.filename_prefix}{file_number}.log"

    def _get_log_body(self, message):
        """Return '[dd/Mon/YYYY HH:MM:SS] <message>' with the message kept verbatim."""
        stamp = datetime.now().strftime("[%d/%b/%Y %H:%M:%S]")
        return f"{stamp} {message}"

    def _write(self, message):
        """Append one record to the current log file and echo it to stdout."""
        log = self._get_log_body(message)
        log = log.strip("| :")
        filename = self.get_log_filename()
        with open(filename, "a") as file:
            file.write(log + "\n")
        print(log)

    def error(self, action, message="", host=""):
        """Log an error; details are hidden unless settings.DEBUG is on."""
        if settings.DEBUG:
            message = f"{host} | ERROR: {message} | {action}"
        else:
            message = f"{host} | ERROR: Unexpected error"
        self._write(message)

    def success(self, action, message="", host=""):
        """Log a successful action."""
        message = f"{host} | OK: {message} | {action}"
        self._write(message)

    def warning(self, action, message="", host=""):
        """Log a rejected/invalid action."""
        message = f"{host} | WRONG: {message} | {action}"
        self._write(message)

    def info(self, message):
        """Log a free-form informational message."""
        self._write(message)
|
{"/controllers.py": ["/models.py", "/utils.py"], "/models.py": ["/settings.py", "/utils.py"], "/server.py": ["/message.py", "/utils.py", "/controllers.py"], "/message.py": ["/utils.py"], "/utils.py": ["/models.py", "/settings.py"], "/tests.py": ["/models.py", "/utils.py", "/controllers.py", "/message.py", "/settings.py", "/backend/server.py", "/core/controllers.py", "/core/messages.py"], "/backend/crypto.py": ["/settings.py"], "/core/models.py": ["/settings.py", "/utils.py"], "/runserver.py": ["/utils.py", "/backend/server.py", "/settings.py"], "/backend/server.py": ["/settings.py", "/utils.py"], "/core/controllers.py": ["/settings.py", "/utils.py", "/core/messages.py"], "/core/messages.py": ["/utils.py"]}
|
30,579,468
|
DanielKusyDev/proton
|
refs/heads/develop
|
/backend/crypto.py
|
import os
import settings
from cryptography.fernet import Fernet
import base64
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
def generate_key():
    """Derive the (deterministic) Fernet key from SECRET_KEY + SALT via PBKDF2-HMAC-SHA256."""
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=settings.SALT,
        iterations=100000,
        backend=default_backend(),
    )
    derived = kdf.derive(settings.SECRET_KEY.encode())
    return base64.urlsafe_b64encode(derived)
def encrypt(message):
    """Encrypt *message* (str) with the derived key; return the token as str."""
    cipher = Fernet(generate_key())
    token = cipher.encrypt(message.encode())
    return token.decode()
def compare(raw, encrypted):
    """Return True when *raw* equals the decrypted form of *encrypted*."""
    return decrypt(encrypted) == raw
def decrypt(encrypted_message):
    """Decrypt a Fernet token (str) back to the plaintext str."""
    cipher = Fernet(generate_key())
    plain = cipher.decrypt(encrypted_message.encode())
    return plain.decode()
|
{"/controllers.py": ["/models.py", "/utils.py"], "/models.py": ["/settings.py", "/utils.py"], "/server.py": ["/message.py", "/utils.py", "/controllers.py"], "/message.py": ["/utils.py"], "/utils.py": ["/models.py", "/settings.py"], "/tests.py": ["/models.py", "/utils.py", "/controllers.py", "/message.py", "/settings.py", "/backend/server.py", "/core/controllers.py", "/core/messages.py"], "/backend/crypto.py": ["/settings.py"], "/core/models.py": ["/settings.py", "/utils.py"], "/runserver.py": ["/utils.py", "/backend/server.py", "/settings.py"], "/backend/server.py": ["/settings.py", "/utils.py"], "/core/controllers.py": ["/settings.py", "/utils.py", "/core/messages.py"], "/core/messages.py": ["/utils.py"]}
|
30,579,469
|
DanielKusyDev/proton
|
refs/heads/develop
|
/core/models.py
|
import base64
import datetime
import os
import sqlite3
import abc
from time import strptime
from uuid import uuid4
from backend import crypto
import settings
import utils
class Model(abc.ABC):
    """Minimal active-record base over sqlite3.

    Subclasses set ``fields`` (insertable columns) and optionally
    ``write_only`` (columns hidden from serialized responses).  The table
    name is the lowercased class name.
    """

    fields = []
    write_only = []

    def __init__(self, db_name=settings.DATABASE):
        self.table_name = self.__class__.__name__.lower()
        self.conn = utils.create_conn(db_name=db_name)

    def __del__(self):
        # Guard: __init__ may have raised before ``conn`` was assigned.
        conn = getattr(self, "conn", None)
        if conn is not None:
            conn.close()

    def fetch(self, cursor, many=True):
        """Return all rows (many=True) or a single row from *cursor*."""
        return cursor.fetchall() if many else cursor.fetchone()

    def get_fields(self):
        """Comma-joined column list for INSERT statements."""
        return ",".join(self.fields)

    def get_table_cols(self):
        """Return {column_name: type_marker} from the table's PRAGMA info.

        Only the keys (column names) are consumed by callers in this
        codebase; the mapped values are informational.
        """
        sql = f"PRAGMA table_info({self.table_name})"
        cursor = self.conn.cursor()
        cursor.execute(sql)
        raw_cols = self.fetch(cursor, True)

        def map_col_type(col):
            c_type = col[2].lower()
            if "integer" in c_type:
                return c_type
            elif "char" in c_type:
                return str
            elif "datetime" in c_type:
                return datetime.timedelta

        cols = {col[1]: map_col_type(col) for col in raw_cols}
        return cols

    def get_conditions(self, filters):
        """Build a named-parameter WHERE body: ``a=:a and b=:b``."""
        conditions = [f"{key}=:{key}" for key, val in filters.items()]
        return " and ".join(conditions)

    def execute_sql(self, sql, params=()) -> sqlite3.Cursor:
        """Execute *sql* with *params*, commit, and return the cursor."""
        cursor = self.conn.cursor()
        cursor.execute(sql, params)
        self.conn.commit()
        return cursor

    def create(self, **kwargs):
        """INSERT a row built from ``fields`` and return the newest row."""
        placeholder = ",".join("?" * len(self.fields))
        params = [kwargs[field] for field in self.fields]
        sql = f"""INSERT INTO {self.table_name}({self.get_fields()}) VALUES({placeholder})"""
        self.execute_sql(sql, params)
        return self.last()

    def all(self):
        """Return every row of the table."""
        sql = f"SELECT * FROM {self.table_name}"
        cursor = self.execute_sql(sql)
        return self.fetch(cursor)

    def first(self, **kwargs):
        """Return the first row matching *kwargs* (or the first row at all), or None."""
        if kwargs:
            conditions = self.get_conditions(kwargs)
            sql = f"SELECT * FROM {self.table_name} WHERE {conditions} LIMIT 1"
        else:
            sql = f"SELECT * FROM {self.table_name} LIMIT 1"
        cursor = self.execute_sql(sql, kwargs)
        return self.fetch(cursor, False)

    def last(self, **kwargs):
        """Return the highest-id row matching *kwargs* (or overall), or None."""
        if kwargs:
            conditions = self.get_conditions(kwargs)
            sql = f"SELECT * FROM {self.table_name} WHERE {conditions} ORDER BY id DESC LIMIT 1"
        else:
            sql = f"SELECT * FROM {self.table_name} ORDER BY id DESC LIMIT 1"
        cursor = self.execute_sql(sql, kwargs)
        return self.fetch(cursor, False)

    def filter(self, **kwargs):
        """Return every row matching *kwargs* (AND-combined equality)."""
        conditions = self.get_conditions(kwargs)
        sql = f"SELECT * FROM {self.table_name} WHERE {conditions}"
        cursor = self.execute_sql(sql, kwargs)
        return self.fetch(cursor, True)

    def update(self, data: dict, where: dict):
        """UPDATE rows matching *where* with *data*; return one updated row.

        Bug fix: the WHERE conditions used to be comma-joined (a SQL syntax
        error whenever *where* held more than one key); they are now joined
        with AND.  The SET clause keeps its comma separator.
        """
        data_placeholder = " = ?, ".join(data.keys()) + " = ?"
        where_placeholder = " = ? AND ".join(where.keys()) + " = ?"
        sql = f"UPDATE {self.table_name} SET {data_placeholder} WHERE {where_placeholder}"
        params = list(data.values()) + list(where.values())
        self.execute_sql(sql, params)
        return self.first(**data)

    def delete(self, **kwargs):
        """DELETE rows matching *kwargs*; return the first matching row (pre-delete) or None."""
        obj = self.first(**kwargs)
        conditions = self.get_conditions(kwargs)
        sql = f"DELETE FROM {self.table_name} WHERE {conditions}"
        self.execute_sql(sql, kwargs)
        return obj
class Post(Model):
    """Post record: image path, content, title and owning user id."""

    fields = ["image", "content", "title", "user_id"]
class User(Model):
    """User record; the password column is stored Fernet-encrypted."""

    fields = ["username", "password"]
    write_only = ["password"]

    def create(self, **kwargs):
        """Encrypt the plaintext password before delegating to Model.create."""
        encrypted = crypto.encrypt(kwargs.get("password"))
        return super().create(**{**kwargs, "password": encrypted})
class AuthToken(Model):
    """Expiring per-user authentication token."""

    fields = ["token", "user_id", "expires"]
    write_only = ["token", "expires"]

    def get_fresh_expiration(self):
        """Return the expiry timestamp for a token issued right now."""
        return datetime.datetime.now() + datetime.timedelta(**settings.EXPIRATION)

    def create(self, user_id):
        """Issue and persist a new random token for *user_id*."""
        token = utils.generate_token()
        expires = self.get_fresh_expiration()
        return super(AuthToken, self).create(token=token, user_id=user_id, expires=expires)

    def is_valid(self, **kwargs):
        """Return True when the matching token exists and has not expired.

        Raises utils.ProtonError when no token matches *kwargs*.
        """
        token = self.first(**kwargs)
        if token is None:
            raise utils.ProtonError("Not found.")
        # Parse the stored timestamp directly; the original rebuilt a
        # datetime field-by-field from time.strptime and silently dropped
        # the microseconds component.
        expires = datetime.datetime.strptime(token[3], "%Y-%m-%d %H:%M:%S.%f")
        return datetime.datetime.now() < expires
|
{"/controllers.py": ["/models.py", "/utils.py"], "/models.py": ["/settings.py", "/utils.py"], "/server.py": ["/message.py", "/utils.py", "/controllers.py"], "/message.py": ["/utils.py"], "/utils.py": ["/models.py", "/settings.py"], "/tests.py": ["/models.py", "/utils.py", "/controllers.py", "/message.py", "/settings.py", "/backend/server.py", "/core/controllers.py", "/core/messages.py"], "/backend/crypto.py": ["/settings.py"], "/core/models.py": ["/settings.py", "/utils.py"], "/runserver.py": ["/utils.py", "/backend/server.py", "/settings.py"], "/backend/server.py": ["/settings.py", "/utils.py"], "/core/controllers.py": ["/settings.py", "/utils.py", "/core/messages.py"], "/core/messages.py": ["/utils.py"]}
|
30,579,470
|
DanielKusyDev/proton
|
refs/heads/develop
|
/runserver.py
|
import os
import utils
from backend.server import Server
import settings
# Bootstrap: create the SQLite schema on first run, then serve forever.
if not os.path.exists(settings.DATABASE):
    utils.create_db(settings.DATABASE)
server = Server((settings.HOST, settings.PORT))
server.runserver()
|
{"/controllers.py": ["/models.py", "/utils.py"], "/models.py": ["/settings.py", "/utils.py"], "/server.py": ["/message.py", "/utils.py", "/controllers.py"], "/message.py": ["/utils.py"], "/utils.py": ["/models.py", "/settings.py"], "/tests.py": ["/models.py", "/utils.py", "/controllers.py", "/message.py", "/settings.py", "/backend/server.py", "/core/controllers.py", "/core/messages.py"], "/backend/crypto.py": ["/settings.py"], "/core/models.py": ["/settings.py", "/utils.py"], "/runserver.py": ["/utils.py", "/backend/server.py", "/settings.py"], "/backend/server.py": ["/settings.py", "/utils.py"], "/core/controllers.py": ["/settings.py", "/utils.py", "/core/messages.py"], "/core/messages.py": ["/utils.py"]}
|
30,579,471
|
DanielKusyDev/proton
|
refs/heads/develop
|
/settings.py
|
import codecs
from configparser import RawConfigParser
# Application configuration, sourced from config.ini plus fixed constants.
parser = RawConfigParser()
parser.read_file(codecs.open("config.ini", "r", "utf-8"))
SECRET_KEY = parser.get("SECRET", "KEY")
SALT = parser.get("SECRET", "SALT").encode()  # KDF salt must be bytes
# Bug fix: parser.get() returns a string, so DEBUG = "False" was truthy;
# getboolean() parses the ini value into a real bool.
DEBUG = parser.getboolean("GENERAL", "DEBUG")
# Auth-token lifetime, passed as keyword args to datetime.timedelta.
EXPIRATION = {
    "minutes": 15
}
DATABASE = "core/db/sqlite3.db"
MEDIA_ROOT = "assets"
PORT = 6666
HOST = "0.0.0.0"
CERTS_DIR = "backend/certs"
|
{"/controllers.py": ["/models.py", "/utils.py"], "/models.py": ["/settings.py", "/utils.py"], "/server.py": ["/message.py", "/utils.py", "/controllers.py"], "/message.py": ["/utils.py"], "/utils.py": ["/models.py", "/settings.py"], "/tests.py": ["/models.py", "/utils.py", "/controllers.py", "/message.py", "/settings.py", "/backend/server.py", "/core/controllers.py", "/core/messages.py"], "/backend/crypto.py": ["/settings.py"], "/core/models.py": ["/settings.py", "/utils.py"], "/runserver.py": ["/utils.py", "/backend/server.py", "/settings.py"], "/backend/server.py": ["/settings.py", "/utils.py"], "/core/controllers.py": ["/settings.py", "/utils.py", "/core/messages.py"], "/core/messages.py": ["/utils.py"]}
|
30,579,472
|
DanielKusyDev/proton
|
refs/heads/develop
|
/backend/server.py
|
import os
import socket
import ssl
import threading
from time import sleep
import settings
from core import messages, controllers, models
from utils import Logger
# Module-wide logger shared by the accept loop and every client thread.
logger = Logger()
def recv_all(sock: ssl.SSLSocket) -> str:
    """Read one CRLF-terminated message from *sock*, one byte at a time.

    Returns whatever was read so far when the non-blocking socket raises
    SSLWantReadError.  Any other exception now propagates — the original
    returned from ``finally``, which silently swallowed every error.
    """
    result = ""
    try:
        while result[-2:] != "\r\n":
            result += sock.read(1).decode()
    except ssl.SSLWantReadError as e:
        print(e)
    return result
# Shared write lock: the original created a fresh threading.Lock() inside
# each call, so concurrent sends were never actually serialized.
_SEND_LOCK = threading.Lock()


def send(sock: ssl.SSLSocket, response: messages.Response) -> None:
    """Serialize *response* onto the TLS socket and log the outcome.

    The socket write is guarded by a module-level lock so concurrent
    client threads cannot interleave their writes.
    """
    with _SEND_LOCK:
        message_str = response.json_response
        if isinstance(message_str, str):
            message_str = message_str.encode()
        sock.write(message_str)
        host, port = sock.getpeername()
    host = f"{host}:{port}"
    message = response.message if response.message is not None else ""
    log_args = (response.action, message, host)
    # Route to the matching log level based on the response status.
    if response.status.upper() == "OK":
        logger.success(*log_args)
    elif response.status.upper() == "ERROR":
        logger.warning(*log_args)
    else:
        logger.error(*log_args)
class ClientThread(threading.Thread):
    """Per-connection worker: reads requests off a TLS socket in a loop."""

    def __init__(self, secure_socket: ssl.SSLSocket):
        super().__init__()
        self.secure_socket = secure_socket
        # Session token for this client; set on login, cleared on logout.
        self.auth_token = None

    def get_request(self):
        """Read one CRLF-terminated message and parse it into a Request."""
        raw_message = recv_all(self.secure_socket)
        request = messages.Request(raw_message)
        return request

    def get_response(self, request) -> messages.Response:
        """Dispatch *request* to the controller method named by its action."""
        controller = controllers.Controller(self.auth_token)
        response = getattr(controller, request.action)(request)
        if request.action == "login" and response.status == "OK":
            # Cache the freshly issued token string (column index 2).
            token_id = response.data[0]["id"]
            token = models.AuthToken().first(id=token_id)[2]
            self.auth_token = token
        elif request.action == "logout" and response.status == "OK":
            self.auth_token = None
        return response

    def run(self) -> None:
        # NOTE(review): only PermissionError is handled here; a malformed
        # request or a dropped connection raises out of the loop and kills
        # this thread silently — confirm whether that is intended.
        while True:
            try:
                request = self.get_request()
                response = self.get_response(request)
                send(self.secure_socket, response)
            except PermissionError as e:
                response = messages.Response(status="ERROR", message=str(e))
                send(self.secure_socket, response)
class Server(object):
    """TLS-wrapped TCP server that hands each accepted client to a ClientThread."""

    def __init__(self, address=("127.0.0.1", 6666)):
        self.address = address

    def get_raw_socket(self) -> socket.socket:
        """Create, bind and listen on a reusable plain TCP socket."""
        raw_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        raw_socket.bind(self.address)
        raw_socket.listen(100)
        return raw_socket

    def get_secure_socket(self, raw_socket: socket.socket) -> ssl.SSLSocket:
        """Wrap an accepted connection in TLS 1.2 using the server cert pair."""
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        context.load_cert_chain(os.path.join(settings.CERTS_DIR, "server.pem"), os.path.join(settings.CERTS_DIR, "server.key"))
        ssock = context.wrap_socket(raw_socket, server_side=True)
        return ssock

    def process(self, server_socket: socket.socket):
        """Accept clients forever; a failure on one client keeps the loop alive."""
        try:
            while True:
                try:
                    conn, c_addr = server_socket.accept()
                    secure_client = self.get_secure_socket(conn)
                except Exception as e:
                    # Accept/TLS-handshake failure: log and keep serving.
                    logger.info(str(e))
                    continue
                try:
                    logger.info(f"Connected by {c_addr[0]}:{c_addr[1]}")
                    c = ClientThread(secure_client)
                    c.start()
                except Exception as e:
                    # Thread start failed: tell the client and drop the socket.
                    response = messages.Response(status="ERROR", message=str(e))
                    send(secure_client, response)
                    secure_client.close()
        except Exception as e:
            logger.info(str(e))

    def runserver(self):
        """Entry point: bind the listening socket and serve until killed."""
        logger.info(f"Starting server at {self.address[0]}:{self.address[1]}")
        server_socket = self.get_raw_socket()
        self.process(server_socket)
|
{"/controllers.py": ["/models.py", "/utils.py"], "/models.py": ["/settings.py", "/utils.py"], "/server.py": ["/message.py", "/utils.py", "/controllers.py"], "/message.py": ["/utils.py"], "/utils.py": ["/models.py", "/settings.py"], "/tests.py": ["/models.py", "/utils.py", "/controllers.py", "/message.py", "/settings.py", "/backend/server.py", "/core/controllers.py", "/core/messages.py"], "/backend/crypto.py": ["/settings.py"], "/core/models.py": ["/settings.py", "/utils.py"], "/runserver.py": ["/utils.py", "/backend/server.py", "/settings.py"], "/backend/server.py": ["/settings.py", "/utils.py"], "/core/controllers.py": ["/settings.py", "/utils.py", "/core/messages.py"], "/core/messages.py": ["/utils.py"]}
|
30,579,473
|
DanielKusyDev/proton
|
refs/heads/develop
|
/core/controllers.py
|
import settings
from core import models
from backend import crypto
from utils import validate_auth
from core.messages import ModelResponse, Response
class Controller(object):
    """Maps request actions onto model operations.

    Methods decorated with @validate_auth require ``auth_token`` to
    reference a valid, unexpired AuthToken row.
    """

    def __init__(self, auth_token, db_name=settings.DATABASE):
        self.auth_token = auth_token
        self.db_name = db_name
        self.post_model = models.Post(self.db_name)
        self.user_model = models.User(self.db_name)
        self.auth_model = models.AuthToken(self.db_name)

    def _get_token(self, user_id):
        """Return a token row for *user_id*: refresh the existing one or create it."""
        token = self.auth_model.first(user_id=user_id)
        if token:
            token = self.auth_model.update(data={"expires": self.auth_model.get_fresh_expiration()},
                                           where={"user_id": user_id})
        else:
            token = self.auth_model.create(user_id=user_id)
        return token

    def register(self, request):
        """Create a new user unless the username is already taken."""
        params = request.params
        users = self.user_model.filter(username=params.get("username"))
        if len(users) > 0:
            return Response(status="ERROR", message="Given user already exists.", action="register")
        username = params.get("username")
        password = params.get("password")
        self.user_model.create(username=username, password=password)
        users = self.user_model.first(username=username)
        return ModelResponse("OK", self.user_model, users, action="register")

    def login(self, request):
        """Validate credentials and issue (or refresh) an auth token."""
        params = request.params
        username = params["username"]
        password = params["password"]
        user = self.user_model.first(username=username)
        # user[2] is the stored encrypted password column.
        if not user or not crypto.compare(password, user[2]):
            return Response(status="ERROR", message="Incorrect username or/and password.", action="login")
        token = self._get_token(user[0])
        return ModelResponse("OK", self.auth_model, token, action="login")

    @validate_auth
    def logout(self, request):
        """Invalidate the current auth token."""
        self.auth_model.delete(token=self.auth_token)
        return Response("OK", action="logout")

    @validate_auth
    def create(self, request):
        """Create a post owned by the authenticated user."""
        user_id = self.auth_model.first(token=self.auth_token)[1]
        post = self.post_model.create(user_id=user_id, **request.params)
        return ModelResponse(status="OK", model=self.post_model, raw_instance=post, action="create")

    @validate_auth
    def get(self, request):
        """Fetch one post by params["id"], or every post when no id is given.

        Cleanup: the original re-checked ``post_id is not None`` inside a
        branch where it was already guaranteed non-None.
        """
        params = getattr(request, "params", None)
        post_id = params.get("id") if params else None
        if post_id is not None:
            instance = self.post_model.filter(id=post_id)
        else:
            instance = self.post_model.all()
        if instance:
            return ModelResponse("OK", self.post_model, raw_instance=instance, action="get")
        return Response("WRONG", "Not Found.", action="get")

    @validate_auth
    def alter(self, request):
        """Update the post identified by params["id"] with the remaining params."""
        post_id = request.params.pop("id")
        instance = self.post_model.update(data=request.params, where={"id": post_id})
        if instance:
            return ModelResponse("OK", self.post_model, instance, action="alter")
        return Response("WRONG", "Not Found.", action="alter")

    @validate_auth
    def delete(self, request):
        """Delete the post identified by params["id"]."""
        post_id = request.params.pop("id")
        obj = self.post_model.delete(id=post_id)
        if obj is None:
            return Response("WRONG", "Not Found.", action="delete")
        return Response("OK", data={"id": post_id}, action="delete")
|
{"/controllers.py": ["/models.py", "/utils.py"], "/models.py": ["/settings.py", "/utils.py"], "/server.py": ["/message.py", "/utils.py", "/controllers.py"], "/message.py": ["/utils.py"], "/utils.py": ["/models.py", "/settings.py"], "/tests.py": ["/models.py", "/utils.py", "/controllers.py", "/message.py", "/settings.py", "/backend/server.py", "/core/controllers.py", "/core/messages.py"], "/backend/crypto.py": ["/settings.py"], "/core/models.py": ["/settings.py", "/utils.py"], "/runserver.py": ["/utils.py", "/backend/server.py", "/settings.py"], "/backend/server.py": ["/settings.py", "/utils.py"], "/core/controllers.py": ["/settings.py", "/utils.py", "/core/messages.py"], "/core/messages.py": ["/utils.py"]}
|
30,579,474
|
DanielKusyDev/proton
|
refs/heads/develop
|
/core/messages.py
|
import json
from typing import Union
import utils
from core import models
class Request(object):
    """Parsed client message: JSON with an ``action`` and optional ``params``.

    Raises utils.ProtonError("Syntax Error") on malformed JSON, an
    unknown action, or missing required parameters.
    """

    def __init__(self, json_string):
        # Required parameter names per action; None means "no params allowed".
        self.required_action_params = {
            "register": ["username", "password"],
            "login": ["username", "password"],
            "logout": None,
            "get": None,
            "create": ["content", "title"],
            "alter": ["id"],
            "delete": ["id"],
        }
        # Cleanup: removed the original's dead ``json_string = json_string``.
        self.json_string = json_string
        try:
            self.obj = self.deserialize_json()
            self.action = self.get_action()
            self.params = self.get_params()
        except (KeyError, AssertionError, json.JSONDecodeError) as e:
            # Chain the original cause so the real failure stays visible.
            raise utils.ProtonError("Syntax Error") from e

    def deserialize_json(self):
        """Decode the raw JSON string into a Python object."""
        return json.loads(self.json_string)

    def get_action(self):
        """Return the validated action name."""
        action = self.obj["action"]
        assert action in self.required_action_params.keys()
        return action

    def get_params(self):
        """Return the params dict after checking required keys, or None."""
        params = self.obj.get("params", None)
        if isinstance(params, dict):
            if self.required_action_params[self.action] is not None:
                for param in self.required_action_params[self.action]:
                    assert param in params.keys()
        else:
            # No params supplied: only legal when the action requires none.
            assert params is None
            assert self.required_action_params.get(self.action, None) is None
        return params
class Response(object):
    """Wire-format server response: status plus optional message/data.

    ``json_response`` holds the CRLF-terminated JSON payload; fields left
    as None are omitted from it.
    """

    def __init__(self, status, message=None, data=None, action=""):
        self.action = action.upper()
        self.message = message
        self.status = status
        self.data = data
        self.json_response = None
        self.construct_json()

    def construct_json(self):
        """Build the CRLF-terminated JSON payload, dropping unset fields."""
        payload = {
            "status": self.status,
            "message": self.message,
            "data": self.data,
        }
        stripped = {}
        for field, value in payload.items():
            if value is not None:
                stripped[field] = value
        self.json_response = json.dumps(stripped) + "\r\n"

    def __repr__(self):
        return self.json_response
class ModelResponse(Response):
    """Response whose *data* is built from DB row tuples of a Model.

    Columns listed in the model's ``write_only`` are omitted from the
    serialized records.
    """

    def __init__(self, status, model, raw_instance: Union[list, tuple], message="", action=""):
        if not isinstance(model, models.Model):
            # Accept a Model subclass and instantiate it.
            model = model()
        self.model = model
        # Normalize a single row tuple into a one-element list of rows.
        if raw_instance and not isinstance(raw_instance[0], tuple):
            raw_instance = [raw_instance]
        self.raw_instance = raw_instance
        super(ModelResponse, self).__init__(status, message, data=self.create_data(), action=action)

    def get_record(self, instance, table_schema):
        """Map one row tuple onto column names, skipping write-only columns."""
        record = {}
        for col_name, value in zip(table_schema, instance):
            if col_name not in self.model.write_only:
                record[col_name] = value
        return record

    def create_data(self):
        """Serialize every row into a dict keyed by column name."""
        schema = self.model.get_table_cols()
        return [self.get_record(row, schema) for row in self.raw_instance]
|
{"/controllers.py": ["/models.py", "/utils.py"], "/models.py": ["/settings.py", "/utils.py"], "/server.py": ["/message.py", "/utils.py", "/controllers.py"], "/message.py": ["/utils.py"], "/utils.py": ["/models.py", "/settings.py"], "/tests.py": ["/models.py", "/utils.py", "/controllers.py", "/message.py", "/settings.py", "/backend/server.py", "/core/controllers.py", "/core/messages.py"], "/backend/crypto.py": ["/settings.py"], "/core/models.py": ["/settings.py", "/utils.py"], "/runserver.py": ["/utils.py", "/backend/server.py", "/settings.py"], "/backend/server.py": ["/settings.py", "/utils.py"], "/core/controllers.py": ["/settings.py", "/utils.py", "/core/messages.py"], "/core/messages.py": ["/utils.py"]}
|
30,579,475
|
DanielKusyDev/proton
|
refs/heads/develop
|
/tests.py
|
import abc
import base64
import json
import os
import socket
import sqlite3
import ssl
import threading
import unittest
import shutil
import settings
from backend import crypto
from backend.server import Server
from core import models
import utils
from core.controllers import Controller
from core.messages import Request, Response, ModelResponse
class CryptographyTestCase(unittest.TestCase):
    """Round-trip and determinism checks for backend.crypto."""

    def setUp(self) -> None:
        self.plain = "test123123"

    def test_key_generation(self):
        # Key derivation is deterministic for a fixed SECRET_KEY/SALT.
        key1 = crypto.generate_key()
        key2 = crypto.generate_key()
        self.assertEqual(key1, key2)

    def test_encryption(self):
        cipher = crypto.encrypt(self.plain)
        self.assertNotEqual(self.plain, cipher)
        # Two ciphertexts of the same plaintext differ (fresh IV/timestamp).
        cipher2 = crypto.encrypt(self.plain)
        self.assertNotEqual(cipher, cipher2)

    def test_decryption(self):
        cipher = crypto.encrypt(self.plain)
        decrypted_cipher = crypto.decrypt(cipher)
        self.assertEqual(decrypted_cipher, self.plain)
        cipher2 = crypto.encrypt(self.plain)
        decrypted_cipher2 = crypto.decrypt(cipher2)
        self.assertEqual(decrypted_cipher, decrypted_cipher2)

    def test_comparison(self):
        cipher = crypto.encrypt(self.plain)
        self.assertTrue(crypto.compare(self.plain, cipher))
class BaseControllerTest(unittest.TestCase, metaclass=abc.ABCMeta):
    """Shared fixture: fresh test.db, canned requests and model instances per test."""

    def setUp(self) -> None:
        self.db_name = "test.db"
        # Canned request payloads used by the concrete test classes.
        with open("requests.json", "r") as file:
            self.requests = json.loads(file.read())
        utils.create_db(self.db_name)
        self.user_model = models.User(self.db_name)
        self.auth_token_model = models.AuthToken(self.db_name)
        self.post_model = models.Post(self.db_name)

    def tearDown(self) -> None:
        # Each test starts from an empty database.
        os.remove(self.db_name)
class ModelTests(BaseControllerTest):
    """CRUD behavior of the active-record Model base via User/AuthToken."""

    def setUp(self) -> None:
        super(ModelTests, self).setUp()
        self.user_data = {
            "username": "test_username",
            "password": "test_pass"
        }

    def test_user_creation(self):
        users_counter = len(self.user_model.all())
        user = self.user_model.create(**self.user_data)
        self.assertIsInstance(user, tuple)
        self.assertEqual(self.user_data["username"], user[1])
        # Stored password is encrypted but round-trips via crypto.compare.
        self.assertTrue(crypto.compare(self.user_data["password"], user[2]))
        self.assertGreater(len(self.user_model.all()), users_counter)

    def test_user_deletion(self):
        users_counter = len(self.user_model.all())
        user = self.user_model.create(username="test123123", password="test53525")
        self.user_model.delete(id=user[0])
        self.assertEqual(len(self.user_model.all()), users_counter)

    def test_user_update(self):
        user = self.user_model.create(**self.user_data)
        new_username = "newusername123123"
        updated_user = self.user_model.update(data={"username": new_username}, where={"id": user[0]})
        self.assertEqual(new_username, updated_user[1])
        # Empty SET clause is invalid SQL.
        with self.assertRaises(sqlite3.OperationalError):
            updated_user = self.user_model.update(data={}, where={"id": user[0]})

    def test_select(self):
        self.assertListEqual(self.user_model.all(), [])
        user = self.user_model.create(**self.user_data)
        self.assertEqual(len(self.user_model.all()), 1)
        self.assertEqual(user[0], self.user_model.first(id=user[0])[0])
        self.assertEqual(user[0], self.user_model.first(id=user[0])[0])
        self.assertListEqual(self.user_model.filter(username="wrongusernameforsure", password="wrongpass"), [])
        # Unknown column raises at the sqlite level.
        with self.assertRaises(sqlite3.OperationalError):
            self.user_model.filter(x="d")

    def test_auth_token_creation(self):
        user = self.user_model.create(**self.user_data)
        auth_token = self.auth_token_model.create(user_id=user[0])
        self.assertEqual(user[0], auth_token[1])
        is_valid = self.auth_token_model.is_valid(user_id=auth_token[0])
        self.assertTrue(is_valid)
        # Missing token raises the project error.
        with self.assertRaises(utils.ProtonError):
            is_valid = self.auth_token_model.is_valid(user_id=123123123)
class MessageTests(BaseControllerTest):
    """Validation behavior of core.messages.Request."""

    def setUp(self) -> None:
        super(MessageTests, self).setUp()
        self.proper_request = """{"action":"register", "params":{"username":"...", "password":"..."}}"""
        self.message = Request(self.proper_request)

    def test_deserialization(self):
        # Truncated JSON must fail to decode.
        request = """{
        "action": "",
        """
        self.message.json_string = request
        with self.assertRaises(json.JSONDecodeError):
            self.message.deserialize_json()

    def test_getting_action(self):
        self.message.obj["action"] = "nonexistingactionfortests"
        with self.assertRaises(AssertionError):
            self.message.get_action()

    def test_required_params(self):
        # delete required parameter "username"
        del self.message.obj["params"]["username"]
        with self.assertRaises(AssertionError):
            self.message.get_params()

    def test_empty_params(self):
        # remove params from message object
        del self.message.obj["params"]
        with self.assertRaises(AssertionError):
            self.message.get_params()
        # remove params from required_params field of message
        self.message.required_action_params[self.message.action] = None
        self.assertIsNone(self.message.get_params())
class ControllerTests(BaseControllerTest):
    """End-to-end controller tests driven by the canned requests fixture."""

    media_root = "test_assets"
    image_str = os.path.join(media_root, "corgi.jpeg")

    @classmethod
    def tearDownClass(cls) -> None:
        # Remove every generated asset except the fixture image.
        assets = os.listdir(cls.media_root)
        for path in assets:
            if path != "corgi.jpeg":
                os.remove(os.path.join(cls.media_root, path))

    def setUp(self) -> None:
        super(ControllerTests, self).setUp()
        self.controller = Controller(None, self.db_name)

    def get_token(self, token_instance):
        """Extract the raw token string (column 2) from a login ModelResponse."""
        token_id = token_instance.data[0]["id"]
        token_instance = self.auth_token_model.last(id=token_id)[2]
        return token_instance

    def _login(self, request, create_user=True):
        """Register (optionally) and log in, binding the token to the controller."""
        if create_user:
            self._request_action(self.requests[0])
        token_instance = self._request_action(self.requests[1])
        token = self.get_token(token_instance)
        self.controller.auth_token = token
        return request

    def _request_action(self, request):
        """Round-trip a dict through Request parsing and controller dispatch."""
        raw_request = json.dumps(request)
        message = Request(raw_request)
        result = getattr(self.controller, message.action)(message)
        return result

    def test_auth_validation(self):
        # try to access controller method with bound validation auth without providing any
        with self.assertRaises(PermissionError):
            self._request_action(self.requests[2])

    def test_register(self):
        request = self.requests[0]
        number_of_users = len(self.user_model.all())
        result = self._request_action(request)
        self.assertIsInstance(result, ModelResponse)
        self.assertGreater(len(result.data), number_of_users)
        self.assertEqual(request["params"]["username"], result.data[0]["username"])
        self.assertNotEqual(request["params"]["password"], result.data[0]["username"])

    def test_getting_token(self):
        user = self._request_action(self.requests[0])
        token = self.controller._get_token(user.data[0]["id"])
        self.assertIsInstance(token, tuple)
        self._request_action(self.requests[1])
        token = self.controller._get_token(user.data[0]["id"])
        self.assertIsInstance(token, tuple)

    def test_login(self):
        user = self._request_action(self.requests[0])
        request = self.requests[1].copy()
        # check valid login
        result = self._request_action(request)
        self.assertIsInstance(result, ModelResponse)
        is_valid = self.auth_token_model.is_valid(user_id=user.data[0]["id"])
        self.assertTrue(is_valid)
        # check invalid login data
        request["params"]["username"] = "wrongusername"
        result = self._request_action(request)
        self.assertEqual(result.status, "ERROR")

    def test_proper_logout(self):
        user = self._request_action(self.requests[0])
        token = self._request_action(self.requests[1])
        logout_request = self.requests[2].copy()
        self.controller.auth_token = self.get_token(token)
        # check if token does not exist anymore
        self._request_action(logout_request)
        self.assertIsNone(self.auth_token_model.first(user_id=user.data[0]["id"]))
        # test attempt of providing invalid token and lack of token in opts field
        with self.assertRaises(PermissionError):
            self._request_action(logout_request)
            # NOTE(review): original indentation is ambiguous here; these two
            # lines are unreachable after the raise — confirm intended scope.
            logout_request = self.requests[2].copy()
            self._request_action(logout_request)

    def _create_post(self, create_user=True):
        """Log in and create one post carrying the fixture image path."""
        request = self._login(self.requests[3], create_user)
        request["params"]["image"] = self.image_str
        response = self._request_action(request)
        return response

    def test_create_full_data_post(self):
        response = self._create_post()
        self.assertTrue(response.status)

    def test_getting_post_by_id(self):
        self._create_post()
        request = self._login(self.requests[5], False)
        response = self._request_action(request)
        self.assertIsInstance(response, ModelResponse)
        self.assertTrue(response.status)

    def test_getting_post(self):
        self._create_post(True)
        self._create_post(False)
        request = self._login(self.requests[4], False)
        response = self._request_action(request)
        self.assertIsInstance(response, ModelResponse)
        self.assertEqual(len(response.data), 2)

    def test_post_modify(self):
        post = self._create_post()
        request = self.requests[6]
        title = "NEWTITLE"
        request["params"]["title"] = title
        request = self._login(request, False)
        response = self._request_action(request)
        self.assertIsInstance(response, ModelResponse)
        self.assertNotEqual(post.data[0]["title"], response.data[0]["title"])
        self.assertEqual(title, response.data[0]["title"])

    def test_post_deletion(self):
        post = self._create_post()
        request = self._login(self.requests[7], False)
        response = self._request_action(request)
        self.assertIsInstance(response, Response)
        self.assertListEqual(self.post_model.all(), [])
class ThreadedServer(threading.Thread):
    """Runs the TLS server on localhost:1234 in a background thread (test helper)."""

    def run(self) -> None:
        server = Server(("localhost", 1234))
        server.runserver()
|
{"/controllers.py": ["/models.py", "/utils.py"], "/models.py": ["/settings.py", "/utils.py"], "/server.py": ["/message.py", "/utils.py", "/controllers.py"], "/message.py": ["/utils.py"], "/utils.py": ["/models.py", "/settings.py"], "/tests.py": ["/models.py", "/utils.py", "/controllers.py", "/message.py", "/settings.py", "/backend/server.py", "/core/controllers.py", "/core/messages.py"], "/backend/crypto.py": ["/settings.py"], "/core/models.py": ["/settings.py", "/utils.py"], "/runserver.py": ["/utils.py", "/backend/server.py", "/settings.py"], "/backend/server.py": ["/settings.py", "/utils.py"], "/core/controllers.py": ["/settings.py", "/utils.py", "/core/messages.py"], "/core/messages.py": ["/utils.py"]}
|
30,621,486
|
josemariaaynat/CRYPTO
|
refs/heads/main
|
/movements/__init__.py
|
# Create the Flask app and load its configuration from config.py.
from flask import Flask
app= Flask(__name__,instance_relative_config=True)
app.config.from_object('config')
# Imported for side effects: registers the routes on ``app``.
from movements import views
|
{"/movements/views.py": ["/movements/__init__.py", "/movements/forms.py"], "/movements/acciones.py": ["/movements/__init__.py"]}
|
30,621,487
|
josemariaaynat/CRYPTO
|
refs/heads/main
|
/movements/acciones.py
|
from movements import app
import sqlite3
import time
import datetime
# Path to the SQLite database file, taken from the Flask app config.
DBFILE = app.config['DBFILE']
def busqueda(query, params=()):
    """Run *query* with *params* against DBFILE and return rows as dicts.

    The dict conversion happens BEFORE the connection is closed: the
    original closed the connection first and then read
    ``cursor.description`` from a cursor of a closed database.  The
    connection is now also closed on error (finally).
    """
    conn = sqlite3.connect(DBFILE)
    try:
        cursor = conn.cursor()
        cursor.execute(query, params)
        conn.commit()
        registros = cursor.fetchall()
        listaDeDiccionarios = consulta(registros, cursor)
    finally:
        conn.close()
    return listaDeDiccionarios
def consulta(registros, cursor):
    """Convert row tuples into dicts keyed by the cursor's column names.

    Returns *registros* unchanged when it is empty.
    """
    if not registros:
        return registros
    column_names = [description[0] for description in cursor.description]
    return [dict(zip(column_names, fila)) for fila in registros]
def hora():
    """Return the current local time formatted with the locale's %X (HH:MM:SS)."""
    return time.strftime("%X")
def fecha():
    """Return today's date as a datetime.date."""
    return datetime.date.today()
|
{"/movements/views.py": ["/movements/__init__.py", "/movements/forms.py"], "/movements/acciones.py": ["/movements/__init__.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.