| index | repo_name | branch_name | path | content | import_graph |
|---|---|---|---|---|---|
| int64 | string | string | string | string | string |
2,869
|
SwannSG/womansSheltersZApython
|
refs/heads/master
|
/automate.py
|
#!/usr/bin/python3
"""
automate
<arbitrary>
topojson
gzip
"""
import subprocess
import geoJsonAddPropName
import geoJsonChgPropName
import geoJsonDelPropName
DST_DIR = '/home/swannsg/development/womansSheleterPy/data/geoJson'
PROVINCES = ['EC', 'FS', 'KN', 'LIM', 'MP', 'NC', 'NW', 'WC']
PROVINCES = ['WC']
# CONFIG: add, chg, del feature properties as required
geoJsonAddPropName.PKL = '/home/swannsg/development/womansSheleterPy/data/femalePopulationFromKirsty/female18-120.pkl'
geoJsonChgPropName.CHANGE_PROP_NAME = []
geoJsonDelPropName.DEL = []
for province in PROVINCES:
fn_in = DST_DIR + '/' + province + '/merge/' + province + 'merged.geojson'
fn_temp = DST_DIR + '/' + province + '/merge/' + province + 'merged.geojson' + '.bak'
# backup the file
subprocess.call(['cp', fn_in, fn_temp])
print ('working with', fn_temp)
"""
# add, chg, del feature properties as required
geoJsonChgPropName.SRC_FILE = fn_temp
geoJsonDelPropName.SRC_FILE = fn_temp
geoJsonAddPropName.SRC_FILE = fn_temp
print ('chg properties')
geoJsonChgPropName.chg()
print ('delete properties')
geoJsonDelPropName.delete()
print ('add properties')
geoJsonAddPropName.add()
# end add, chg, del feature properties as required
# topojson
print ('topojson')
subprocess.call(['rm', fn_temp + '.topojson'])
cmd = 'geo2topo ' + fn_temp + ' > ' + fn_temp + '.topojson'
print(subprocess.run(cmd, stdout=subprocess.PIPE, shell=True))
"""
# gzip
subprocess.run(['rm', fn_temp + '.topojson.zip'])
subprocess.run(['zip', fn_temp + '.topojson.zip', fn_temp + '.topojson'])
|
{"/automate.py": ["/geoJsonAddPropName.py", "/geoJsonChgPropName.py", "/geoJsonDelPropName.py"], "/multiFilesKmlToJson.py": ["/kmlToJson.py", "/mergeGeoJsonFiles.py"]}
|
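The commented-out topojson step above builds a shell command string just to redirect geo2topo's stdout into a file. A minimal sketch of the same conversion without shell=True (illustrative only; it assumes the geo2topo CLI used in the script is installed and the GeoJSON path exists):

import subprocess

def geojson_to_topojson(geojson_path):
    # Write geo2topo's stdout directly to <input>.topojson; check=True raises
    # CalledProcessError if the conversion fails instead of failing silently.
    topo_path = geojson_path + '.topojson'
    with open(topo_path, 'w') as out:
        subprocess.run(['geo2topo', geojson_path], stdout=out, check=True)
    return topo_path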
2,870
|
SwannSG/womansSheltersZApython
|
refs/heads/master
|
/sheltersCSVtoGeoJson.py
|
"""
shelters.csv to geoJson
"""
import pprint
import json
CSV = '/home/swannsg/development/womansSheleterPy/data/sheltersFromKirsty/Western Cape Shelters GPS coordinates.csv'
OUT = '/home/swannsg/development/womansSheleterPy/data/geoJson/WC/shelters/WCshelters.geojson'
result= {}
result['type'] = 'FeatureCollection'
result['name'] = 'WC Shelters'
result['features'] = []
fp = open(CSV, 'r')
for i, each in enumerate(fp):
if i == 0:
# ignore first line
continue
    each = each.replace('\n', '')
area, name, lat, lng, num = each.split(',')
# init feature
feature = {'type':'Feature',
'geometry': {'coordinates': [], "type": 'Point'},
'properties': {'area': '', 'name': ''}}
# set values in feature
feature['geometry']['coordinates'] = [float(lng.replace('"', '')),
float(lat.replace('"', ''))]
feature['properties']['area'] = area
feature['properties']['name'] = name
# add to features
result['features'].append(feature)
fp.close()
fp = open(OUT, 'w')
json.dump(result, fp)
fp.close()
|
{"/automate.py": ["/geoJsonAddPropName.py", "/geoJsonChgPropName.py", "/geoJsonDelPropName.py"], "/multiFilesKmlToJson.py": ["/kmlToJson.py", "/mergeGeoJsonFiles.py"]}
|
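sheltersCSVtoGeoJson.py splits each CSV line on commas and strips the surrounding quotes by hand. A sketch of the same row-to-Feature mapping using the standard csv module, which handles quoted fields; the column order (area, name, lat, lng, ...) follows the script and the file paths are placeholders:

import csv
import json

def csv_to_feature_collection(csv_path, collection_name='WC Shelters'):
    features = []
    with open(csv_path, newline='') as fp:
        reader = csv.reader(fp)
        next(reader)  # skip the header line, as the script does
        for area, name, lat, lng, *rest in reader:
            features.append({
                'type': 'Feature',
                # GeoJSON coordinate order is [longitude, latitude]
                'geometry': {'type': 'Point',
                             'coordinates': [float(lng), float(lat)]},
                'properties': {'area': area, 'name': name},
            })
    return {'type': 'FeatureCollection', 'name': collection_name,
            'features': features}

# json.dump(csv_to_feature_collection('shelters.csv'), open('WCshelters.geojson', 'w'))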
2,871
|
SwannSG/womansSheltersZApython
|
refs/heads/master
|
/mapWardIdtoMunicipalName.py
|
"""
map wardId to municipality name
input file: any ward geojson file
"""
import json
import pickle
import pprint
SRC_FILE = '/home/swannsg/development/womansSheleterPy/data/geoJson/WC/merge/WCmerged.geojson'
PICKLE_FILE = '/home/swannsg/development/womansSheleterPy/data/sundryStuff/wardId_munName.pkl'
fp = open(SRC_FILE, 'r')
x = json.load(fp)
fp.close()
fp = open(PICKLE_FILE, 'rb')
result = pickle.load(fp)
fp.close()
for each in x['features']:
result[each['properties']['WardID']] = [
each['properties']['Province'],
each['properties']['MunicName'],
]
fp = open(PICKLE_FILE, 'wb')
pickle.dump(result,fp)
fp.close()
# pprint.pprint(result)
|
{"/automate.py": ["/geoJsonAddPropName.py", "/geoJsonChgPropName.py", "/geoJsonDelPropName.py"], "/multiFilesKmlToJson.py": ["/kmlToJson.py", "/mergeGeoJsonFiles.py"]}
|
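mapWardIdtoMunicipalName.py extends a pickled dict that maps each WardID to a [Province, MunicName] pair. A minimal consumer sketch (the pickle path is shortened and the ward id shown is hypothetical):

import pickle

with open('wardId_munName.pkl', 'rb') as fp:       # placeholder path
    ward_to_municipality = pickle.load(fp)

province, munic_name = ward_to_municipality['10203001']   # hypothetical WardID
print(province, munic_name)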
2,872
|
SwannSG/womansSheltersZApython
|
refs/heads/master
|
/multiFilesKmlToJson.py
|
"""
convert multiple kml files to geojson format
PROVINCE: set to the province, e.g. WC
FILES_TO_IGNORE: files in SRC_DIR that should not be converted
SRC_DIR: contains multiple kml files
DST_DIR: where kml to geojson result files are placed
"""
import os
import kmlToJson
import mergeGeoJsonFiles
# edit to process a province
PROVINCE = 'NW'
FILES_TO_IGNORE = ['EC.kml', 'FS.kml', 'KZN.kml', 'LIM.kml',
'MP.kml', 'NC.kml', 'NW.kml', 'WC.kml', 'KZN_KML_Files.zip']
# end edit to process a province
# edit for global dirs
SRC_DIR = '/home/swannsg/development/womansSheleterPy/data/kml'
DST_DIR = '/home/swannsg/development/womansSheleterPy/data/geoJson'
KML_TO_GEOJSON = True
MERGE_FILES = True
# end edit for global dirs
src_dir = SRC_DIR + '/' + PROVINCE
dst_dir = DST_DIR + '/' + PROVINCE
# create dirs if they don't exist
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
# end create dirs if they don't exist
# get files and dirs in SRC_DIR
files_dirs = os.listdir(src_dir)
# remove names that are dirs from files_dirs
files_dirs = [each for each in files_dirs if os.path.isfile(src_dir + '/' + each)]
# remove filenames that are NOT to be processed
files_dirs = [each for each in files_dirs if each not in FILES_TO_IGNORE]
# map kml files to geoJson
if KML_TO_GEOJSON:
for each in files_dirs:
print (src_dir + '/' + each, dst_dir)
kmlToJson.runKmlToJson(src_dir + '/' + each, dst_dir)
# end map kml files to geoJson
# merge geoJson files
if MERGE_FILES:
# ---get files and dirs in dst_dir
files_dirs = os.listdir(dst_dir)
    # remove names that are dirs from files_dirs
    files_dirs = [each for each in files_dirs if os.path.isfile(dst_dir + '/' + each)]
# create dirs if they don't exist
if not os.path.exists(DST_DIR + '/' + PROVINCE + '/merge'):
os.makedirs(DST_DIR + '/' + PROVINCE + '/merge')
# end create dirs if they don't exist
mergeGeoJsonFiles.mergeGeoJsonFiles([dst_dir + '/' + a for a in files_dirs],
dst_dir + '/merge/' + PROVINCE + 'merged.geojson')
# end merge geoJson files
|
{"/automate.py": ["/geoJsonAddPropName.py", "/geoJsonChgPropName.py", "/geoJsonDelPropName.py"], "/multiFilesKmlToJson.py": ["/kmlToJson.py", "/mergeGeoJsonFiles.py"]}
|
2,873
|
SwannSG/womansSheltersZApython
|
refs/heads/master
|
/csvFem18-120.py
|
"""
Statistics South Africa
Descriptive_Electoral_Wards
Table 1
Geography by Gender
" for Person weighted, 18 - 120"
,"Male","Female","Grand Total"
Females 18 to 120
output = {wardID: {f18-120: <number>}}
result is pickled
"""
import pickle
filename = "/home/swannsg/development/womansSheleterPy/data/femalePopulationFromKirsty/sourceData/Whole of SA women's population 18 and upwards - most detailed with codes no names.csv"
pkl = '/home/swannsg/development/womansSheleterPy/data/femalePopulationFromKirsty/female18-120.pkl'
result = {}
start = False
fp = open(filename, 'r')
i = 0
for each in fp:
# print (i)
if each == ',"Male","Female","Grand Total"\n':
start = True
continue
if start:
a,b,c,d = each.split(',')
if a == '"Grand Total"':
break
a = a.replace('"', '')
        result[a.split(':')[0]] = {'f18-120': int(c)}
i = i + 1
fp.close()
fp = open(pkl, 'wb')
pickle.dump(result, fp)
fp.close()
|
{"/automate.py": ["/geoJsonAddPropName.py", "/geoJsonChgPropName.py", "/geoJsonDelPropName.py"], "/multiFilesKmlToJson.py": ["/kmlToJson.py", "/mergeGeoJsonFiles.py"]}
|
2,874
|
SwannSG/womansSheltersZApython
|
refs/heads/master
|
/geoJsonDelPropName.py
|
"""
geoJsonDelPropName.py
feature.properties = {key_1: value_1, ...}
Delete key_N from feature.properties
"""
import json
import pickle
import pprint
SRC_FILE = '/home/swannsg/development/womansSheleterPy/data/geoJson/WC/merge/WCmergedTest.geojson'
DEL = ['females']
def delete():
fp = open(SRC_FILE, 'r')
x = json.load(fp)
fp.close()
# del properties
for feature in x['features']:
for each in DEL:
feature['properties'].pop(each, None)
# show result
#for each in x['features']:
# pprint.pprint(each['properties'])
fp = open(SRC_FILE, 'w')
json.dump(x, fp)
fp.close()
|
{"/automate.py": ["/geoJsonAddPropName.py", "/geoJsonChgPropName.py", "/geoJsonDelPropName.py"], "/multiFilesKmlToJson.py": ["/kmlToJson.py", "/mergeGeoJsonFiles.py"]}
|
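automate.py drives geoJsonDelPropName (and its add/chg siblings) by overwriting the module-level globals before calling the entry function, as in its commented-out block. A condensed sketch of that calling convention, assuming the module is importable and the GeoJSON path exists:

import geoJsonDelPropName

# Point the module at the working copy, list the feature properties to drop,
# then rewrite the file in place.
geoJsonDelPropName.SRC_FILE = '/path/to/WCmerged.geojson'   # placeholder path
geoJsonDelPropName.DEL = ['females']
geoJsonDelPropName.delete()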
2,893
|
nimotsu/stock
|
refs/heads/master
|
/scrape.py
|
#!/usr/bin/env python
# coding: utf-8
import sys
import xlsxwriter
import datetime
from stock import Stock
from stock import Webpage
import os
import numpy as np
import warnings
warnings.filterwarnings("ignore")
now = datetime.datetime.now()
def search(term: str, df, index = 1):
result = df[df[0].str.contains('(?i)' + term)][index].values[0]
result = str(result)
result = result.replace(",", "")
if '%' in result:
result = result.replace("%", "")
result = float(result) / 100
try:
return float(result)
except:
return result
def rename_excel(my_stock, excel_name):
"""rename excel sheet with npv and last price for easy viewing"""
operating_cf = search("Cash From Operating Activities", my_stock.cash_flow)
shares_outstanding = search("Shares Outstanding", my_stock.overview)/1000000
last_price = search("Last Price", my_stock.overview)
cash_flow = []
for i in range(1, 11):
operating_cf = operating_cf * (1 + my_stock.growth_rate)
cash_flow.append(operating_cf)
values = cash_flow
rate = my_stock.discount_rate
npv = (values / (1+rate)**np.arange(1, len(values)+1)).sum(axis=0) / shares_outstanding
print(f"NPV per Share: {npv}")
print(f"Last Price: {last_price}")
os.rename(excel_name, my_stock.stock_cd + "-" + str(round(npv, 2)) + "-" + str(last_price) + ".xlsx")
def analyse(company_name):
my_stock = Stock(company_name)
excel_name = "stocks/" + my_stock.stock_cd + ".xlsx"
sheet_name = "Sheet1"
# colours
blue = '#98C4D1'
yellow = '#FEC240'
red = '#DE4B43'
# writer = pd.ExcelWriter(excel_name, engine='xlsxwriter') writer.save()
workbook = xlsxwriter.Workbook(excel_name)
worksheet = workbook.add_worksheet(sheet_name)
# format excel
worksheet.set_row(0, 40)
worksheet.set_column('A:A', 20)
worksheet.set_column('A:I', 10)
title_format = workbook.add_format({
'bold': True,
'font_color': blue,
'font_size': 16
})
currency_format = workbook.add_format({
'num_format': '$#,##0.00',
'border': 1
})
percentage_format = workbook.add_format({
'num_format': '0.0%',
'bg_color': blue,
'border': 1
})
colored_format = workbook.add_format({
'bg_color': blue,
'border': 1
})
colored_currency_format = workbook.add_format({
'num_format': '$#,##0.00',
'bg_color': blue,
'border': 1
})
border_format = workbook.add_format({
'border': 1
})
# Stock and write to excel
# Required data for npv calculation, table 1
# --------------------------------------------------------
table01 = (0, 0)
table1 = {
"Name of Stock": my_stock.stock_cd.replace("-", " ").title(),
"Operating Cash Flow": search("Cash From Operating Activities", my_stock.cash_flow),
"Total Debt": search("Total Long Term Debt", my_stock.balance_sheet),
"Cash & Equivalent": search("Cash & Equivalent", my_stock.balance_sheet),
"Growth Rate": 0,
"No. of Shares Outstanding": search("Shares Outstanding", my_stock.overview) / 1000000,
"Discount Rate": 0
}
worksheet.write_column('A1', table1.keys(), border_format)
worksheet.write_column('B1', table1.values(), colored_currency_format)
# rewrite in title and percentage format
worksheet.write('B1', my_stock.stock_cd.replace("-", " ").title(), title_format)
worksheet.write('B5', my_stock.growth_rate, percentage_format)
worksheet.write('B7', my_stock.discount_rate, percentage_format)
# Ten-year cash flow calculations, bottom table
# --------------------------------------------------------
table11 = (11, 0)
calc_row = table11[0]
# headers
worksheet.write_column(calc_row, 0, ["Year", "Cash Flow", "Discount Rate", "Discounted Value"], border_format)
worksheet.write_row(calc_row, 1, list(range(now.year, now.year + 10, 1)), border_format)
# calculation formulas
cash_flow = ["=B2*(1+B5)"]
cash_flow.extend(["=" + chr(ord('B') + i) + str(calc_row+2) + "*(1+$B$5)" for i in range(10)])
# +1, +2
cf_row = calc_row + 1
for i in range(10):
worksheet.write_formula(cf_row, i+1, cash_flow[i], currency_format)
# +2, +3
dr_row = calc_row + 2
discount_rate = ["=1/(1 + $B$7)^" + str(i) for i in range(1, 11)]
for i in range(10):
worksheet.write_formula(dr_row, i+1, discount_rate[i], border_format)
# +3, +4
dv_row = calc_row + 3
discounted_value = ["=PRODUCT("+chr(ord('B')+i)+str(cf_row+1)+":"+chr(ord('B')+i)+str(dr_row+1)+")" for i in range(10)]
for i in range(10):
worksheet.write_formula(dv_row, i+1, discounted_value[i], currency_format)
# NPV and intrinsic values calculations, table 2
# --------------------------------------------------------
# table02 = ()
worksheet.write_column('D2', ["PV of 10 yr Cash Flows", "Intrinsic Value per Share",
"- Debt per Share", "+ Cash per share", "net Cash per Share"], border_format)
worksheet.write_column('E2', [f"=SUM(B{dv_row+1}:K{dv_row+1})", "=E2/B6", "=B3/B6", "=B4/B6", "=E3-E4+E5"], colored_currency_format)
# Stock overview, table 3
# --------------------------------------------------------
# table03 = ()
df = my_stock.overview.reset_index(drop=True)
index = [0, 5, 6, 7, 8, 9, 11, 15]
worksheet.write_column('G2', df.iloc[index, 0], border_format)
worksheet.write_column('H2', df.iloc[index, 1], colored_format)
# Jot down links from simply wall st and infront analytics
# --------------------------------------------------------
row = table11[0] + 5
worksheet.write_column(row, 0, my_stock.urls)
# Overview by i3investor
# --------------------------------------------------------
i3summary = my_stock.i3summary
i3business_performance = my_stock.i3business_performance
# i3investor table, table 4
# table04 = ()
i3summary_column = 9
    worksheet.set_column(i3summary_column, i3summary_column+1, 20)  # widen the two i3investor summary columns
worksheet.write_column(1, i3summary_column, i3summary[0], border_format)
worksheet.write_column(1, i3summary_column+1, i3summary[1], colored_currency_format)
# summary tables
start_row = 23
start_column = 0
for key in i3business_performance:
worksheet.write(start_row, start_column, key)
cur_df = i3business_performance[key]
for col in cur_df.columns:
cur_col = []
cur_col.append(col.replace("Unnamed: 0", ""))
cur_col.extend(cur_df[col])
worksheet.write_column(start_row+1, start_column, cur_col, border_format)
start_column += 1
start_column += 1
# Ratios by investing, table 5
# --------------------------------------------------------
# table05
start_row = 1
start_column = 12
ratios_header = my_stock.ratios.head(6)
ratios_header = ratios_header.rename({0: '', 1: 'Company', 2: 'Industry'}, axis=1)
for col in ratios_header.columns:
cur_col = []
cur_col.append(col)
cur_col.extend(ratios_header[col])
worksheet.write_column(start_row, start_column, cur_col, border_format)
start_column += 1
total_assets = search("Total Assets", my_stock.balance_sheet)
total_liabilities = search("Total Liabilities", my_stock.balance_sheet)
current_shares_outstanding = search("Total Common Shares Outstanding", my_stock.balance_sheet)
total_equity = search("Total Equity", my_stock.balance_sheet)
net_assets = total_assets - total_liabilities
net_asset_value = net_assets / current_shares_outstanding
net_asset_value = round(net_asset_value, 2)
table5 = {
"EPS": search("Basic EPS ANN", my_stock.ratios),
"EPS(MRQ) vs Qtr. 1 Yr. Ago MRQ": search("EPS\(MRQ\) vs Qtr. 1 Yr. Ago MRQ", my_stock.ratios),
"EPS(TTM) vs TTM 1 Yr. Ago TTM": search("EPS\(TTM\) vs TTM 1 Yr. Ago TTM", my_stock.ratios),
"5 Year EPS Growth 5YA": search("5 Year EPS Growth 5YA", my_stock.ratios),
"Return on Equity TTM": search("Return on Equity TTM", my_stock.ratios),
"Return on Equity 5YA": search("Return on Equity 5YA", my_stock.ratios),
"Price to Earnings Ratio": search("P/E Ratio TTM", my_stock.ratios),
"Dividend per Share": search("Dividend Yield ANN", my_stock.ratios),
"Dividend Yield 5 Year Avg. 5YA": search("Dividend Yield 5 Year Avg. 5YA", my_stock.ratios),
"Dividend Growth Rate ANN": search("Dividend Growth Rate ANN", my_stock.ratios),
"Net Asset per Share": net_asset_value,
"Price to Book": search("Price to Book MRQ", my_stock.ratios),
"LT Debt to Equity": search("LT Debt to Equity", my_stock.ratios)
}
# Continuation, table 5
start_row = len(ratios_header) + 2
start_column = 12
worksheet.write_column(start_row, start_column, table5.keys(), border_format)
worksheet.write_column(start_row, start_column+1, table5.values(), colored_format)
workbook.close()
# Shift + Ctrl + F9
rename_excel(my_stock, excel_name)
def main():
# print('Number of arguments: {}'.format(len(sys.argv[1:])))
# print('Argument(s) passed: {}'.format(str(sys.argv[1:])))
companies = sys.argv[1:]
list(map(lambda x: analyse(x),companies))
if __name__ == "__main__":
main()
|
{"/scrape.py": ["/stock.py"]}
|
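rename_excel() projects ten years of operating cash flow at the stock's growth rate, discounts each year back at the discount rate, and divides the total by shares outstanding. The same arithmetic spelled out without numpy broadcasting (the inputs in the comment are placeholders, not real data):

def npv_per_share(operating_cf, growth_rate, discount_rate, shares_outstanding, years=10):
    # Grow the cash flow one year at a time, discount it back to today,
    # and express the sum per share -- mirroring the loop in rename_excel().
    total = 0.0
    cash_flow = operating_cf
    for t in range(1, years + 1):
        cash_flow *= (1 + growth_rate)
        total += cash_flow / (1 + discount_rate) ** t
    return total / shares_outstanding

# e.g. npv_per_share(100.0, 0.05, 0.09, 50.0) with hypothetical inputs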
2,894
|
nimotsu/stock
|
refs/heads/master
|
/stock.py
|
import requests
import pandas as pd
import re
from bs4 import BeautifulSoup
def url2html(url, headers=None, params=None, data=None):
    if headers is None:
        headers = {'User-Agent': 'Mozilla/5.0'}
try:
req = requests.get(url, headers=headers, params=params, data=data)
except:
req = requests.get(url, headers=headers, params=params, data=data, verify=False)
html = req.text
return html
# Handle all urls and htmls
class Webpage:
def __init__(self, html):
self.html = html
self.soup = BeautifulSoup(self.html, 'html.parser')
try:
self.tables = pd.read_html(self.html)
except:
self.tables = None
@classmethod
def from_url(cls, url, headers=None, params=None, data=None):
"""constructor with url"""
html = url2html(url, headers)
return cls(html)
def get_span(self, tag: str, class_name: list):
"""return df from columns not in <table>"""
def get_tag(tag, class_name):
tags = self.soup.find_all(tag, {'class': class_name})
text = [i.get_text() for i in tags if i.get_text() != '']
return text
attrib = get_tag(tag, class_name[0])
data = get_tag(tag, class_name[1])
ls = list(zip(attrib, data))
df = pd.DataFrame(ls)
return df
# Handle all methods related to stock
class Stock:
def __init__(self, company):
self.urls = []
self.stock_cd = self.scrape_link(company)
print(f"Stock Cd: {self.stock_cd}")
self.overview, self.stock_id, investing_url = self.scrape_overview()
print(f"Stock Id: {self.stock_id}")
self.growth_rate, simplywallst_url = self.scrape_growth_rate()
self.beta, infrontanalytics_url = self.scrape_beta()
self.discount_rate = self.scrape_discount_rate()
self.i3summary, self.i3business_performance, i3investor_url = self.scrape_isummary()
self.urls.append(investing_url)
self.urls.append(simplywallst_url)
self.urls.append(infrontanalytics_url)
self.urls.append(i3investor_url)
self.ratios = self.scrape_ratios()
self.cash_flow = self.scrape_cash_flow()
self.balance_sheet = self.scrape_balance_sheet()
# self.income_statementp = Webpage.from_url(f"https://www.investing.com/equities/{stock_cd}-income-statement")
# self.earningsp = Webpage.from_url(f"https://www.investing.com/equities/{stock_cd}-earnings")
# self.financialp = Webpage.from_url(f"https://www.investing.com/equities/{stock_cd}-financial-summary")
def scrape_link(self, company):
headers = {
'User-Agent': 'Mozilla/5.0',
}
params = (
('q', company),
)
response = requests.get('https://www.investing.com/search/', headers=headers, params=params)
        soup = BeautifulSoup(response.text, 'html.parser')
result = soup.find('a', ['js-inner-all-results-quote-item'])
stock_cd = result['href'].replace("/equities/", "")
return stock_cd
"""
Simply Wall St
"""
def scrape_growth_rate(self):
"""scrape growth rate from simply wall st"""
# search the link for stock
stock_cd = self.stock_cd.replace("-", " ")
params = (
('x-algolia-agent', 'Algolia for JavaScript (4.2.0); Browser (lite)'),
('x-algolia-api-key', 'be7c37718f927d0137a88a11b69ae419'),
('x-algolia-application-id', '17IQHZWXZW'),
)
data = f'{{"query":"{stock_cd} klse","highlightPostTag":" ","highlightPreTag":" ","restrictHighlightAndSnippetArrays":true}}'
try:
response = requests.post('https://17iqhzwxzw-dsn.algolia.net/1/indexes/companies/query', params=params, data=data)
# generate link
stock_url = response.json()['hits'][0]['url']
url = "https://simplywall.st" + stock_url
except:
return None
html = url2html(url)
soup = BeautifulSoup(html, 'html.parser')
growth = soup.find('p', {'data-cy-id': 'key-metric-value-forecasted-annual-earnings-growth'}).get_text().replace('%', '')
self.growth_rate = float(growth) / 100
print(f"Growth Rate: {self.growth_rate}")
return self.growth_rate, url
"""
Infront Analytics
"""
def scrape_beta(self):
"""scrape beta from infrontanalytics.com"""
# search the link for stock
params = (
('keyname', self.stock_cd.replace("-", " ")),
)
response = requests.get('https://www.infrontanalytics.com/Eurofin/autocomplete', params=params, verify=False)
result = response.json()[0]
# generate stock url
name = result['name'].replace(" ", "-").replace(".", "") + "-"
code = result['isin']
url = f"https://www.infrontanalytics.com/fe-en/{code}/{name}/beta"
# get beta
html = url2html(url)
m = re.search(r"shows a Beta of ([+-]?\d+\.\d+).", html)
beta = m.groups()[0]
print(f"Beta: {beta}")
return float(beta), url
def scrape_discount_rate(self):
"""convert beta to discount rate for dcf model"""
discount_rate = 0
dr = {
0.8: 5,
1: 6,
1.1: 6.8,
1.2: 7,
1.3: 7.9,
1.4: 8,
1.5: 8.9
}
        for key in sorted(dr):
            if self.beta <= key:
                discount_rate = dr[key]
                break
        else:
            discount_rate = 9
discount_rate = round(discount_rate/100, 2)
print(f"Discount Rate: {discount_rate}")
return discount_rate
"""
i3investor
"""
def scrape_isummary(self):
# search for link in the website
headers = {'User-Agent': 'Mozilla'}
params = (
('qt', 'lscomn'),
            ('qp', 'nestle'),  # NOTE: search term is hard-coded rather than derived from self.stock_cd
)
response = requests.get('https://klse.i3investor.com/cmservlet.jsp', headers=headers, params=params)
query = response.text.split(":")[0]
# generate link to stock page
params = (
('sa', 'ss'),
('q', query),
)
response = requests.get('https://klse.i3investor.com/quoteservlet.jsp', headers=headers, params=params)
# scrape for id from stock page
html = response.text
        soup = BeautifulSoup(html, 'html.parser')
        stock_name = soup.find('span', {'class': 'stname'}).text
        stock_name = re.search(r"\((\d+)\)", stock_name)
stock_id = stock_name.groups()[0]
# generate link to summary page
url = f"https://klse.i3investor.com/servlets/stk/fin/{stock_id}.jsp?type=summary"
html = url2html(url)
        soup = BeautifulSoup(html, 'html.parser')
# get all summary tables
result = soup.find_all('div', {'id': 'headerAccordion'})
i3summary = pd.read_html(str(result[3]))[0]
# get business performance tables
result = soup.find_all('div', {'id': 'summaryAccordion'})
business_performance_by_year = pd.read_html(str(result[1]))[0].dropna()
key_result = pd.read_html(str(result[2]))[0].dropna()
growth_by_year = pd.read_html(str(result[4]))[0].dropna()
i3business_performance = {
"Business Peformance (by Year)": business_performance_by_year,
"Key Result": key_result[['Annual (Unaudited)', 'Last 10 FY Average', 'Last 5 FY Average']],
"Growth (by Year)": growth_by_year[['LFY YoY', 'LFY vs AL5FY', 'LFY vs AL10FY']]
}
return i3summary, i3business_performance, url
"""
investing
"""
def scrape_overview(self):
stock_cd = self.stock_cd
def scrape_id(overviewp):
            m = re.search(r'data-pair-id="(\d+)"', overviewp.html)
stock_id = m.groups()[0]
return stock_id
url = f"https://www.investing.com/equities/{stock_cd}"
overviewp = Webpage.from_url(url)
soup = overviewp.soup
last_price = soup.find('span', {'id':'last_last'}).get_text()
ls = ['Last Price', last_price]
df = pd.DataFrame([ls])
overview = overviewp.get_span('span', ['float_lang_base_1', 'float_lang_base_2'])
stock_id = scrape_id(overviewp)
return pd.concat([df, overview]), stock_id, url
def scrape_ratios(self):
stock_cd = self.stock_cd
ratiosp = Webpage.from_url(f"https://www.investing.com/equities/{stock_cd}-ratios")
tables = ratiosp.tables
numbers = range(1, 9)
ratios = pd.concat(tables[i] for i in numbers)
return ratios
def scrape_cash_flow(self):
stock_id = self.stock_id
cash_flowp = Webpage.from_url(f"https://www.investing.com/instruments/Financials/changereporttypeajax?action=change_report_type&pair_ID={self.stock_id}&report_type=CAS&period_type=Annual")
df = cash_flowp.tables[0]
        # keep rows whose second column looks numeric (no vowels), dropping label/header rows
        cash_flow = df[~df[1].str.contains("a|e|i|o|u")]
return cash_flow
def scrape_balance_sheet(self):
stock_id = self.stock_id
balance_sheetp = Webpage.from_url(f"https://www.investing.com/instruments/Financials/changereporttypeajax?action=change_report_type&pair_ID={self.stock_id}&report_type=BAL&period_type=Annual")
df = balance_sheetp.tables[0]
        # keep rows whose second column looks numeric (no vowels), dropping label/header rows
        balance_sheet = df[~df[1].str.contains("a|e|i|o|u")]
return balance_sheet
def scrape_earnings(self):
stock_cd = self.stock_cd
s = requests.Session()
url = f"https://www.investing.com/equities/{self.stock_cd}-earnings"
headers={ "User-Agent": "Mozilla/5.0"}
r = s.get(url, headers={ "User-Agent": "Mozilla/5.0"})
# get more history - to work on
'''
more_history = "https://www.investing.com/equities/morehistory"
headers = {
'User-Agent': 'Mozilla/5.0',
'X-Requested-With': 'XMLHttpRequest',
'Referer': url,
}
data = {"pairID" : "41688", "last_timestamp": "2019-0-02"}
r = s.post(more_history, headers=headers, cookies=r.cookies, data=data)
r.json()['historyRows']
'''
return r.text
def scrape_financial_summary(self):
def get_summary(html):
webpage = Webpage(html)
soup = webpage.soup
title = soup.find('h3').text
df = webpage.get_span('span', ['float_lang_base_1', 'float_lang_base_2'])
table = pd.read_html(str(soup))[0]
return [title, table, df] # pd.concat([table, df], axis=0, ignore_index=True)
stock_id = self.stock_id
financial_summary = f"https://www.investing.com/instruments/Financials/changesummaryreporttypeajax?action=change_report_type&pid={stock_id}&financial_id={stock_id}&ratios_id={stock_id}&period_type="
annual = financial_summary + "Annual"
# interim = financial_summary + "Interim"
df = pd.DataFrame()
soup = Webpage.from_url(annual).soup
sections = soup.find_all('div', "companySummaryIncomeStatement")
result = []
for i in sections:
result.append(get_summary(str(i)))
return result
'''
10% for public companies
15% for private companies that are scaling predictably (say above $10m in ARR, and growing greater than 40% year on year)
20% for private companies that have not yet reached scale and predictable growth
'''
|
{"/scrape.py": ["/stock.py"]}
|
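Webpage wraps a fetched page in BeautifulSoup and pandas.read_html. A minimal sketch of constructing one from a raw HTML string instead of a live URL, so it runs offline (the HTML snippet is made up; it assumes stock.py is importable from the repository root and pandas' read_html dependencies are installed):

from stock import Webpage

html = """
<table>
  <tr><th>Metric</th><th>Value</th></tr>
  <tr><td>Last Price</td><td>1.23</td></tr>
</table>
"""
page = Webpage(html)               # same parsing path as Webpage.from_url(url)
print(page.tables[0])              # pandas.read_html picked up the <table>
print(page.soup.find('td').text)   # BeautifulSoup tree is exposed on .soup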
2,925
|
gheinrich/DIGITS
|
refs/heads/master
|
/digits/dataset/tasks/__init__.py
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .analyze_db import AnalyzeDbTask
from .create_db import CreateDbTask
from .create_generic_db import CreateGenericDbTask
from .parse_folder import ParseFolderTask
|
{"/digits/model/tasks/test_caffe_train.py": ["/digits/model/tasks/__init__.py"], "/digits/model/images/classification/forms.py": ["/digits/model/images/forms.py"], "/digits/model/images/generic/forms.py": ["/digits/model/images/forms.py"]}
|
2,926
|
gheinrich/DIGITS
|
refs/heads/master
|
/digits/model/images/forms.py
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import wtforms
from wtforms import validators
from ..forms import ModelForm
from digits import utils
class ImageModelForm(ModelForm):
"""
Defines the form used to create a new ImageModelJob
"""
crop_size = utils.forms.IntegerField('Crop Size',
validators = [
validators.NumberRange(min=1),
validators.Optional()
],
tooltip = "If specified, during training a random square crop will be taken from the input image before using as input for the network."
)
use_mean = utils.forms.SelectField('Subtract Mean',
choices = [
('none', 'None'),
('image', 'Image'),
('pixel', 'Pixel'),
],
default='image',
tooltip = "Subtract the mean file or mean pixel for this dataset from each image."
)
aug_flip = utils.forms.SelectField('Flipping',
choices = [
('none', 'None'),
('fliplr', 'Horizontal'),
('flipud', 'Vertical'),
('fliplrud', 'Horizontal and/or Vertical'),
],
default='none',
tooltip = "Randomly flips each image during batch preprocessing."
)
aug_quad_rot = utils.forms.SelectField('Quadrilateral Rotation',
choices = [
('none', 'None'),
('rot90', '0, 90 or 270 degrees'),
('rot180', '0 or 180 degrees'),
('rotall', '0, 90, 180 or 270 degrees.'),
],
default='none',
tooltip = "Randomly rotates (90 degree steps) each image during batch preprocessing."
)
aug_rot = utils.forms.IntegerField('Rotation (+- deg)',
default=0,
validators=[
validators.NumberRange(min=0, max=180)
],
tooltip = "The uniform-random rotation angle that will be performed during batch preprocessing."
)
aug_scale = utils.forms.FloatField('Rescale (stddev)',
default=0,
validators=[
validators.NumberRange(min=0, max=1)
],
tooltip = "Retaining image size, the image is rescaled with a +-stddev of this parameter. Suggested value is 0.07."
)
aug_noise = utils.forms.FloatField('Noise (stddev)',
default=0,
validators=[
validators.NumberRange(min=0, max=1)
],
tooltip = "Adds AWGN (Additive White Gaussian Noise) during batch preprocessing, assuming [0 1] pixel-value range. Suggested value is 0.03."
)
aug_hsv_use = utils.forms.BooleanField('HSV Shifting',
default = False,
tooltip = "Augmentation by normal-distributed random shifts in HSV color space, assuming [0 1] pixel-value range.",
validators=[
]
)
aug_hsv_h = utils.forms.FloatField('Hue',
default=0.02,
validators=[
validators.NumberRange(min=0, max=0.5)
],
tooltip = "Standard deviation of a shift that will be performed during preprocessing, assuming [0 1] pixel-value range."
)
aug_hsv_s = utils.forms.FloatField('Saturation',
default=0.04,
validators=[
validators.NumberRange(min=0, max=0.5)
],
tooltip = "Standard deviation of a shift that will be performed during preprocessing, assuming [0 1] pixel-value range."
)
aug_hsv_v = utils.forms.FloatField('Value',
default=0.06,
validators=[
validators.NumberRange(min=0, max=0.5)
],
tooltip = "Standard deviation of a shift that will be performed during preprocessing, assuming [0 1] pixel-value range."
)
|
{"/digits/model/tasks/test_caffe_train.py": ["/digits/model/tasks/__init__.py"], "/digits/model/images/classification/forms.py": ["/digits/model/images/forms.py"], "/digits/model/images/generic/forms.py": ["/digits/model/images/forms.py"]}
|
2,927
|
gheinrich/DIGITS
|
refs/heads/master
|
/digits/model/tasks/test_caffe_train.py
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from . import caffe_train
from digits import test_utils
def test_caffe_imports():
test_utils.skipIfNotFramework('caffe')
import numpy
import google.protobuf
|
{"/digits/model/tasks/test_caffe_train.py": ["/digits/model/tasks/__init__.py"], "/digits/model/images/classification/forms.py": ["/digits/model/images/forms.py"], "/digits/model/images/generic/forms.py": ["/digits/model/images/forms.py"]}
|
2,928
|
gheinrich/DIGITS
|
refs/heads/master
|
/digits/model/tasks/__init__.py
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .caffe_train import CaffeTrainTask
from .torch_train import TorchTrainTask
from .train import TrainTask
|
{"/digits/model/tasks/test_caffe_train.py": ["/digits/model/tasks/__init__.py"], "/digits/model/images/classification/forms.py": ["/digits/model/images/forms.py"], "/digits/model/images/generic/forms.py": ["/digits/model/images/forms.py"]}
|
2,929
|
gheinrich/DIGITS
|
refs/heads/master
|
/digits/pretrained_model/tasks/__init__.py
|
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .upload_pretrained import UploadPretrainedModelTask
from .caffe_upload import CaffeUploadTask
from .torch_upload import TorchUploadTask
|
{"/digits/model/tasks/test_caffe_train.py": ["/digits/model/tasks/__init__.py"], "/digits/model/images/classification/forms.py": ["/digits/model/images/forms.py"], "/digits/model/images/generic/forms.py": ["/digits/model/images/forms.py"]}
|
2,930
|
gheinrich/DIGITS
|
refs/heads/master
|
/digits/pretrained_model/__init__.py
|
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .job import PretrainedModelJob
|
{"/digits/model/tasks/test_caffe_train.py": ["/digits/model/tasks/__init__.py"], "/digits/model/images/classification/forms.py": ["/digits/model/images/forms.py"], "/digits/model/images/generic/forms.py": ["/digits/model/images/forms.py"]}
|
2,931
|
gheinrich/DIGITS
|
refs/heads/master
|
/digits/model/images/classification/forms.py
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import wtforms
from wtforms import validators
from ..forms import ImageModelForm
class ImageClassificationModelForm(ImageModelForm):
"""
Defines the form used to create a new ImageClassificationModelJob
"""
pass
|
{"/digits/model/tasks/test_caffe_train.py": ["/digits/model/tasks/__init__.py"], "/digits/model/images/classification/forms.py": ["/digits/model/images/forms.py"], "/digits/model/images/generic/forms.py": ["/digits/model/images/forms.py"]}
|
2,932
|
gheinrich/DIGITS
|
refs/heads/master
|
/digits/config/__init__.py
|
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
# Create this object before the imports below, since importing them adds entries to it
option_list = {}
from . import caffe
from . import gpu_list
from . import jobs_dir
from . import log_file
from . import torch
from . import server_name
from . import store_option
def config_value(option):
"""
Return the current configuration value for the given option
"""
return option_list[option]
|
{"/digits/model/tasks/test_caffe_train.py": ["/digits/model/tasks/__init__.py"], "/digits/model/images/classification/forms.py": ["/digits/model/images/forms.py"], "/digits/model/images/generic/forms.py": ["/digits/model/images/forms.py"]}
|
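config_value() is just a lookup into option_list, which each of the imported submodules populates as a side effect of being imported. A sketch of how a caller reads a setting; the 'jobs_dir' key is an assumption based on the jobs_dir submodule imported above:

from digits.config import config_value

# By the time this runs, `from . import jobs_dir` above has registered its
# option in option_list, so the lookup should succeed.
jobs_dir = config_value('jobs_dir')   # assumed option name
print(jobs_dir)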
2,933
|
gheinrich/DIGITS
|
refs/heads/master
|
/digits/model/images/generic/forms.py
|
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import wtforms
from wtforms import validators
from ..forms import ImageModelForm
class GenericImageModelForm(ImageModelForm):
"""
Defines the form used to create a new GenericImageModelJob
"""
pass
|
{"/digits/model/tasks/test_caffe_train.py": ["/digits/model/tasks/__init__.py"], "/digits/model/images/classification/forms.py": ["/digits/model/images/forms.py"], "/digits/model/images/generic/forms.py": ["/digits/model/images/forms.py"]}
|
2,936
|
JiTao3/hierarchical_attention
|
refs/heads/master
|
/util/qerror.py
|
from typing import List
import numpy as np
def cal_q_error(predict, label, log=True):
if log:
predict = np.e**predict
label = np.e**label
if predict > label:
q_error = predict / label
else:
q_error = label / predict
return q_error
def print_qerror(q_error: List):
print("max qerror: {:.4f}".format(max(q_error)))
print("mean qerror: {:.4f}".format(np.mean(q_error)))
print("media qerror: {:.4f}".format(np.median(q_error)))
print("90th qerror: {:.4f}".format(np.percentile(q_error, 90)))
print("95th qerror: {:.4f}".format(np.percentile(q_error, 95)))
print("99th qerror: {:.4f}".format(np.percentile(q_error, 99)))
|
{"/util/prase_tree2node_leaf.py": ["/util/plan_to_tree.py"], "/train.py": ["/model/encoder.py", "/util/dataset.py"], "/model/decoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/model/encoder.py"], "/model/encoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/util/dataset.py"], "/util/dataset.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py"], "/util/result.py": ["/util/qerror.py"]}
|
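With log=True, cal_q_error treats both inputs as natural-log cardinalities and exponentiates them before taking the larger/smaller ratio, so over- and under-estimation by the same factor score identically. A small worked check (assumes the repository root is on the import path):

import numpy as np
from util.qerror import cal_q_error

# Predicting 100 rows when the truth is 50 rows, and the reverse, both give
# a q-error of 2.0 once the log-space inputs are exponentiated.
print(cal_q_error(np.log(100.0), np.log(50.0)))  # 2.0
print(cal_q_error(np.log(50.0), np.log(100.0)))  # 2.0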
2,937
|
JiTao3/hierarchical_attention
|
refs/heads/master
|
/util/prase_tree2node_leaf.py
|
from typing import List
from collections import deque
import copy
import numpy as np
import torch
from util.plan_to_tree import Node, parse_dep_tree_text
def add_node_index(root: Node) -> Node:
    # add an index to the tree so that each node is identified uniquely
    # and the ancestral relationship between two nodes can be determined
index = 1
def add_index(root: Node):
nonlocal index
if not root:
return -1
root.index = index
index += 1
for child in root.children:
add_index(child)
add_index(root)
return root
def is_ancestor(leaf: Node, node: Node) -> bool:
    # determine whether node is an ancestor of leaf
node_queue = deque([node])
while node_queue:
cnt_node = node_queue.popleft()
for child in cnt_node.children:
node_queue.append(child)
if child.index == leaf.index:
return True
return False
def parse_tree2leaves_node(root: Node):
leaf = []
node = []
def plan_tree_leaves_node(root: Node):
# return the tree leaves and node list
if root.children:
node.append(root)
for child in root.children:
plan_tree_leaves_node(child)
else:
leaf.append(root)
plan_tree_leaves_node(root)
return leaf, node
def treeInterpolation(root: Node, leaf, node):
# global FEATURE_LEN
add_node_index(root)
feature_len = leaf.shape[-1]
leaf_order, node_order = parse_tree2leaves_node(root=root)
tree_depth = len(node_order)
tree_width = len(leaf_order)
interpolation_vec = torch.zeros((tree_depth + 1, tree_width, feature_len), dtype=torch.double)
for leaf_index in range(tree_width):
interpolation_vec[tree_depth][leaf_index] = leaf[leaf_index]
for leaf_index in range(tree_width):
for node_index in range(tree_depth):
if is_ancestor(leaf=leaf_order[leaf_index], node=node_order[node_index]):
interpolation_vec[node_index][leaf_index] = node[node_index]
hierarchical_embeddings_vec = hierarchical_embeddings(
root=root, leaf_order=leaf_order, node_order=node_order, feature_len=feature_len
)
# print(torch.nonzero(hierarchical_embeddings_vec))
# test_upward(interpolation_vec)
return interpolation_vec + hierarchical_embeddings_vec
def vertical_deepth(node: Node, leaf: Node) -> int:
deepth = 0
node_queue = deque([node])
# size = len(node_queue)
while node_queue:
size = len(node_queue)
deepth += 1
while size:
cnt_node = node_queue.popleft()
size -= 1
for child in cnt_node.children:
node_queue.append(child)
if child.index == leaf.index:
return deepth
def horizontal_width(root: Node) -> int:
# if only root it will return root
leaf, _ = parse_tree2leaves_node(root=root)
return len(leaf)
def hierarchical_embeddings(root: Node, leaf_order: List, node_order: List, feature_len: int):
# global FEATURE_LEN
tree_depth = len(node_order)
tree_width = len(leaf_order)
# feature_len =
vertical_len = feature_len // 2
horizontal_len = feature_len // 2
hierarchical_emebdding_vec = torch.zeros(
(tree_depth + 1, tree_width, feature_len), dtype=torch.double)
for leaf_index in range(tree_width):
for node_index in range(tree_depth):
node = node_order[node_index]
leaf = leaf_order[leaf_index]
if is_ancestor(leaf=leaf, node=node):
depth = vertical_deepth(node=node, leaf=leaf)
width = horizontal_width(root=node)
                # need to check depth and width < horizontal_len
assert depth < horizontal_len and width < vertical_len
hierarchical_emebdding_vec[node_index][leaf_index][depth - 1] = 1.0
hierarchical_emebdding_vec[node_index][leaf_index][horizontal_len + width - 1] = 1.0
return hierarchical_emebdding_vec
def upward_ca(interpolation_vec):
interpolation_vec_cp = copy.copy(interpolation_vec)
tree_depth, tree_width, feature_len = interpolation_vec.shape
upward_ca_vec = torch.zeros((tree_depth - 1, tree_width, feature_len), dtype=torch.double)
for leaf_index in range(tree_width):
for node_index in range(tree_depth - 1):
if interpolation_vec_cp[node_index][leaf_index].detach().numpy().any():
# if(torch.is_nonzero(interpolation_vec[node_index][leaf_index])):
num_not_null = 1
upward_ca_vec[node_index][leaf_index] = interpolation_vec[tree_depth - 1][leaf_index]
for in_node_index in range(node_index, tree_depth - 1):
if interpolation_vec_cp[in_node_index][leaf_index].detach().numpy().any():
# if(torch.is_nonzero(interpolation_vec[in_node_index][leaf_index])):
upward_ca_vec[node_index][leaf_index] += interpolation_vec[in_node_index][leaf_index]
num_not_null += 1
# print(num_not_null)
upward_ca_vec[node_index][leaf_index] /= num_not_null
# test_upward(upward_ca_vec)
return upward_ca_vec
def weightedAggregationCoeffi(root: Node):
leaf_order, node_order = parse_tree2leaves_node(root=root)
tree_depth = len(node_order)
tree_width = len(leaf_order)
agg_coeffi = torch.zeros((tree_depth), dtype=torch.double)
agg_coeffi += torch.tensor([tree_width], dtype=torch.double)
leaves_nodes = [parse_tree2leaves_node(rot) for rot in node_order]
tree_size = [len(leaves) + len(nodes) for leaves, nodes in leaves_nodes]
agg_coeffi += torch.tensor(tree_size, dtype=torch.double)
return 1 / agg_coeffi
# def weighted_aggregation(upward_ca_vec):
# # upward ca vec with dim = node + 1 * leaf * d
# dim = upward_ca_vec.shape[2]
# no_zero = np.count_nonzero(upward_ca_vec, axis=(1, 2))/dim
# upward_ca_sum = np.sum(upward_ca_vec, axis=1)
# # no_zero * upward ca sum in each line
# weighted_aggregation_vec = upward_ca_sum * np.expand_dims(no_zero, 1)
# return weighted_aggregation_vec
def test_interpolation():
plan_tree, max_children = parse_dep_tree_text(folder_name="./data")
add_node_index(plan_tree[1])
leaf_order, node_order = parse_tree2leaves_node(root=plan_tree[1])
tree_depth = len(node_order)
tree_width = len(leaf_order)
print(tree_depth, tree_width)
test_interpolation = np.zeros((tree_depth, tree_width), dtype=np.double)
for leaf_index in range(tree_width):
for node_index in range(tree_depth):
if is_ancestor(leaf=leaf_order[leaf_index], node=node_order[node_index]):
test_interpolation[node_index][leaf_index] = 1
print(test_interpolation)
def test_upward(upward_ca_vec):
test_upward_vec = torch.sum(upward_ca_vec, dim=-1)
print(torch.nonzero(test_upward_vec))
def tree2NodeLeafmat(root: Node):
global FEATURE_LEN
leaf_order, node_order = parse_tree2leaves_node(root)
node_mat = np.array([node.data for node in node_order], dtype=np.double)
leaf_mat = np.array([leaf.data for leaf in leaf_order], dtype=np.double)
nodemat, leafmat = (torch.from_numpy(node_mat).double(), torch.from_numpy(leaf_mat).double())
return nodemat, leafmat
if __name__ == "__main__":
# print(os.path.abspath('.'))
plan_tree, max_children = parse_dep_tree_text(folder_name="./data")
add_node_index(plan_tree[1])
leaf_order, node_order = parse_tree2leaves_node(root=plan_tree[1])
|
{"/util/prase_tree2node_leaf.py": ["/util/plan_to_tree.py"], "/train.py": ["/model/encoder.py", "/util/dataset.py"], "/model/decoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/model/encoder.py"], "/model/encoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/util/dataset.py"], "/util/dataset.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py"], "/util/result.py": ["/util/qerror.py"]}
|
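add_node_index and is_ancestor only rely on a node's children list and the index attribute the former assigns, so their behaviour can be checked with a tiny stand-in tree. The Stub class below is hypothetical, not part of the repository; the import also assumes the repo's dependencies (torch, numpy) are installed and that util.plan_to_tree's hard-coded .npy path resolves, since it is loaded at import time:

from util.prase_tree2node_leaf import add_node_index, is_ancestor

class Stub:
    # Minimal stand-in exposing only the attributes the helpers touch.
    def __init__(self, children=None):
        self.children = children or []
        self.index = None

leaf = Stub()
root = Stub(children=[Stub(children=[leaf]), Stub()])
add_node_index(root)                                   # pre-order indices 1, 2, 3, 4
print(is_ancestor(leaf=leaf, node=root))               # True
print(is_ancestor(leaf=root.children[1], node=leaf))   # False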
2,938
|
JiTao3/hierarchical_attention
|
refs/heads/master
|
/util/__init__.py
|
__all__=[
'plan_to_tree',
'prase_tree2node_leaf'
]
|
{"/util/prase_tree2node_leaf.py": ["/util/plan_to_tree.py"], "/train.py": ["/model/encoder.py", "/util/dataset.py"], "/model/decoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/model/encoder.py"], "/model/encoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/util/dataset.py"], "/util/dataset.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py"], "/util/result.py": ["/util/qerror.py"]}
|
2,939
|
JiTao3/hierarchical_attention
|
refs/heads/master
|
/train.py
|
import math
from model.encoder import Encoder
from util.dataset import PlanDataset
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader, random_split
from torchsummary import summary
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
dataset = PlanDataset(root_dir="data/deep_cardinality")
dataloader = DataLoader(dataset, batch_size=1, shuffle=True)
train_size = int(len(dataset) * 0.8)
test_size = len(dataset) - train_size
# train_temp = [dataset[i] for i in range(10)]
# test_temp = [dataset[i] for i in range(5)]
train_dataset, test_dataset = random_split(dataset, [train_size, test_size])
# train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=2)
# test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=2)
encoder = Encoder(d_feature=9 + 6 + 64, d_model=256, d_ff=128, N=4).double()
summary(encoder)
criterion = nn.MSELoss()
optimizer = optim.Adam(encoder.parameters(), lr=0.001)
epoch_size = 2
def train():
result = []
for epoch in range(epoch_size):
print("epoch : ", epoch)
running_loss = 0.0
for i, data in enumerate(train_dataset):
tree, nodemat, leafmat, label = data
optimizer.zero_grad()
output = encoder(tree, nodemat.double(), leafmat.double())
# output = output
if len(output.shape) > 1 or len(label.shape) > 1:
print("output: {} ,label: {}".format(len(output.shape), len(label.shape)))
loss = criterion(output, label)
loss.backward()
optimizer.step()
running_loss += loss.item()
if math.isnan(running_loss):
print("nan: ", i, "\t", running_loss)
if i % 200 == 0 and i != 0:
print("[%d, %5d] loss: %4f" % (epoch + 1, i + 1, running_loss / 200))
running_loss = 0.0
test_loss = 0.0
with torch.no_grad():
for i, data in enumerate(test_dataset):
tree, nodemat, leafmat, label = data
test_output = encoder(tree, nodemat, leafmat)
if epoch == epoch_size - 1:
result.append((label, test_output))
loss = criterion(test_output, label)
test_loss += loss.item()
if i % 200 == 0 and i != 0:
print("test loss: ", test_loss / test_size)
return result
def dataset_test():
for i, data in enumerate(test_dataset):
tree, nodemat, leafmat, label = data
print(label)
if __name__ == "__main__":
result = train()
# result = [(1.1, 2.2), (3.3, 4.4), (5.5, 6.6)]
with open("data/dmodel256/resutldeep_cv1.0dff128-e2-N4-lr0.001.txt", "w") as f:
f.write("\n".join("{} {}".format(x[0].item(), x[1].item()) for x in result))
# torch.save(encoder, "model_parameter/encoderv1.0.pkl")
# dataset_test()
|
{"/util/prase_tree2node_leaf.py": ["/util/plan_to_tree.py"], "/train.py": ["/model/encoder.py", "/util/dataset.py"], "/model/decoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/model/encoder.py"], "/model/encoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/util/dataset.py"], "/util/dataset.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py"], "/util/result.py": ["/util/qerror.py"]}
|
2,940
|
JiTao3/hierarchical_attention
|
refs/heads/master
|
/model/decoder.py
|
from torch.autograd import Variable
import time
import copy
import math
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import os
import sys
sys.path.append(os.path.abspath(os.getcwd()))
print(sys.path)
from util.plan_to_tree import Node, parse_dep_tree_text
from util.prase_tree2node_leaf import (
treeInterpolation,
hierarchical_embeddings,
upward_ca,
tree2NodeLeafmat,
)
from model.encoder import attention, WeightedAggregation, LayerNorm, Reshape, clones
class DecoderLinear(nn.Module):
def __init__(self, d_feature, d_model):
super(DecoderLinear, self).__init__()
self.query_linear = nn.Linear(d_model, d_feature)
self.key_linear = nn.Linear(d_model, d_feature)
        self.value_linear = nn.Linear(d_model, d_feature)
def forward(self, x, target):
value = self.value_linear(x)
key = self.key_linear(x)
query = self.query_linear(target)
return value, key, query
class DecoderAttentionScaledDot(nn.Module):
def __init__(self, d_feature, d_model, dropout=0.1):
super(DecoderAttentionScaledDot, self).__init__()
# self.decoderLiner = DecoderLinear(d_feature, d_model)
self.dropout = nn.Dropout(p=dropout)
def forward(self, q_target, node_k, leaf_k, mask=None):
Aqn = attention(query=q_target, key=node_k, mask=mask, dropout=self.dropout)
Aql = attention(query=q_target, key=leaf_k, mask=mask, dropout=self.dropout)
return Aqn, Aql
class DecoderAttention(nn.Module):
def __init__(self, d_feature, d_model):
super(DecoderAttention, self).__init__()
self.linear = DecoderLinear(d_feature=d_feature, d_model=d_model)
self.scaledDot = DecoderAttentionScaledDot(d_feature=d_feature, d_model=d_model)
self.weightedAgg = WeightedAggregation(d_feature)
def forward(self, root, node, leaf, target):
node_v, node_k, node_q = self.linear(node, target)
leaf_v, leaf_k, leaf_q = self.linear(leaf, target)
# node_q == leaf_q is target
Aqn, Aql = self.scaledDot(node_q, node_k, leaf_k)
# !!!! node_hat = ???
# but you should keep the order of node?!!!
# the order of node_q & node and leaf_q & leaf should be same
# you should use parse tree 2 node leaf plan_tree_leaves_node to keep the order
interpolation_vec = treeInterpolation(root=root, leaf=leaf_v, node=node_v)
# node + 1 * leaf * d
# you should use parse tree 2 node leaf plan_tree_leaves_node to keep the order
upward_ca_vec = upward_ca(interpolation_vec)
# upward_ca_tensor = torch.from_numpy(upward_ca_vec)
        node_hat = self.weightedAgg(root, leaf, upward_ca_vec)
leaf_hat = leaf_v
# !!!! dim
        Attq = F.softmax(
            torch.matmul(
                torch.cat((Aqn, Aql), dim=-1),
                torch.cat((node_hat.double(), leaf_hat), dim=-2),
            ),
            dim=-1,
        )
return Attq
class DecoderLayer(nn.Module):
def __init__(self, d_feature, d_model, d_ff):
super(DecoderLayer, self).__init__()
self.norm1 = LayerNorm(d_feature)
self.norm2 = LayerNorm(d_feature)
self.decoderAttention = DecoderAttention(d_feature, d_model)
self.feed_forward = nn.Sequential(
nn.Linear(d_model, d_ff), nn.ReLU(), nn.Linear(d_ff, d_model)
)
def forward(self, root, node_x, leaf_x, target):
# !!! target + mask(norm(attention(target)))
x = self.decoderAttention(root, node_x, leaf_x, target)
x = x + self.norm1(x)
x = self.feed_forward(x)
x = x + self.norm2(x)
return x
class Decoder(nn.Module):
def __init__(self, d_feature, d_model, d_ff, N):
super(Decoder, self).__init__()
self.reshape = Reshape(d_feature=d_feature, d_model=d_model)
        self.layers = clones(DecoderLayer(d_feature=d_feature, d_model=d_model, d_ff=d_ff), N)
def forward(self, root, node_x, leaf_x, target):
target = self.reshape(target)
for layer in self.layers:
target = layer(root, node_x, leaf_x, target)
return target
|
{"/util/prase_tree2node_leaf.py": ["/util/plan_to_tree.py"], "/train.py": ["/model/encoder.py", "/util/dataset.py"], "/model/decoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/model/encoder.py"], "/model/encoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/util/dataset.py"], "/util/dataset.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py"], "/util/result.py": ["/util/qerror.py"]}
|
2,941
|
JiTao3/hierarchical_attention
|
refs/heads/master
|
/model/encoder.py
|
import copy
import math
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import os
import sys
sys.path.append(os.path.abspath(os.getcwd()))
# print(sys.path)
from util.plan_to_tree import Node, parse_dep_tree_text
from util.prase_tree2node_leaf import treeInterpolation, upward_ca, tree2NodeLeafmat, weightedAggregationCoeffi
from util.dataset import PlanDataset
def clones(module, N):
if N <= 0:
return []
else:
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class LayerNorm(nn.Module):
def __init__(self, feature, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(feature), requires_grad=True)
self.b_2 = nn.Parameter(torch.zeros(feature), requires_grad=True)
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
def attention(query, key, mask=None, dropout=None):
"""get score"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return p_attn
class TreeAttentionLinear(nn.Module):
def __init__(self, d_feature, d_model, dropout=0.1):
super(TreeAttentionLinear, self).__init__()
self.query_linear = nn.Linear(d_feature, d_model)
self.key_linear = nn.Linear(d_feature, d_model)
        self.value_linear = nn.Linear(d_feature, d_model)
def forward(self, x):
q = self.query_linear(x)
k = self.key_linear(x)
        v = self.value_linear(x)
return q, k, v
class TreeAttentionScaledDot(nn.Module):
def __init__(self, d_feature, dropout=0.1):
super(TreeAttentionScaledDot, self).__init__()
# !!! use different dropout ???
self.dropout = nn.Dropout(p=dropout)
# self.leafLinear = nn.Linear(d_feature, d_feature)
def forward(self, node_q, node_k, leaf_q, leaf_k, mask=None):
Anl = attention(query=node_q, key=leaf_k, mask=mask, dropout=self.dropout)
Ann = attention(query=node_q, key=node_k, mask=mask, dropout=self.dropout)
All = attention(query=leaf_q, key=leaf_k, mask=mask, dropout=self.dropout)
Aln = attention(query=leaf_q, key=node_k, mask=mask, dropout=self.dropout)
return Anl, Ann, All, Aln
class WeightedAggregation(nn.Module):
def __init__(self, d_feature):
super(WeightedAggregation, self).__init__()
# !!!
self.u_s = nn.Parameter(torch.rand(d_feature, requires_grad=True))
self.register_parameter("U_s", self.u_s)
self.d_featuer = d_feature
def forward(self, root, leaf, upward_ca_vec):
# omega size leaf * d
omega = torch.matmul(leaf, self.u_s)
# upward_ca_vec size node * leaf * d
omega_shape = omega.shape[-1]
weighted_aggregation_vec = upward_ca_vec * omega.reshape([1, omega_shape, 1])
# no_zero shape node * 1
# weight_aggregation_vec shape is node*leaf*d
weighted_aggregation_vec = torch.sum(weighted_aggregation_vec, dim=1)
# weight_aggregation_vec shape is node*d
# upward_ca_vec_cp = copy.copy(upward_ca_vec)
# nozero_div = (np.count_nonzero(upward_ca_vec_cp.detach().numpy(), axis=(1, 2)) + 1e-6) / self.d_featuer
# no_zero = 1 / nozero_div
# # no_zero_shape =
# no_zero = torch.from_numpy(no_zero)
# weighted_aggregation_vec = weighted_aggregation_vec * torch.unsqueeze(no_zero, 1)
div = weightedAggregationCoeffi(root=root)
weighted_aggregation_vec = weighted_aggregation_vec * torch.unsqueeze(div, 1)
return weighted_aggregation_vec
class TreeAttention(nn.Module):
def __init__(self, d_feature, d_model):
super(TreeAttention, self).__init__()
self.nodelinear = TreeAttentionLinear(d_feature=d_feature, d_model=d_model)
self.leaflinear = TreeAttentionLinear(d_feature=d_feature, d_model=d_model)
self.scaledDot = TreeAttentionScaledDot(d_feature=d_feature)
self.weightAgg = WeightedAggregation(d_feature=d_feature)
def forward(self, root: Node, node, leaf):
node_q, node_k, node_v = self.nodelinear(node)
leaf_q, leaf_k, leaf_v = self.leaflinear(leaf)
Anl, Ann, All, Aln = self.scaledDot(node_q, node_k, leaf_q, leaf_k)
# !!!! node_hat = ???
# but you should keep the order of node?!!!
# the order of node_q & node and leaf_q & leaf should be same
# you should use parse tree 2 node leaf plan_tree_leaves_node to keep the order
interpolation_vec = treeInterpolation(root=root, leaf=leaf_v, node=node_v)
# node + 1 * leaf * d
# you should use parse tree 2 node leaf plan_tree_leaves_node to keep the order
upward_ca_vec = upward_ca(interpolation_vec)
# upward_ca_tensor = torch.from_numpy(upward_ca_vec)
node_hat = self.weightAgg(root, leaf, upward_ca_vec)
leaf_hat = leaf_v
# 1)!!! node_hat = ???
# 2) cat the matrix and return attn and attl
# !!! DIM
# !!! mask
# AnnAnl = torch.cat((Ann, Anl),dim=-1)
# leafnodehat = torch.cat((node_hat.float(), leaf_hat),dim=-2)
Attn = torch.matmul(
F.softmax(torch.cat((Ann, Anl), dim=-1), dim=-2),
torch.cat((node_hat, leaf_hat), dim=-2),
)
Attl = torch.matmul(
F.softmax(torch.cat((Aln, All), dim=-1), dim=-2),
torch.cat((node_hat, leaf_hat), dim=-2),
)
return Attn, Attl
class Reshape(nn.Module):
def __init__(self, d_feature, d_model):
super(Reshape, self).__init__()
self.reshape = nn.Sequential(nn.Linear(d_feature, d_model), nn.ReLU())
def forward(self, x):
return self.reshape(x)
class EncoderLayer(nn.Module):
def __init__(self, d_feature, d_model, d_ff):
super(EncoderLayer, self).__init__()
# self.reshape = nn.Linear(d_feature, d_model)
self.treeattn = TreeAttention(d_feature, d_model)
# Wo
# !!! d
self.linear = nn.Linear(d_model, d_model)
# self.reshape = Reshape(d_feature, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.feed_forward = nn.Sequential(
nn.Linear(d_model, d_ff),
nn.ReLU(),
nn.Linear(d_ff, d_ff // 2),
nn.ReLU(),
nn.Linear(d_ff // 2, d_model),
nn.ReLU()
)
def forward(self, root, node, leaf):
Attn, Attl = self.treeattn(root, node, leaf)
Attno, Attlo = self.linear(Attn), self.linear(Attl)
node_x = node + self.norm1(Attno)
leaf_x = leaf + self.norm2(Attlo)
feed_node_x = self.feed_forward(node_x)
feed_leaf_x = self.feed_forward(leaf_x)
node_x = node_x + self.norm2(feed_node_x)
leaf_x = leaf_x + self.norm2(feed_leaf_x)
return node_x, leaf_x
class Encoder(nn.Module):
def __init__(self, d_feature, d_model, d_ff, N):
super(Encoder, self).__init__()
self.reshape = Reshape(d_feature=d_feature, d_model=d_model)
self.firstEncoder = EncoderLayer(d_feature=d_feature, d_model=d_feature, d_ff=d_model)
self.layers = clones(
EncoderLayer(d_feature=d_model, d_model=d_model, d_ff=d_ff), N=N - 1
)
self.forward_net = nn.Sequential(
nn.Linear(d_model, 1),
nn.ReLU(),
)
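# Forward pass below: run the first encoder layer in the raw feature space, project node
# and leaf features to d_model, apply the remaining N-1 layers, max-pool nodes and leaves
# together, and map the pooled vector to a single scalar prediction.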
def forward(self, root, node, leaf):
# node = self.reshape(node)
# leaf = self.reshape(leaf)
node, leaf = self.firstEncoder(root, node, leaf)
node, leaf = self.reshape(node), self.reshape(leaf)
for layer in self.layers:
node, leaf = layer(root, node, leaf)
x = torch.cat((node, leaf), dim=-2)
# max pool
x = torch.max(x, dim=-2, keepdim=True)[0]
x = self.forward_net(x)
return x.squeeze(-1)
if __name__ == "__main__":
encoder = Encoder(d_feature=9 + 6 + 64, d_model=512, d_ff=512, N=2).double()
dataset = PlanDataset(root_dir="data/deep_cardinality")
tree, nodemat, leafmat, label = dataset[51]
print(nodemat.shape, leafmat.shape)
x = encoder(tree, nodemat.double(), leafmat.double())
print(x)
|
{"/util/prase_tree2node_leaf.py": ["/util/plan_to_tree.py"], "/train.py": ["/model/encoder.py", "/util/dataset.py"], "/model/decoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/model/encoder.py"], "/model/encoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/util/dataset.py"], "/util/dataset.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py"], "/util/result.py": ["/util/qerror.py"]}
|
2,942
|
JiTao3/hierarchical_attention
|
refs/heads/master
|
/util/plan_to_tree.py
|
import os
import numpy as np
operators = [
"Merge Join",
"Hash",
"Index Only Scan using title_pkey on title t",
"Sort",
"Seq Scan",
"Index Scan using title_pkey on title t",
"Materialize",
"Nested Loop",
"Hash Join",
]
columns = [
"ci.movie_id",
"t.id",
"mi_idx.movie_id",
"mi.movie_id",
"mc.movie_id",
"mk.movie_id",
]
scan_features = np.load("/home/jitao/hierarchical_attention/model_parameter/featuer_deep_cardinality.npy")
def extract_time(line):
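# Parse a plan line such as
# "... (cost=START..END rows=R width=W) (actual time=S..E rows=AR loops=L)"
# into the estimated and actual cost / row-count numbers.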
data = line.replace("->", "").lstrip().split(" ")[-1].split(" ")
start_cost = data[0].split("..")[0].replace("(cost=", "")
end_cost = data[0].split("..")[1]
rows = data[1].replace("rows=", "")
width = data[2].replace("width=", "").replace(")", "")
a_start_cost = data[4].split("..")[0].replace("time=", "")
a_end_cost = data[4].split("..")[1]
a_rows = data[5].replace("rows=", "")
return (
float(start_cost),
float(end_cost),
float(rows),
float(width),
float(a_start_cost),
float(a_end_cost),
float(a_rows),
)
def extract_operator(line):
operator = line.replace("->", "").lstrip().split(" ")[0]
if operator.startswith("Seq Scan"):
operator = "Seq Scan"
return operator, operator in operators
def extract_attributes(operator, line, feature_vec, i=None):
operators = [
"Merge Join",
"Hash",
"Index Only Scan using title_pkey on title t",
"Sort",
"Seq Scan",
"Index Scan using title_pkey on title t",
"Materialize",
"Nested Loop",
"Hash Join",
]
columns = [
"ci.movie_id",
"t.id",
"mi_idx.movie_id",
"mi.movie_id",
"mc.movie_id",
"mk.movie_id",
]
operators_count = len(operators) # 9
if operator in ["Hash", "Materialize", "Nested Loop"]:
pass
elif operator == "Merge Join":
if "Cond" in line:
for column in columns:
if column in line:
feature_vec[columns.index(column) + operators_count] = 1.0
elif operator == "Index Only Scan using title_pkey on title t":
# feature_vec[15:56] = scan_features[i]
if "Cond" in line:
feature_vec[columns.index("t.id") + operators_count] = 1.0
for column in columns:
if column in line:
feature_vec[columns.index(column) + operators_count] = 1.0
elif operator == "Sort":
for column in columns:
if column in line:
feature_vec[columns.index(column) + operators_count] = 1.0
elif operator == "Index Scan using title_pkey on title t":
# feature_vec[15:56] = scan_features[i]
if "Cond" in line:
feature_vec[columns.index("t.id") + operators_count] = 1.0
for column in columns:
if column in line:
feature_vec[columns.index(column) + operators_count] = 1.0
elif operator == "Hash Join":
if "Cond" in line:
for column in columns:
if column in line:
feature_vec[columns.index(column) + operators_count] = 1.0
elif operator == "Seq Scan":
feature_vec[15:79] = scan_features[i] # 64
"""Tree node class"""
class Node(object):
def __init__(self, data, parent=None, index=-1):
self.data = data
self.children = []
self.parent = parent
self.index = index
def add_child(self, obj):
self.children.append(obj)
def add_parent(self, obj):
self.parent = obj
def __str__(self, tabs=0):
tab_spaces = str.join("", [" " for i in range(tabs)])
return (
tab_spaces + "+-- Node: " + str.join("|", self.data) + "\n" +
str.join("\n", [child.__str__(tabs + 2) for child in self.children])
)
def parse_dep_tree_text(folder_name="data"):
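# Build one feature tree per EXPLAIN ANALYZE text file in folder_name: the indentation of each
# "->" line determines parent/child relations (deeper indent = child, equal indent = sibling),
# and every node stores operator one-hot + join/scan column flags + scan features + cost/cardinality numbers.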
scan_cnt = 0
max_children = 0
plan_trees = []
feature_len = 9 + 6 + 7 + 64
for each_plan in sorted(os.listdir(folder_name)):
# print(each_plan)
with open(os.path.join(folder_name, each_plan), "r") as f:
lines = f.readlines()
feature_vec = [0.0] * feature_len
operator, in_operators = extract_operator(lines[0])
if not in_operators:
operator, in_operators = extract_operator(lines[1])
start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
lines[1]
)
j = 2
else:
start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
lines[0]
)
j = 1
feature_vec[feature_len - 7: feature_len] = [
start_cost,
end_cost,
rows,
width,
a_start_cost,
a_end_cost,
a_rows,
]
feature_vec[operators.index(operator)] = 1.0
if operator == "Seq Scan":
extract_attributes(operator, lines[j], feature_vec, scan_cnt)
scan_cnt += 1
root_tokens = feature_vec
current_node = Node(root_tokens)
plan_trees.append(current_node)
continue
else:
while "actual" not in lines[j] and "Plan" not in lines[j]:
extract_attributes(operator, lines[j], feature_vec)
j += 1
root_tokens = feature_vec # all of them?
current_node = Node(root_tokens)
plan_trees.append(current_node)
spaces = 0
node_stack = []
i = j
while not lines[i].startswith("Planning time"):
line = lines[i]
i += 1
if line.startswith("Planning time") or line.startswith(
"Execution time"
):
break
elif line.strip() == "":
break
elif "->" not in line:
continue
else:
if line.index("->") < spaces:
while line.index("->") < spaces:
current_node, spaces = node_stack.pop()
if line.index("->") > spaces:
line_copy = line
feature_vec = [0.0] * feature_len
start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
line_copy
)
feature_vec[feature_len - 7: feature_len] = [
start_cost,
end_cost,
rows,
width,
a_start_cost,
a_end_cost,
a_rows,
]
operator, in_operators = extract_operator(line_copy)
feature_vec[operators.index(operator)] = 1.0
if operator == "Seq Scan":
extract_attributes(
operator, line_copy, feature_vec, scan_cnt
)
scan_cnt += 1
else:
j = 0
while (
"actual" not in lines[i + j] and "Plan" not in lines[i + j]
):
extract_attributes(operator, lines[i + j], feature_vec)
j += 1
tokens = feature_vec
new_node = Node(tokens, parent=current_node)
current_node.add_child(new_node)
if len(current_node.children) > max_children:
max_children = len(current_node.children)
node_stack.append((current_node, spaces))
current_node = new_node
spaces = line.index("->")
elif line.index("->") == spaces:
line_copy = line
feature_vec = [0.0] * feature_len
start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
line_copy
)
feature_vec[feature_len - 7: feature_len] = [
start_cost,
end_cost,
rows,
width,
a_start_cost,
a_end_cost,
a_rows,
]
operator, in_operators = extract_operator(line_copy)
feature_vec[operators.index(operator)] = 1.0
if operator == "Seq Scan":
extract_attributes(
operator, line_copy, feature_vec, scan_cnt
)
scan_cnt += 1
else:
j = 0
while (
"actual" not in lines[i + j] and "Plan" not in lines[i + j]
):
extract_attributes(operator, lines[i + j], feature_vec)
j += 1
tokens = feature_vec
new_node = Node(tokens, parent=node_stack[-1][0])
node_stack[-1][0].add_child(new_node)
if len(node_stack[-1][0].children) > max_children:
max_children = len(node_stack[-1][0].children)
current_node = new_node
spaces = line.index("->")
# break
# print(scan_cnt)
return plan_trees, max_children # a list of the roots nodes
def parse_dep_tree_text_lb_ub(folder_name="data/"):
scan_cnt = 0
max_children = 0
plan_trees = []
feature_len = 9 + 6 + 7 + 32
for each_plan in sorted(os.listdir(folder_name)):
# print(each_plan)
with open(os.path.join(folder_name, each_plan), "r") as f:
lines = f.readlines()
feature_vec = [0.0] * feature_len
operator, in_operators = extract_operator(lines[0])
if not in_operators:
operator, in_operators = extract_operator(lines[1])
start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
lines[1]
)
j = 2
else:
start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
lines[0]
)
j = 1
feature_vec[feature_len - 7: feature_len] = [
start_cost,
end_cost,
rows,
width,
a_start_cost,
a_end_cost,
a_rows,
]
feature_vec[operators.index(operator)] = 1.0
if operator == "Seq Scan":
extract_attributes(operator, lines[j], feature_vec, scan_cnt)
scan_cnt += 1
root_tokens = feature_vec
current_node = Node(root_tokens)
plan_trees.append(current_node)
continue
else:
while "actual" not in lines[j] and "Plan" not in lines[j]:
extract_attributes(operator, lines[j], feature_vec)
j += 1
root_tokens = feature_vec # all of them?
current_node = Node(root_tokens)
plan_trees.append(current_node)
spaces = 0
node_stack = []
i = j
while not lines[i].startswith("Planning time"):
line = lines[i]
i += 1
if line.startswith("Planning time") or line.startswith(
"Execution time"
):
break
elif line.strip() == "":
break
elif "->" not in line:
continue
else:
if line.index("->") < spaces:
while line.index("->") < spaces:
current_node, spaces = node_stack.pop()
if line.index("->") > spaces:
line_copy = line
feature_vec = [0.0] * feature_len
start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
line_copy
)
feature_vec[feature_len - 7: feature_len] = [
start_cost,
end_cost,
rows,
width,
a_start_cost,
a_end_cost,
a_rows,
]
operator, in_operators = extract_operator(line_copy)
feature_vec[operators.index(operator)] = 1.0
if operator == "Seq Scan":
# if(operator == "Seq Scan" or operator == "Index Only Scan using title_pkey on title t"
# or operator=='Index Scan using title_pkey on title t'):
extract_attributes(
operator, line_copy, feature_vec, scan_cnt
)
scan_cnt += 1
else:
j = 0
while (
"actual" not in lines[i + j] and "Plan" not in lines[i + j]
):
extract_attributes(operator, lines[i + j], feature_vec)
j += 1
tokens = feature_vec
new_node = Node(tokens, parent=current_node)
current_node.add_child(new_node)
if len(current_node.children) > max_children:
max_children = len(current_node.children)
node_stack.append((current_node, spaces))
current_node = new_node
spaces = line.index("->")
elif line.index("->") == spaces:
line_copy = line
feature_vec = [0.0] * feature_len
start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
line_copy
)
feature_vec[feature_len - 7: feature_len] = [
start_cost,
end_cost,
rows,
width,
a_start_cost,
a_end_cost,
a_rows,
]
operator, in_operators = extract_operator(line_copy)
feature_vec[operators.index(operator)] = 1.0
if operator == "Seq Scan":
# if(operator == "Seq Scan" or operator == "Index Only Scan using title_pkey on title t" or
# operator=='Index Scan using title_pkey on title t'):
extract_attributes(
operator, line_copy, feature_vec, scan_cnt
)
scan_cnt += 1
else:
j = 0
while (
"actual" not in lines[i + j] and "Plan" not in lines[i + j]
):
extract_attributes(operator, lines[i + j], feature_vec)
j += 1
tokens = feature_vec
new_node = Node(tokens, parent=node_stack[-1][0])
node_stack[-1][0].add_child(new_node)
if len(node_stack[-1][0].children) > max_children:
max_children = len(node_stack[-1][0].children)
current_node = new_node
spaces = line.index("->")
# break
# print(scan_cnt)
return plan_trees, max_children # a list of the roots nodes
def p2t(node):
# prediction to true cardinality
# return float(start_cost),float(end_cost),float(rows),float(width),
# float(a_start_cost),float(a_end_cost),float(a_rows)
tree = {}
tmp = node.data
operators_count = 9
columns_count = 6
scan_features = 64
assert len(tmp) == operators_count + columns_count + 7 + scan_features
tree["features"] = tmp[: operators_count + columns_count + scan_features]
# tree['features'].append(tmp[-5]) #with card as feature
tree["features"].append(tmp[-1]) # with Actual card as feature
# cardinality
# tree['labels'] = np.log(node.data[-1]+1) #cardinality
# tree['pg'] = np.log(node.data[-5])
# cost
tree["labels"] = np.log(node.data[-2]) # cost
tree["pg"] = np.log(node.data[-6])
tree["children"] = []
for children in node.children:
tree["children"].append(p2t(children))
return tree
def tree_feature_label(root: Node):
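# Strip the 7 trailing cost/cardinality numbers from every node (keeping the first 9+6+64
# feature entries) and return the root together with the log of its actual row count
# (left unscaled when the count is <= 1).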
label = root.data[-1]
operators_count = 9
columns_count = 6
scan_features = 64
feature_len = operators_count + columns_count + scan_features
def feature(root: Node):
root.data = root.data[:feature_len]
if root.children:
for child in root.children:
feature(child)
feature(root)
return root, np.log(label) if label > 1 else label
if __name__ == "__main__":
print(os.path.abspath("."))
plan_tree, max_children = parse_dep_tree_text(folder_name="./data/deep_plan")
# add_node_index(plan_tree[1])
# leaf,node = test(plan_tree[1])
print(len(plan_tree))
|
{"/util/prase_tree2node_leaf.py": ["/util/plan_to_tree.py"], "/train.py": ["/model/encoder.py", "/util/dataset.py"], "/model/decoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/model/encoder.py"], "/model/encoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/util/dataset.py"], "/util/dataset.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py"], "/util/result.py": ["/util/qerror.py"]}
|
2,943
|
JiTao3/hierarchical_attention
|
refs/heads/master
|
/util/dataset.py
|
import time
import copy
import math
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import os
import sys
from torch.utils.data import Dataset, DataLoader
sys.path.append(os.path.abspath(os.getcwd()))
# print(sys.path)
from util.plan_to_tree import Node, parse_dep_tree_text, tree_feature_label
from util.prase_tree2node_leaf import tree2NodeLeafmat
class PlanDataset(Dataset):
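# Dataset over parsed query plans: each item is (plan tree root, node feature matrix,
# leaf feature matrix, log-scaled label tensor), built from parse_dep_tree_text and tree2NodeLeafmat.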
def __init__(self, root_dir, transform=None):
self.root_dir = root_dir
self.planTrees, self.maxchild = parse_dep_tree_text(folder_name=root_dir)
self.trees_labels = [tree_feature_label(i) for i in self.planTrees]
self.transform = transform
def __len__(self):
return len(self.planTrees)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# root + label
tree, label = self.trees_labels[idx]
nodemat, leafmat = tree2NodeLeafmat(tree)
return (tree, nodemat, leafmat, torch.tensor(label, dtype=torch.double).reshape((1)))
def remove_single_tree(root_dir, target_dir):
planTrees, _ = parse_dep_tree_text(folder_name=root_dir)
plan_dir = sorted(os.listdir(root_dir))
for dir_name, tree in zip(plan_dir, planTrees):
if tree.children:
with open(os.path.join(root_dir, dir_name), "r") as read_f:
lines = read_f.readlines()
with open(os.path.join(target_dir, dir_name), "w") as write_f:
write_f.writelines(lines)
def test_label():
dataset = PlanDataset(root_dir="/home/jitao/hierarchical_attention/data/deep_plan")
for i, data in enumerate(dataset):
tree, nodemat, leafmat, label = data
# print(label.shape)
print(label)
if np.isnan(label.numpy()):
print("nan:", i)
if np.isinf(label.numpy()):
print("inf", i)
if __name__ == "__main__":
remove_single_tree(
# root_dir="/data1/jitao/dataset/cardinality/all_plan",
root_dir="/home/jitao/hierarchical_attention/data/cardinality",
target_dir="/home/jitao/hierarchical_attention/data/deep_cardinality",
)
# pass
# data = PlanDataset(root_dir="data/data2")
# test_label()
|
{"/util/prase_tree2node_leaf.py": ["/util/plan_to_tree.py"], "/train.py": ["/model/encoder.py", "/util/dataset.py"], "/model/decoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/model/encoder.py"], "/model/encoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/util/dataset.py"], "/util/dataset.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py"], "/util/result.py": ["/util/qerror.py"]}
|
2,944
|
JiTao3/hierarchical_attention
|
refs/heads/master
|
/util/result.py
|
import sys
import os
import numpy as np
sys.path.append(os.path.abspath(os.getcwd()))
from util.qerror import cal_q_error, print_qerror
with open("/home/jitao/hierarchical_attention/data/dmodel512/resutlv1.0-e10-N4-lr0.001.txt", 'r') as f:
lines = f.readlines()
label_output = [line.split(' ') for line in lines]
label = [float(label) for label, _ in label_output]
output = [float(output) for _, output in label_output]
len(label)
qerror = [cal_q_error(predict, actually) for predict, actually in zip(output, label)]
print_qerror(q_error=qerror)
|
{"/util/prase_tree2node_leaf.py": ["/util/plan_to_tree.py"], "/train.py": ["/model/encoder.py", "/util/dataset.py"], "/model/decoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/model/encoder.py"], "/model/encoder.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py", "/util/dataset.py"], "/util/dataset.py": ["/util/plan_to_tree.py", "/util/prase_tree2node_leaf.py"], "/util/result.py": ["/util/qerror.py"]}
|
2,963
|
ZKDeep/Hand-Written-Urdu-Character-Recognition-using-DenseNet121
|
refs/heads/master
|
/arguments.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 9 20:41:45 2019
@author: zubair
"""
batch_size = 2
no_epochs = 1
training_path = "train/"
validation_path = "valid/"
test_path = "test/"
|
{"/densenet121.py": ["/arguments.py"]}
|
2,964
|
ZKDeep/Hand-Written-Urdu-Character-Recognition-using-DenseNet121
|
refs/heads/master
|
/densenet121.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 7 01:46:43 2019
@author: zubair
"""
import numpy as np
import keras
from matplotlib import pyplot as plt
from keras import Model
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
import os
from keras.layers import Dense
from keras.layers import Activation, Flatten, GlobalAveragePooling2D
from keras.models import Sequential
from sklearn.metrics import classification_report, confusion_matrix
import arguments
import seaborn as sns
import pandas as pd
print("type 'train' or 'test' for training or testing")
check = input()
train_path = arguments.training_path
valid_path = arguments.validation_path
test_path = arguments.test_path
labels_reading = arguments.training_path # This will generate labels as per folders name
class_lables = os.listdir(labels_reading)
train_batches = ImageDataGenerator().flow_from_directory(train_path, target_size=(224,224), classes=class_lables, batch_size= arguments.batch_size, shuffle = True)
valid_batches = ImageDataGenerator().flow_from_directory(valid_path, target_size=(224,224), classes=class_lables, batch_size= arguments.batch_size, shuffle = True)
test_batches = ImageDataGenerator().flow_from_directory(test_path, target_size=(224,224), classes=class_lables, batch_size= arguments.batch_size, shuffle = False)
classes = len(np.unique(train_batches.classes))
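# Transfer-learning head built below: a DenseNet121 ImageNet backbone without its classifier,
# followed by global average pooling, a 512-unit ReLU layer and a softmax layer over the classes
# found in the training folder. All layers are left trainable before compiling with SGD.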
dense121 = keras.applications.DenseNet121(include_top=False, weights='imagenet')
new_model=dense121.output
new_model=GlobalAveragePooling2D()(new_model)
new_model=Dense(512,activation='relu')(new_model) #dense layer 3
preds=Dense(classes,activation='softmax')(new_model) #final layer with softmax activation
model=Model(inputs=dense121.input,outputs=preds)
for i,layer in enumerate(model.layers):
print(i,layer.name)
for layer in model.layers:
layer.trainable=True
model.compile(optimizer='SGD',loss='categorical_crossentropy',metrics=['accuracy'])
def training():
print("training the model")
try:
model.load_weights("results/weights.h5")
except:
print("No weights found training from scratch.....")
step_size_train = train_batches.n//train_batches.batch_size
hist = model.fit_generator(generator=train_batches, validation_data=valid_batches,
validation_steps= valid_batches.n//valid_batches.batch_size,
steps_per_epoch=step_size_train,
epochs=arguments.no_epochs)
model.save_weights("results/weights.h5")
print("Please training results............")
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('results/Acc.png')
plt.show()
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('results/loss.png')
plt.show()
testing()
def testing():
print("testing the model")
try:
model.load_weights("results/weights.h5")
except:
print("No weights found test on random weights")
step_size_test = test_batches.n//test_batches.batch_size
evl = model.evaluate_generator(generator=test_batches, steps = step_size_test, verbose=1)
Y_pred = model.predict_generator(test_batches, steps=step_size_test, verbose=1)
y_pred = np.argmax(Y_pred, axis=1)
dif = abs(len(y_pred) - len(test_batches.classes))
if dif > 0:
y_true = test_batches.classes[:-dif]
else:
y_true = test_batches.classes
print('Confusion Matrix')
print(confusion_matrix(y_true, y_pred))
matrix = confusion_matrix(y_true, y_pred)
sns.heatmap(matrix,annot=True,cbar=False)
y_true = pd.Series(y_true, name="Actual")
y_pred = pd.Series(y_pred, name="Predicted")
df_confusion = pd.crosstab(y_true, y_pred)
df_confusion.to_csv('results/confusion_matrix.csv')
print('Classification Report')
target_names = list((np.unique(y_true)))
for i in range(len(target_names)):
target_names[i] = str(target_names[i])
print(classification_report(y_true, y_pred, target_names=target_names))
return(evl)
if check == "train":
training()
elif check == "test":
print("testing")
testing()
|
{"/densenet121.py": ["/arguments.py"]}
|
2,965
|
lqfGaara/sinaSpider
|
refs/heads/master
|
/sinaSpider/start.py
|
from scrapy import cmdline
cmdline.execute("scrapy crawl sinaNewSpider".split())
|
{"/sinaSpider/spiders/sinaNewSpider.py": ["/sinaSpider/items.py"]}
|
2,966
|
lqfGaara/sinaSpider
|
refs/heads/master
|
/sinaSpider/spiders/sinaNewSpider.py
|
# -*- coding: utf-8 -*-
import scrapy
import os
from sinaSpider.items import SinaspiderItem
class SinanewspiderSpider(scrapy.Spider):
name = 'sinaNewSpider'
allowed_domains = ['news.sina.com.cn']
start_urls = ['http://news.sina.com.cn/guide/']
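# Crawl flow: parse() reads the sina.com.cn channel guide and creates one local folder per
# category and sub-category, parse_child() collects the *.shtml article links of each
# sub-category, and last() extracts the article title and body text and yields the item.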
def parse(self, response):
# parent category names
parentNames = response.xpath('//div[@class="article"]//h3/a/text()').extract()
# URLs of the parent categories
parentUrls = response.xpath('//div[@class="article"]//h3/a/@href').extract()
# child category names
chlidNames = response.xpath('//div[@class="article"]//ul/li/a/text()').extract()
# URLs of the child categories
chlidUrls = response.xpath('//div[@class="article"]//ul/li/a/@href').extract()
items = []
for i in range(len(parentNames)):
parentName = "/Users/stonelqf/Desktop/sina/" + parentNames[i]
if not os.path.exists(parentName):
os.mkdir(parentName)
for j in range(len(chlidUrls)):
item = SinaspiderItem()
if chlidUrls[j].startswith(parentUrls[i]):
item['childUrl'] = chlidUrls[j]  # index j (the matching child url), not the parent index i
chlidName = parentName + "/" + chlidNames[j]
if not os.path.exists(chlidName):
os.mkdir(chlidName)
item["contentFileUrl"] = chlidName + "/"
items.append(item)
for item in items:
yield scrapy.Request(url=item['childUrl'], meta={"meta_1": item}, callback=self.parse_child)
def parse_child(self, response):
meta = response.meta["meta_1"]
items = []
for node in response.xpath('//div/a/@href').extract():
if node.endswith(".shtml"):
item = SinaspiderItem()
item['contentFileUrl'] = meta['contentFileUrl']
item['childUrl'] = meta['childUrl']
item['fileUrl'] = node
items.append(item)
for item in items:
yield scrapy.Request(url=item['fileUrl'], meta={"meta_2": item}, callback=self.last)
def last(self, response):
meta2 = response.meta["meta_2"]
title = response.xpath('//h1[@class="main-title"]/text()').extract()
if len(title) != 0:
item = SinaspiderItem()
item['contentFileUrl'] = meta2['contentFileUrl']
item["contentTitle"] = title[0]
contents = response.xpath('//div[@class ="article"]/p/text()').extract()
text=""
if len(contents) != 0:
for content in contents:
text += content
item["content"]=text
yield item
|
{"/sinaSpider/spiders/sinaNewSpider.py": ["/sinaSpider/items.py"]}
|
2,967
|
lqfGaara/sinaSpider
|
refs/heads/master
|
/sinaSpider/items.py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SinaspiderItem(scrapy.Item):
# define the fields for your item here like:
childUrl=scrapy.Field()
# article title
contentTitle=scrapy.Field()
# article body
content=scrapy.Field()
# local path where the article is saved
contentFileUrl=scrapy.Field()
# URL of the article page
fileUrl=scrapy.Field()
|
{"/sinaSpider/spiders/sinaNewSpider.py": ["/sinaSpider/items.py"]}
|
2,968
|
lqfGaara/sinaSpider
|
refs/heads/master
|
/sinaSpider/pipelines.py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class SinaspiderPipeline(object):
def process_item(self, item, spider):
file = item['contentFileUrl'] + str(item["contentTitle"]).strip() + ".txt"
print(file)
with open(file, "w") as f:
if (len(item['content']) != 0):
f.write(item['content'])
return item
|
{"/sinaSpider/spiders/sinaNewSpider.py": ["/sinaSpider/items.py"]}
|
2,976
|
mnhampl/alma-slipsomat
|
refs/heads/master
|
/slipsomat/__init__.py
|
__version__ = '0.3.1-new_letter_configuration' # Use bumpversion to update
|
{"/slipsomat/configuration_table.py": ["/slipsomat/letter_info.py"]}
|
2,977
|
mnhampl/alma-slipsomat
|
refs/heads/master
|
/slipsomat/configuration_table.py
|
from __future__ import print_function
import os
import os.path
import re
import time
import sys
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.remote.errorhandler import NoSuchElementException
from colorama import Fore, Back, Style
from .slipsomat import LetterContent
from .letter_info import LetterInfo
class ConfigurationTable(object):
"""Interface to "Customize letters" in Alma."""
def __init__(self, pagename, worker):
self.letter_infos = [] # array of LetterInfo objects
self.update_dates = []
self.worker = worker
self.pagename = pagename
self.css_selector_table_row = '.jsRecordContainer'
self.css_selector_button_template = '#cnew_letter_labeltemplate_span'
if pagename == 'Components Configuration':
self.css_selector_table = '#filesAndLabels'
self.css_selector_col_name = '#SELENIUM_ID_filesAndLabels_ROW_%d_COL_letterXslcfgFilefilename'
self.css_selector_col_customized = '#SELENIUM_ID_filesAndLabels_ROW_%d_COL_customized'
elif pagename == 'Letters Configuration':
self.css_selector_table = '#lettersOnPage'
self.css_selector_col_name = '#SELENIUM_ID_lettersOnPage_ROW_%d_COL_letterNameForUI'
self.css_selector_col_channel = '#SELENIUM_ID_lettersOnPage_ROW_%d_COL_channel'
self.css_selector_col_customized = '#SELENIUM_ID_lettersOnPage_ROW_%d_COL_customized'
else:
raise Exception()
def open(self):
"""Go from Alma start page to general configuration and open subpage"""
try:
# at page that lists letters?
self.worker.first(By.CSS_SELECTOR, self.css_selector_table)
except NoSuchElementException:
# not at page that lists letters?
self.print_letter_status('Opening table...', '')
# Goto Alma start page
self.worker.goto_alma_start_page()
# Open Alma configuration
self.worker.wait_for_and_click(By.CSS_SELECTOR, '#ALMA_MENU_TOP_NAV_configuration')
# Open configuration "General"
self.worker.click(By.XPATH, '//*[@href="#CONF_MENU6"]')
# Open Subpage
self.worker.click(By.XPATH, '//*[text() = "' + self.pagename + '"]')
self.worker.wait_for(By.CSS_SELECTOR, self.css_selector_table)
return self
def modified(self, name):
# idx = self.names.index(name)
# return self.update_dates[idx]
return ""
def set_modified(self, name, date):
# Allow updating a single date instead of having to re-read the whole table
idx = self.letter_infos.index(name)
self.update_dates[idx] = date
def print_letter_status(self, string, msg, progress=None, newline=False):
sys.stdout.write('\r{:100}'.format('')) # We clear the line first
if progress is not None:
sys.stdout.write('\r[{}] {:60} {}'.format(
progress,
string.split('/')[-1],
msg
))
else:
sys.stdout.write('\r{:60} {}'.format(
string.split('/')[-1],
msg
))
if newline:
sys.stdout.write('\n')
sys.stdout.flush()
def read(self):
self.letter_infos = []
# number of letters on page
elems_rows = self.worker.all(By.CSS_SELECTOR, self.css_selector_table_row)
# first try: only read the first page
for i in range(0, len(elems_rows)):
name = self.worker.all(By.CSS_SELECTOR, self.css_selector_col_name % i)[0].text
if self.pagename == 'Letters Configuration':
channel = self.worker.all(By.CSS_SELECTOR, self.css_selector_col_channel % i)[0].text
else:
channel = None
letter_info = LetterInfo(name, i, channel)
self.letter_infos.append(letter_info)
print(str(i+1) + ': ' + letter_info.unique_name)
# # Read the modification date column
# elems = self.worker.all(By.CSS_SELECTOR,
# '#lettersOnPage tr > td:nth-child(%d) > span' % updatedate_col)
# self.update_dates = [el.text for el in elems]
#
# # return [{x[0]:2 {'modified': x[1], 'index': n}} for n, x in enumerate(zip(names, update_dates))]
def is_customized(self, name):
index = self.letter_infos.index(name)
css_selector_element = self.css_selector_col_customized % index
self.worker.wait_for(By.CSS_SELECTOR, css_selector_element)
updated_by = self.worker.first(By.CSS_SELECTOR, css_selector_element)
return updated_by.text not in ('-', 'Network')
def assert_page_title(self, page_title):
""" Assert that we are at the right letter """
# on subpage??
self.worker.wait_for(By.CSS_SELECTOR, self.css_selector_button_template)
element = self.worker.wait.until(
EC.presence_of_element_located((By.CSS_SELECTOR, '.pageTitle'))
)
elt = element.text
assert elt == page_title, "%r != %r" % (elt, page_title)
def open_letter(self, letter_info):
self.open()
# Open a letter and return its contents as a LetterContent object.
index = self.letter_infos.index(letter_info)
self.worker.wait.until(EC.presence_of_element_located(
(By.CSS_SELECTOR, self.css_selector_col_name % index))
)
time.sleep(0.2)
# Open Letter configuration
self.worker.scroll_into_view_and_click((self.css_selector_col_name + ' a') % index, By.CSS_SELECTOR)
time.sleep(0.2)
# We should now be at the letter edit form. Assert that page title is correct
self.assert_page_title(letter_info.name)
# goto tab "Template"
# Click tab "Template" menu item
css_selector_link = self.css_selector_button_template + ' a'
self.worker.wait_for(By.CSS_SELECTOR, css_selector_link)
self.worker.scroll_into_view_and_click(css_selector_link, By.CSS_SELECTOR)
css_selector_template_textarea = 'pageBeanfileContent'
self.worker.wait_for(By.ID, css_selector_template_textarea)
txtarea = self.worker.first(By.ID, css_selector_template_textarea)
return LetterContent(txtarea.text)
def close_letter(self):
# If we are at specific letter, press the "Cancel" button.
elems = self.worker.all(By.CSS_SELECTOR, '.pageTitle')
if len(elems) != 0:
btn_selector = '#PAGE_BUTTONS_cbuttonnavigationcancel'
self.worker.scroll_into_view_and_click(btn_selector, By.CSS_SELECTOR)
def put_contents(self, letter_info, content):
"""
Save letter contents to Alma.
This method assumes the letter has already been opened.
"""
self.assert_page_title(letter_info.name)
# The "normal" way to set the value of a textarea with Selenium is to use
# send_keys(), but it took > 30 seconds for some of the larger letters.
# So here's a much faster way:
txtarea = self.worker.first(By.ID, 'pageBeanfileContent')
txtarea_id = txtarea.get_attribute('id')
value = content.text.replace('"', '\\"').replace('\n', '\\n')
script = 'document.getElementById("%s").value = "%s";' % (txtarea_id, value)
self.worker.driver.execute_script(script)
# Submit the form
try:
btn = self.worker.first(By.ID, 'PAGE_BUTTONS_cbuttonsave')
except NoSuchElementException:
btn = self.worker.first(By.ID, 'PAGE_BUTTONS_cbuttoncustomize')
btn.click()
# Wait for the table view.
# Longer timeout per https://github.com/scriptotek/alma-slipsomat/issues/33
self.worker.wait_for(By.CSS_SELECTOR, '.typeD table', timeout=40)
return True
def pull(self, local_storage, status_file):
count_new = 0
count_changed = 0
self.open()
self.read()
for idx, letter_info in enumerate(self.letter_infos):
progress = '%3d/%3d' % ((idx + 1), len(self.letter_infos))
self.print_letter_status(letter_info.unique_name, '', progress)
self.print_letter_status(letter_info.unique_name, 'checking...', progress)
# --- Bug, skip webhook letters
if letter_info.unique_name.endswith('-WEBHOOK'):
self.print_letter_status(
letter_info.unique_name, Fore.RED + 'skipped WEBHOOK' + Style.RESET_ALL, progress, True)
continue
# --- End Bug, Letter
try:
content = self.open_letter(letter_info)
# if self.is_customized(letter_info):
# content = self.open_letter(letter_info)
# else:
# content = self.open_default_letter(letter_info)
except TimeoutException:
# Retry once
self.print_letter_status(letter_info.unique_name, 'retrying...', progress)
# if self.is_customized(letter_info):
content = self.open_letter(letter_info)
# else:
# content = self.open_default_letter(letter_info)
self.close_letter()
old_sha1 = status_file.checksum(letter_info.get_filename())
if content.sha1 == old_sha1:
self.print_letter_status(letter_info.unique_name, 'no changes', progress, True)
continue
if not local_storage.store(letter_info, content, self.modified(letter_info)):
self.print_letter_status(
letter_info.unique_name, Fore.RED + 'skipped due to conflict' + Style.RESET_ALL, progress, True)
continue
if old_sha1 is None:
count_new += 1
self.print_letter_status(letter_info.unique_name, Fore.GREEN + 'fetched new letter @ {}'.format(
content.sha1[0:7]) + Style.RESET_ALL, progress, True)
else:
count_changed += 1
self.print_letter_status(letter_info.unique_name, Fore.GREEN + 'updated from {} to {}'.format(
old_sha1[0:7], content.sha1[0:7]) + Style.RESET_ALL, progress, True)
sys.stdout.write(Fore.GREEN + 'Fetched {} new, {} changed letters\n'.format(
count_new, count_changed) + Style.RESET_ALL)
|
{"/slipsomat/configuration_table.py": ["/slipsomat/letter_info.py"]}
|
2,978
|
mnhampl/alma-slipsomat
|
refs/heads/master
|
/slipsomat/letter_info.py
|
class LetterInfo(object):
"""Interface to "Customize letters" in Alma."""
def __init__(self, name, index, channel):
self.name = name
self.index = index
self.channel = channel
self.unique_name = name + '-' + channel if channel else name
# if channel:
# self.unique_name = name + '-' + channel
# else:
# self.unique_name = name
def get_filename(self):
filename = './' + self.unique_name.replace(' ', '_')
# file ending
if not(filename.endswith('.xsl')):
filename += '.xsl'
return filename
|
{"/slipsomat/configuration_table.py": ["/slipsomat/letter_info.py"]}
|
3,003
|
abndre/TensaoResidual
|
refs/heads/master
|
/P_L_/P_L_PB_1_/read_raw.py
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Andrezio
#
# Created: 23/07/2017
# Copyright: (c) Andrezio 2017
# Licence: <your licence>
#-------------------------------------------------------------------------------
file_name='P_L_PB_1_.raw'
datafile = open(file_name, 'rb')  # Python 3: open() replaces the removed file() builtin
import fabio
image = fabio.open(file_name)
|
{"/calc_stress.py": ["/commands.py"]}
|
3,004
|
abndre/TensaoResidual
|
refs/heads/master
|
/calc_stress.py
|
#import matplotlib.pyplot as plt
#from commands import multi, removerbackground,removekalpha, normalizar, removerzero, background,processing_of_data, lenar_calc, read_file,center_psi, red_file_rigaku,red_files_chimazu
from commands import red_file_rigaku,red_files_chimazu
if __name__ == "__main__":
print('Start')
#red_files_chimazu('P_L_','P_L_PB_3_')
red_files_chimazu('popb','Po_PB_7,1_')
#red_file_rigaku ('P_L_1/P_PB_L_{}.ASC'.format(7))
|
{"/calc_stress.py": ["/commands.py"]}
|
3,005
|
abndre/TensaoResidual
|
refs/heads/master
|
/window.py
|
#-------------------------------------------------------------------------------
# Purpose:
#
# Author: Andre Santos Barros da Silva
#
# Created: 27/07/2018
# Copyright:
# Licence:
#-------------------------------------------------------------------------------
from tkinter import *
root = Tk()
root.title('Notebook')
texto = Label(root,text='SHOW').place(x=10,y=5)
horizontal=0
vertical=40
btnPlotar = Button(root, text="SAMPLE").place(x=horizontal,y=vertical)
vertical+=30
btnPlotar = Button(root, text="PLOT").place(x=horizontal,y=vertical)
vertical+=30
btnResetar = Button(root, text="RESET").place(x=horizontal,y=vertical)
vertical+=30
btnPlotar = Button(root, text="CLOSE").place(x=horizontal,y=vertical)
vertical+=30
btnPlotar = Button(root, text="BACK").place(x=horizontal,y=vertical)
vertical+=30
btnPlotar = Button(root, text="DOWNLOAD").place(x=horizontal,y=vertical)
#menu
menubar = Menu(root)
filemenu= Menu(menubar)
filemenu.add_command(label="Open File")
filemenu.add_command(label="Close")
filemenu.add_separator()
menubar.add_cascade(label="File",menu=filemenu)
helpmenu = Menu(menubar)
helpmenu.add_command(label="Help Index")
helpmenu.add_command(label="About")
menubar.add_cascade(label="Help",menu=helpmenu)
root.config(menu=menubar)
root.title("Cristal Mat - Xtress - IPEN")
root.geometry("650x380+10+10")
root.mainloop()
|
{"/calc_stress.py": ["/commands.py"]}
|
3,006
|
abndre/TensaoResidual
|
refs/heads/master
|
/commands.py
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from lmfit.models import VoigtModel,PseudoVoigtModel, LinearModel
from scipy import stats
def LPM(theta,psi):
radians = np.radians(theta)
radiansby2 = np.radians(theta/2)
radianpsi = np.radians(psi)
cima = 1 + np.cos(radians)**2
baixo = np.sin(radiansby2)**2
lado = 1 - np.tan(radianpsi)/np.tan(radiansby2)
LPM_value = (cima/baixo)*lado
return LPM_value
def Lorentz_polarization_modified(psi,x,y):
new_list =[]
for key, value in enumerate(x):
new = LPM(value,psi)
new_list.append(y[key]/new)
#import pdb;pdb.set_trace()
return (new_list)
def plotar_intensity_position():
plt.grid()
plt.legend(loc=0)
plt.xlabel('Position (2Theta)')
plt.ylabel('Intensity(u.a.)')
plt.show()
#return K const, based in sample
def multi(E=210000,v=0.3,theta2=156):
theta2/=2
V=2.0*(1.0+v)
theta = np.radians(theta2)
theta = np.tan(theta)
theta = 1.0/theta
theta *= (np.pi/180.0)
theta *=E
theta /=-1.0*V
## return theta/9.8#kg
return theta#Mpa
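# Rough check of the defaults (not part of the original code): E=210000 MPa, v=0.3 and
# 2theta=156 deg give K = -E/(2*(1+v)) * cot(78 deg) * pi/180, i.e. roughly -300 MPa per degree,
# the factor applied to the slope of 2theta versus sin^2(psi) in lenar_calc().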
##################################
#Cleand Data
#return novot
def removekalpha(x,y):
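# Intended behaviour (inferred from the code): Rachinger-style K-alpha2 stripping - for each
# 2theta point, subtract half of the intensity measured at the point shifted by the
# K-alpha1/K-alpha2 doublet separation 2*tan(theta)*(delta_lambda/lambda), expressed in scan steps.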
lambida2=1.541220
lambida1=1.537400
deltaL = lambida2 - lambida1
deltaL = deltaL/lambida1
diferenca=x[1]-x[0]
minimo=min(y)
novoy=[]
for i in range(len(y)):
deltasoma = x[1]-x[0]
ase= np.tan(np.radians(x[i]/2))*2*deltaL/(diferenca)
n=1;
while(ase>deltasoma):
deltasoma=deltasoma+diferenca
n+=1
try:
yy=y[i]-0.5*y[i-n]
if yy<0:yy=(yy+y[i])/8
if yy<0:yy=minimo
novoy.append(yy)
except:
novoy.append(y[i])
return novoy
#return y
def background(y):
minimo=min(y)
for i in range(len(y)):
y[i]-=minimo
return y
#return y
def normalizar(y):
minimo=max(y)
for i in range(len(y)):
y[i]/=minimo
return y
def removerzero(vetor):
for key, value in enumerate(vetor):
if value <0:
vetor[key]=0
for key,value in enumerate(vetor):
try:
if vetor[key+1]==0 and value >0:
vetor[key]=0
except:
pass
return vetor
def removerbackground(x,y,m=5):
minimo= np.mean( np.sort(y)[:10])
for i in range(len(y)):
y[i]=y[i]-minimo
slope, intercept, r_value, p_value, std_err = stats.linregress(np.append(x[:m],x[-m:]),np.append(y[:m],y[-m:]))
abline_values = [slope * i + intercept for i in x]
abline_values=np.asarray(abline_values)
return removerzero(y-abline_values)
#Cleand Data
def processing_of_data(psi,x,y):
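# Peak pre-processing pipeline: minimum subtraction, linear background removal,
# Lorentz-polarization correction, K-alpha2 stripping, Savitzky-Golay smoothing
# (window 5, order 2) and scaling so the peak maximum equals 1.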
#y = normalizar(y)
y = background(y)
y = removerbackground(x,y)
#import pdb;pdb.set_trace()
#plt.plot(y)
y = Lorentz_polarization_modified(psi,x,y)
#plt.plot(y);plt.show();import pdb;pdb.set_trace()
y = removekalpha(x,y)
y = savgol_filter(y, 5, 2)
y = normalizar(y)
return y
def lenar_calc(x,y):
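# Core of the sin^2(psi) method: fit a straight line to the (sin^2(psi), 2theta peak centre)
# points and multiply its slope by the elastic constant from multi() to get the residual stress in MPa.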
mod = LinearModel()
pars = mod.guess(y, x=x)
out = mod.fit(y, pars, x=x)
calc= out.best_values['slope']
stress=calc*multi()
stress=round(stress,3)
#plt.plot(x,out.bes_fit)
return stress, x , out.best_fit,out
#print(out.best_values)
def read_file(file_name):
psi=0
try:
r = open(file_name,'r',encoding = "ISO-8859-1")
except:
return False
printar = False
vx = []
vy = []
for i in r:
if printar:
value = i.split(' ')
x=value[3]
x = float(x)
vx.append(x)
y=value[-1].split('\n')[0]
y =float(y)
vy.append(y)
if not printar and '<2Theta> < I >' in i:
printar = True
if not printar and 'psi angle' in i:
value = i.split(' ')
psi=float(value[-3])
psi=np.sin(np.radians(psi))**2
vx = np.asarray(vx)
vy = np.asarray(vy)
return psi, vx, vy
def calc_center_pseudoVoigt(vx,vy):
mod = PseudoVoigtModel()
y = vy
pars = mod.guess(y, x=vx)
out = mod.fit(y, pars, x=vx)
center = out.best_values['center']
return center
def parabol(x):
import pdb; pdb.set_trace()
# for key, value in enumerate(x):
def center_psi(file_name):
#print(file_name)
psi, vx, vy = read_file(file_name)
vy = processing_of_data(psi,vx,vy)
legenda = file_name.split('/')[-1]
#plt.grid()
#plt.legend(loc=0)
#import pdb; pdb.set_trace()
plt.plot(vx,vy,label=legenda)
mod = PseudoVoigtModel()
y=vy
pars = mod.guess(y, x=vx)
out = mod.fit(y, pars, x=vx)
center =out.best_values['center']
print('center: {} <--> psi: {}'.format(center,psi))
return psi, center
#Medidas Rigaku
def get_value(i):
return float(i.split(' ')[-1].split('\n')[0])
#list_keys = list(dicio.keys())
def red_file_rigaku(folder_name):
dicio={
'*START':0.0,
'*STOP' :0.0,
'*STEP' :0.0,
'*ST_PSI_ANGLE':0.0
}
dados={}
file ='P_L_1/P_PB_L_1.ASC'
file = folder_name
r = open(file,'r')
find_intensity=False
x=[]
y=[]
for i in r:
#print(i)
if '*END' in i:
find_intensity=False
vx = np.asarray(x)
vy = np.asarray(y)
vy = processing_of_data(dicio['*ST_PSI_ANGLE'],vx,vy)
#import pdb; pdb.set_trace()
plt.plot(vx,vy,label=dicio['*ST_PSI_ANGLE'])
#plt.plot(vy)
dados[dicio['*ST_PSI_ANGLE']]={}
dados[dicio['*ST_PSI_ANGLE']]['x']=vx
dados[dicio['*ST_PSI_ANGLE']]['y']=vy
x=[]
y=[]
elif find_intensity:
value = i.split(',')
for i in value:
if len(x)==0:
x.append(dicio['*START'])
y.append(float(i))
dicio['*NEW_DICIO']=(dicio['*START']+dicio['*STEP'])
else:
x.append(dicio['*NEW_DICIO'])
dicio['*NEW_DICIO']=(dicio['*NEW_DICIO']+dicio['*STEP'])
y.append(float(i))
elif '*START' in i:
dicio['*START']=get_value(i)
elif '*STOP' in i:
dicio['*STOP']=get_value(i)
elif '*STEP' in i:
dicio['*STEP']=get_value(i)
elif '*ST_PSI_ANGLE' in i:
dicio['*ST_PSI_ANGLE']=get_value(i)
elif '*COUNT' in i and not '*COUNTER' in i:
find_intensity=True
plotar_intensity_position()
center_list =[]
psi_list =[]
for key, value in dados.items():
psi_list.append(np.sin(np.radians(key))**2)
center = calc_center_pseudoVoigt(value['x'],value['y'])
center_list.append(center)
print('center: {} <--> psi: {}'.format(center,np.sin(np.radians(key))**2))
legenda ,x,bestY, out= lenar_calc(psi_list,center_list)
plt.plot(psi_list,center_list,'o',label='Values')
plt.plot(x,bestY,label='Best')
miny=int(min(center_list))-2
maxy=int(max(center_list))+2
maxx=round(max(psi_list),3)+round(max(psi_list),3)/2
plt.axis([0,maxx,miny,maxy])
plt.grid()
#plt.title(dados)
plt.legend()
plt.xlabel('$\sin^{2}\psi$')
plt.ylabel('$2\Theta$ (Degree)')
#import pdb;pdb.set_trace()
plt.title('{}'.format(legenda))
plt.show()
#Chimazu
def red_files_chimazu(folderbefore,folder_name):
#dados='P_L_PB_3_'
center_list =[]
psi_list =[]
dados = folder_name
first_file='{}/{}/{}.txt'.format(folderbefore,dados,dados)
file_names=[]
file_names.append(first_file)
for i in range(1,10):
file_name='{}/{}{}/{}{}.txt'.format(folderbefore,dados,str(i),dados,str(i))
file_names.append(file_name)
for file_name in file_names:
psi, center = center_psi(file_name)
psi_list.append(psi)
center_list.append(center)
plotar_intensity_position()
#print(psi_list)
#print(center_list)
miny=int(min(center_list))-2
maxy=int(max(center_list))+2
maxx=round(max(psi_list),3)+round(max(psi_list),3)/2
plt.axis([0,maxx,miny,maxy])
plt.grid()
plt.title(dados)
plt.xlabel('$\sin^{2}\psi$')
plt.ylabel('$2\Theta$ (Degree)')
legenda ,x,bestY,out= lenar_calc(psi_list,center_list)
#plt.legend(legenda)
plt.plot(psi_list,center_list,'o',label=('{}'.format(legenda)))
plt.plot(x,bestY)
plt.legend(loc=0)
plt.show()
|
{"/calc_stress.py": ["/commands.py"]}
|
3,020
|
Gatszow/CarsScrapper
|
refs/heads/master
|
/database.py
|
import mysql.connector
from secret import password
from scrapper import CarsScrapper
def difference(list1, list2):
list_dif = [i for i in list1 + list2 if i not in list1 or i not in list2]
return list_dif
class DatabaseUpdater(object):
def __init__(self):
self.mydb = mysql.connector.connect(
host='localhost',
user='root',
password=password,
database='test'
)
self.mycursor = self.mydb.cursor()
# Database creation
# mycursor.execute('CREATE DATABASE test')
# Table creation
self.mycursor.execute(
'CREATE TABLE IF NOT EXISTS Cars ('
'CarID INT PRIMARY KEY AUTO_INCREMENT, '
'Make VARCHAR(30), '
'Model VARCHAR(30), '
'Mileage_km MEDIUMINT UNSIGNED, '
'ProductionYear YEAR, '
'FuelType ENUM("Benzyna", "Benzyna+LPG", "Benzyna+CNG", '
'"Diesel", "Elektryczny", "Etanol", "Hybryda", "Wodór", "Failed to get"), '
'EngineSize_cm3 SMALLINT UNSIGNED, '
'URL VARCHAR(500), '
'Price MEDIUMINT UNSIGNED, '
'Currency VARCHAR(10), '
'Negotiable ENUM("True", "False", "Failed to get") NOT NULL)'
)
self.values = CarsScrapper().search()  # run the scraper and keep the scraped rows
self.without = []
def check(self):
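# Compare the freshly scraped rows with what is already stored in Cars (matching on make,
# model, mileage, price and currency) and return only the rows that are not in the table yet.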
self.values = list(set(self.values))
self.mycursor.execute('SELECT * FROM Cars')
for record in self.mycursor:
for row in range(len(self.values)):
if record[1] == self.values[row][0] and record[2] == self.values[row][1] \
and record[3] == self.values[row][2] and record[8] == self.values[row][7] \
and record[9] == self.values[row][8]:
self.without.append(self.values[row])
values = difference(self.without, self.values)
return values
def add(self):
data = self.check()
self.mycursor.executemany(
'INSERT INTO Cars (Make, Model, Mileage_km, ProductionYear, FuelType, EngineSize_cm3, URL, Price, Currency, Negotiable) '
'VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)', data)
self.mydb.commit()
def show(self):
self.mycursor.execute('SELECT * FROM Cars')
for x in self.mycursor:
print(x)
if __name__ == '__main__':
DatabaseUpdater().show()
|
{"/database.py": ["/scrapper.py"], "/scrapper.py": ["/exceptions.py"], "/main.py": ["/database.py"]}
|
3,021
|
Gatszow/CarsScrapper
|
refs/heads/master
|
/scrapper.py
|
from selenium import webdriver
from exceptions import WrongThingToGetError
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException as NSEE, ElementNotInteractableException as ENIE
def change_to_int(string: str) -> int:
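# Strip spaces and peel trailing non-digit characters until the rest parses as an int,
# e.g. "150 000 km" -> 150000.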
string = string.replace(' ', '')
while True:
try:
string = int(string)
break
except ValueError:
string = string[:-1]
return string
def is_negotiable(string: str) -> str:
if string == 'Do negocjacji':
string = 'True'
else:
string = 'False'
return string
def get_price_and_currency(price_with_currency: str):
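# Split a price string such as "12 500 PLN" into its numeric value (12500) and the trailing
# currency suffix ("PLN") by peeling characters off the end until the remainder parses as an int.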
price_with_currency = price_with_currency.replace(' ', '')
name_of_currency = []
for z in range(len(price_with_currency)):
try:
int(price_with_currency)
break
except ValueError:
name_of_currency.append(price_with_currency[len(price_with_currency) - 1:])
price_with_currency = price_with_currency[:-1]
name_of_currency.reverse()
name_of_currency = ''.join(name_of_currency)
return int(price_with_currency), name_of_currency
class CarsScrapper(object):
def __init__(self):
self.url = 'https://www.otomoto.pl/osobowe/' \
'?search%5Bfilter_float_price%3Ato%5D=20000&search' \
'%5Bfilter_float_mileage%3Ato%5D=150000&search' \
'%5Bfilter_enum_fuel_type%5D%5B0%5D=petrol&search' \
'%5Bfilter_enum_fuel_type%5D%5B1%5D=petrol-lpg&search' \
'%5Bfilter_enum_damaged%5D=0&search' \
'%5Bfilter_enum_no_accident%5D=1&search' \
'%5Border%5D=created_at%3Adesc&search%5Bbrand_program_id%5D' \
'%5B0%5D=&search%5Bcountry%5D=&view=list&page=209'
self.driver = webdriver.Firefox()
self.driver.get(self.url)
self.isclosed = False
self.list_of_tuples = []
self.count = 1
self.makes = []
self.excluded_makes = ['Alfa Romeo', 'Aston Martin', 'De Lorean', 'Land Rover', 'DS Automobiles']
self.models = []
self.mileages = []
self.years = []
self.fuels = []
self.engine_sizes = []
self.urls = []
self.prices = []
self.currencies = []
self.negotiable = []
def get_products_make_and_model(self, title_class_name: str):
titles = self.driver.find_elements_by_class_name(title_class_name)
for title in titles:
if any(make in title.text for make in self.excluded_makes):
self.models.append(' '.join((title.text.split()[2:])))
temp_makes = title.text.split()[:2]
make = ' '.join(temp_makes)
self.makes.append(make)
temp_makes.clear()
else:
self.models.append(' '.join((title.text.split()[1:])))
self.makes.append(title.text.split()[0])
return self.makes, self.models
def get_products(self, thing_to_get, counter):
try:
if thing_to_get == 'mileage':
for i in range(1, counter + 1):
try:
mileage = self.driver.find_element_by_xpath(
f'/html/body/div[4]/div[2]/section/div[2]/div[1]/div/div[1]/div[5]/article[{i}'
f']/div[2]/ul/li[2]/span')
self.mileages.append(change_to_int(mileage.text))
except NSEE:
mileage = 0000
self.mileages.append(mileage)
return self.mileages
elif thing_to_get == 'year':
for i in range(1, counter + 1):
try:
year = self.driver.find_element_by_xpath(
f'/html/body/div[4]/div[2]/section/div[2]/div[1]/div/div[1]/div[5]/article[{i}'
f']/div[2]/ul/li[1]/span')
self.years.append(int(year.text))
except NSEE:
year = 0000
self.years.append(year)
return self.years
elif thing_to_get == 'fuel':
for i in range(1, counter + 1):
try:
fuel = self.driver.find_element_by_xpath(
f'/html/body/div[4]/div[2]/section/div[2]/div[1]/div/div[1]/div[5]/article[{i}'
f']/div[2]/ul/li[4]/span')
self.fuels.append(fuel.text)
except NSEE:
fuel = 'Failed to get'
self.fuels.append(fuel)
return self.fuels
elif thing_to_get == 'engine_size':
for i in range(1, counter + 1):
try:
engine_size = self.driver.find_element_by_xpath(
f'/html/body/div[4]/div[2]/section/div[2]/div[1]/div/div[1]/div[5]/article[{i}'
f']/div[2]/ul/li[3]/span')
self.engine_sizes.append(change_to_int(engine_size.text))
except NSEE:
engine_size = 0000
self.engine_sizes.append(engine_size)
return self.engine_sizes
elif thing_to_get == 'url':
self.urls = [url.get_attribute('href') for url in
self.driver.find_elements_by_class_name('offer-title__link')]
return self.urls
else:
raise WrongThingToGetError
except WrongThingToGetError:
print('Wrong thing to get')
def get_products_price_and_currency(self, counter):
for i in range(1, counter + 1):
try:
price = self.driver.find_element_by_xpath(
f'/html/body/div[4]/div[2]/section/div[2]/div[1]/div/div[1]/div[5]/article[{i}'
f']/div[2]/div[2]/div/div[1]/span')
value, currency = get_price_and_currency(price.text)
self.prices.append(value)
self.currencies.append(currency)
except NSEE:
value = 0000
currency = 'Failed'
self.prices.append(value)
self.currencies.append(currency)
try:
negotiable = self.driver.find_element_by_xpath(
f'/html/body/div[4]/div[2]/section/div[2]/div[1]/div/d'
f'iv[1]/div[5]/article[{i}]/div[2]/div[2]/div/span').text
self.negotiable.append(is_negotiable(negotiable))
except NSEE:
negotiable = 'Failed to get'
self.negotiable.append(negotiable)
return self.prices, self.currencies, self.negotiable
def search(self):
while True:
if self.isclosed:
break
else:
number_of_articles = len(self.driver.find_elements_by_tag_name('article'))
makes, models = self.get_products_make_and_model('offer-title__link')
mileages = self.get_products('mileage', number_of_articles)
years = self.get_products('year', number_of_articles)
fuels = self.get_products('fuel', number_of_articles)
engine_sizes = self.get_products('engine_size', number_of_articles)
urls = self.get_products('url', number_of_articles)
prices, currencies, negotiable = self.get_products_price_and_currency(number_of_articles)
for i in range(number_of_articles):
temporary_list = (makes[i], models[i], mileages[i], years[i], fuels[i],
engine_sizes[i], urls[i], prices[i], currencies[i], negotiable[i])
self.list_of_tuples.append(temporary_list)
print(temporary_list)
del temporary_list
makes.clear(), models.clear(), mileages.clear(), years.clear(), fuels.clear(), engine_sizes.clear()
urls.clear(), prices.clear(), currencies.clear(), negotiable.clear()
self.next_page()
return self.list_of_tuples
def next_page(self):
try:
interupting_element = self.driver.find_element_by_xpath('/html/body/div[4]/div[15]/div/div/a')
interupting_element.click()
except ENIE:
pass
li_index = len(self.driver.find_element_by_xpath('/html/body/div[4]/div[2]/section/div[2]/div[2]/ul')
.find_elements_by_tag_name('li'))
if li_index == 7 and self.count == 2:
self.isclosed = True
self.driver.close()
elif self.count == 1:
nexts = WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located(
(By.XPATH, f"/html/body/div[4]/div[2]/section/div[2]/div[2]/ul/li[{li_index}]/a"))
)
nexts.click()
self.count = 2
else:
nexts = WebDriverWait(self.driver, 20).until(EC.presence_of_element_located(
(By.XPATH, f'/html/body/div[4]/div[2]/section/div[2]/div[2]/ul/li[{li_index}]/a'))
)
nexts.click()
if __name__ == '__main__':
temp = CarsScrapper().search()
print(temp)
|
{"/database.py": ["/scrapper.py"], "/scrapper.py": ["/exceptions.py"], "/main.py": ["/database.py"]}
|
3,022
|
Gatszow/CarsScrapper
|
refs/heads/master
|
/exceptions.py
|
class TooSmallNumberOfRowError(Exception):
pass
class WrongThingToGetError(Exception):
pass
|
{"/database.py": ["/scrapper.py"], "/scrapper.py": ["/exceptions.py"], "/main.py": ["/database.py"]}
|
3,023
|
Gatszow/CarsScrapper
|
refs/heads/master
|
/main.py
|
from database import DatabaseUpdater
if __name__ == '__main__':
# instantiate the updater; add() scrapes otomoto.pl and inserts the new rows
DatabaseUpdate = DatabaseUpdater()
DatabaseUpdate.add()
|
{"/database.py": ["/scrapper.py"], "/scrapper.py": ["/exceptions.py"], "/main.py": ["/database.py"]}
|
3,030
|
RobinHeath-Albuquerque/robin_heath_RPSLS
|
refs/heads/main
|
/rpsls.py
|
import random
from Game import Game, my_rules, my_gestures
from Computer import Computer, computer
from unittest import result
x = input('Please enter your name:')
print('Hello, ' + x + '. Good luck!')
print()
print('Here are the rules:')
for rule in my_rules:
print(rule)
print()
score_limit = 5
print('The first player to reach', score_limit, 'points wins the game!')
print()
playerOne_score = 0
computer_score = 0
while playerOne_score < score_limit and computer_score < score_limit:
playerOne = input("Please enter your gesture:").lower()
computer_move = random.choice(my_gestures).lower()
print("The computer chooses", computer_move)
if computer_move == playerOne:
print("Tie!!")
elif computer_move == "paper" and playerOne in ("rock", "spock"):
print("The computer scores")
computer_score = computer_score + 1
print("The computers score is:", computer_score)
elif computer_move == "rock" and playerOne in ("paper", "spock"):
print(x + " scores")
playerOne_score = playerOne_score + 1
print("Your score is:", playerOne_score)
elif computer_move == "rock" and playerOne in ("scissors", "lizard"):
print("The computer scores")
computer_score = computer_score + 1
print("The computers score is:", computer_score)
elif computer_move == "scissors" and playerOne in ("rock", "spock"):
print(x + " scores")
playerOne_score = playerOne_score + 1
print("Your score is:", playerOne_score)
elif computer_move == "paper" and playerOne in ("scissors", "lizard"):
print(x + " scores")
playerOne_score = playerOne_score + 1
print("Your score is:", playerOne_score)
elif computer_move == "scissors" and playerOne in ("paper", "lizard"):
print("The computer scores")
computer_score = computer_score + 1
print("The computers score is:", computer_score)
# remaining computer gestures, completing the rule set listed in Game.my_rules
elif computer_move == "lizard" and playerOne in ("spock", "paper"):
print("The computer scores")
computer_score = computer_score + 1
print("The computers score is:", computer_score)
elif computer_move == "lizard" and playerOne in ("rock", "scissors"):
print(x + " scores")
playerOne_score = playerOne_score + 1
print("Your score is:", playerOne_score)
elif computer_move == "spock" and playerOne in ("scissors", "rock"):
print("The computer scores")
computer_score = computer_score + 1
print("The computers score is:", computer_score)
elif computer_move == "spock" and playerOne in ("paper", "lizard"):
print(x + " scores")
playerOne_score = playerOne_score + 1
print("Your score is:", playerOne_score)
else:
print("Unrecognised gesture, no points this round")
# once a player reaches the score limit the loop ends and the winner is announced
if playerOne_score == score_limit:
print("Congrats! You won!")
elif computer_score == score_limit:
print("The computer won, better luck next time")
|
{"/rpsls.py": ["/Game.py", "/Computer.py"], "/main.py": ["/Game.py", "/Players.py", "/Lizard.py", "/Spock.py", "/Paper.py", "/Scissors.py"], "/Human.py": ["/Players.py"], "/Computer.py": ["/Players.py", "/Game.py"]}
|
3,031
|
RobinHeath-Albuquerque/robin_heath_RPSLS
|
refs/heads/main
|
/Spock.py
|
class Spock:
def __init__(self):
self.name = 'Spock'
self.loses_to = ['Lizard', 'Paper']
|
{"/rpsls.py": ["/Game.py", "/Computer.py"], "/main.py": ["/Game.py", "/Players.py", "/Lizard.py", "/Spock.py", "/Paper.py", "/Scissors.py"], "/Human.py": ["/Players.py"], "/Computer.py": ["/Players.py", "/Game.py"]}
|
3,032
|
RobinHeath-Albuquerque/robin_heath_RPSLS
|
refs/heads/main
|
/Scissors.py
|
class Scissors:
def __init__(self):
self.name = 'Scissors'
self.loses_to = ['Rock', 'Spock']
|
{"/rpsls.py": ["/Game.py", "/Computer.py"], "/main.py": ["/Game.py", "/Players.py", "/Lizard.py", "/Spock.py", "/Paper.py", "/Scissors.py"], "/Human.py": ["/Players.py"], "/Computer.py": ["/Players.py", "/Game.py"]}
|
3,033
|
RobinHeath-Albuquerque/robin_heath_RPSLS
|
refs/heads/main
|
/Game.py
|
from random import randrange, random
class Game:
    def __init__(self, gestures, rules):
        self.name = ()
        # Use the arguments that were passed in rather than the module-level lists
        self.gestures = gestures
        self.rules = rules
my_gestures = ['rock', 'Spock', 'paper', 'lizard', 'scissors']
my_rules = ['Rock crushes Scissors', 'Scissors cuts Paper', 'Paper covers Rock', 'Rock crushes Lizard',
            'Lizard poisons Spock', 'Spock smashes Scissors', 'Scissors decapitates Lizard',
            'Lizard eats Paper', 'Paper disproves Spock', 'Spock vaporizes Rock']
def result(winner_result, player_choice, computer_choice, win=0, lose=0, tie=0):
    # accumulate the appropriate winner-of-game total
    if winner_result == 'win':
        win += 1
    elif winner_result == 'lose':
        lose += 1
    else:
        tie += 1
    return winner_result
|
{"/rpsls.py": ["/Game.py", "/Computer.py"], "/main.py": ["/Game.py", "/Players.py", "/Lizard.py", "/Spock.py", "/Paper.py", "/Scissors.py"], "/Human.py": ["/Players.py"], "/Computer.py": ["/Players.py", "/Game.py"]}
|
3,034
|
RobinHeath-Albuquerque/robin_heath_RPSLS
|
refs/heads/main
|
/main.py
|
from Game import Game, my_gestures, my_rules
from Players import Players
from Lizard import Lizard
from Spock import Spock
from Paper import Paper
from Scissors import Scissors
from Rock import Rock
if __name__ == '__main__':
    # Build the game with the shared gesture and rule lists defined in Game.py
    game = Game(my_gestures, my_rules)
    # The interactive game loop lives in rpsls.py and runs when that script is executed;
    # Game does not define run_game() and there is no RPSLS.rpsls() helper yet.
    # game.run_game()
    # RPSLS.rpsls("rock")
    # RPSLS.rpsls("Spock")
    # RPSLS.rpsls("paper")
    # RPSLS.rpsls("lizard")
    # RPSLS.rpsls("scissors")
|
{"/rpsls.py": ["/Game.py", "/Computer.py"], "/main.py": ["/Game.py", "/Players.py", "/Lizard.py", "/Spock.py", "/Paper.py", "/Scissors.py"], "/Human.py": ["/Players.py"], "/Computer.py": ["/Players.py", "/Game.py"]}
|
3,035
|
RobinHeath-Albuquerque/robin_heath_RPSLS
|
refs/heads/main
|
/Lizard.py
|
class Lizard:
def __init__(self):
self.name = 'Lizard'
self.loses_to = ['Rock', 'Scissors']
|
{"/rpsls.py": ["/Game.py", "/Computer.py"], "/main.py": ["/Game.py", "/Players.py", "/Lizard.py", "/Spock.py", "/Paper.py", "/Scissors.py"], "/Human.py": ["/Players.py"], "/Computer.py": ["/Players.py", "/Game.py"]}
|
3,036
|
RobinHeath-Albuquerque/robin_heath_RPSLS
|
refs/heads/main
|
/Players.py
|
class Players:
    def __init__(self, types):
        self.choice = ''
        # Store the types passed in (the original referenced the module-level list instead)
        self.types = types
my_players = ['human', 'computer']
|
{"/rpsls.py": ["/Game.py", "/Computer.py"], "/main.py": ["/Game.py", "/Players.py", "/Lizard.py", "/Spock.py", "/Paper.py", "/Scissors.py"], "/Human.py": ["/Players.py"], "/Computer.py": ["/Players.py", "/Game.py"]}
|
3,037
|
RobinHeath-Albuquerque/robin_heath_RPSLS
|
refs/heads/main
|
/Human.py
|
from Players import Players, my_players
class Human(Players):
    def make_gesture(self):
        # Players defines a 'choice' attribute; the original printed an undefined 'gestures'
        print(self.choice)
playerOne = Human(my_players)
playerOne.make_gesture()
playerTwo = Human(my_players)
|
{"/rpsls.py": ["/Game.py", "/Computer.py"], "/main.py": ["/Game.py", "/Players.py", "/Lizard.py", "/Spock.py", "/Paper.py", "/Scissors.py"], "/Human.py": ["/Players.py"], "/Computer.py": ["/Players.py", "/Game.py"]}
|
3,038
|
RobinHeath-Albuquerque/robin_heath_RPSLS
|
refs/heads/main
|
/Paper.py
|
class Paper:
def __init__(self):
self.name = 'Paper'
self.loses_to = ['Scissors', 'Lizard']
|
{"/rpsls.py": ["/Game.py", "/Computer.py"], "/main.py": ["/Game.py", "/Players.py", "/Lizard.py", "/Spock.py", "/Paper.py", "/Scissors.py"], "/Human.py": ["/Players.py"], "/Computer.py": ["/Players.py", "/Game.py"]}
|
3,039
|
RobinHeath-Albuquerque/robin_heath_RPSLS
|
refs/heads/main
|
/Computer.py
|
from Players import Players
import random
from Game import Game, my_gestures
class Computer(Players):
    def __init__(self):
        # Pick a random gesture (the original stored the random.choice function itself)
        self.choice = random.choice(my_gestures)
    def make_gesture(self):
        print(self.choice)
computer = Computer()
|
{"/rpsls.py": ["/Game.py", "/Computer.py"], "/main.py": ["/Game.py", "/Players.py", "/Lizard.py", "/Spock.py", "/Paper.py", "/Scissors.py"], "/Human.py": ["/Players.py"], "/Computer.py": ["/Players.py", "/Game.py"]}
|
3,045
|
deharahawa/batida-ponto
|
refs/heads/master
|
/app/serializer.py
|
# from marshmallow_jsonapi.flask import Schema
from marshmallow_jsonapi import fields
from marshmallow import ValidationError
from flask_marshmallow import Marshmallow
ma = Marshmallow()
def configure(app):
"""
    Factory used to configure the app
"""
ma.init_app(app)
def must_not_be_blank(data):
"""
    Validates that the data is not blank
"""
if not data:
raise ValidationError('Dado não informado')
# class UserSchema(Schema):
class UserSchema(ma.SQLAlchemyAutoSchema):
"""
    Defines the User schema
"""
id = fields.Integer()
nome_completo = fields.Str(required=True, validate=must_not_be_blank)
    cpf = fields.Str(required=True, validate=must_not_be_blank)
email = fields.Str(required=True, validate=must_not_be_blank)
data_cadastro = fields.DateTime(dump_only=True)
# pontos = ma.Nested(PontoSchema, many=True)
# class PontoSchema(Schema):
class PontoSchema(ma.SQLAlchemyAutoSchema):
id = fields.Integer()
user = fields.Nested(UserSchema, validate=must_not_be_blank)
user_id = fields.Integer()
data_batida = fields.DateTime(dump_only=True)
tipo_batida = fields.Integer()
|
{"/app/checks.py": ["/app/serializer.py", "/app/models.py"], "/app/__init__.py": ["/app/models.py", "/app/serializer.py", "/app/users.py", "/app/checks.py"], "/app/users.py": ["/app/serializer.py", "/app/models.py"], "/app/models.py": ["/app/serializer.py"]}
|
3,046
|
deharahawa/batida-ponto
|
refs/heads/master
|
/app/checks.py
|
from flask import Blueprint, request, jsonify, current_app
from .serializer import PontoSchema
from .models import Ponto, User
from datetime import datetime
from marshmallow import ValidationError
import re
ponto_blueprint = Blueprint('checks', __name__)
def get_horas(dado):
    """
    Uses a regex to extract the time in UTC format
    """
    horas = re.findall('[0-9]{2}:[0-9]{2}:[0-9]{2}', dado)
    return horas
def get_date(dado):
    """
    Uses a regex to extract the date in UTC format
    """
    date = re.findall('[0-9]{4}-[0-9]{2}-[0-9]{2}', dado)
    return date
def get_ano_mes_dia(dado):
    """
    Uses a regex to split year, month and day
    """
    ano, mes, dia = re.split('[^0-9]+', dado)
    return int(ano), int(mes), int(dia)
def get_hora_minutos_segs(dado):
    """
    Uses a regex to split hours, minutes and seconds
    """
    hora, minutos, segs = re.split('[^0-9]+', dado)
    return int(hora), int(minutos), int(segs)
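
# Illustrative self-checks (not part of the original module): the helpers above applied
# to a sample UTC timestamp string.
assert get_date('2021-03-05T08:30:00') == ['2021-03-05']
assert get_horas('2021-03-05T08:30:00') == ['08:30:00']
assert get_ano_mes_dia('2021-03-05') == (2021, 3, 5)
assert get_hora_minutos_segs('08:30:00') == (8, 30, 0)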
@ponto_blueprint.route('/ponto', methods=['POST'])
def cadastrar():
    # Instantiate PontoSchema
    ponto_schema = PontoSchema()
    json_data = request.json
    # Check whether any data came in the request
    if not json_data:
        return {"message": "Sem dados informados"}, 400
    # Check whether the load raises a validation error
    try:
        # marshmallow 3 returns only the data and raises ValidationError on bad input
        data = ponto_schema.load(json_data)
    except ValidationError as err:
        return err.messages, 422
    # Fetch the user who is clocking in/out
    user = User.query.filter_by(id = data['user_id']).first()
    # Pull every clock entry registered by the user
    ponto_anterior = Ponto.query.filter(Ponto.user_id == data['user_id'])
    # Grab the previous entry to check that the user is not registering the same entry type twice
    ponto_anterior = PontoSchema(many=True).jsonify(ponto_anterior)
    # Convert to json
    ponto_anterior_json = ponto_anterior.json
    # Check whether there is already a clock entry; otherwise there is no previous one
    if len(ponto_anterior_json) > 0:
        # Holds the type of the previous clock entry
        tipo_batida_memory = 0
        # Store the actual type of the previous entry
        tipo_batida_memory = ponto_anterior_json[len(ponto_anterior_json)-1]['tipo_batida']
        # Make sure the user is not registering the same entry type twice in a row
        if tipo_batida_memory != data['tipo_batida']:
            tipo_batida_memory = data['tipo_batida']
        else:
            return {"message":"Ponto já batido"}
    # Get the current time
    now = datetime.now()
    if user is None:
        # Register a user for the entry if none exists in the database
        user = User(nome_completo="Nao identificado", cpf="0", email='nao@identificado.com', data_cadastro=now)
    # Create the clock entry
    ponto = Ponto(user=user, user_id=data['user_id'], tipo_batida=data['tipo_batida'], data_batida=now)
    # Save the changes to the database
    current_app.db.session.add(ponto)
    current_app.db.session.commit()
    return ponto_schema.jsonify(ponto), 201
@ponto_blueprint.route('/pontos', methods=['GET'])
def mostrar():
"""
    Select every clock entry from every user
    """
    result = Ponto.query.all()
    return PontoSchema(many=True).jsonify(result), 200
@ponto_blueprint.route('/pontos/<identificador>', methods=['GET'])
def mostrar_usuario(identificador):
    """
    Show every clock entry for a specific user
    """
    # Query using the user_id
    result = Ponto.query.filter_by(user_id = identificador)
    # Call the function that computes the total hours for the given user
    horas_trabalhadas = calcula_horas(identificador)
    # Take the result of the query made above
    result = PontoSchema(many=True).jsonify(result)
    # Append the worked hours to the last entry returned
    result.json[len(result.json)-1]['horas_trabalhadas'] = horas_trabalhadas.get('horas trabalhadas')
return jsonify(result.json), 200
@ponto_blueprint.route('/pontos-user/<identificador>', methods=['GET'])
def calcula_horas(identificador):
    """
    Computes the worked hours
    """
    # Compute the hours worked by the user
    data = Ponto.query.filter(Ponto.user_id == identificador)
    # Take the query result
    result_json = PontoSchema(many=True).jsonify(data)
    # Create lists to hold clock-ins and clock-outs
    entrada = []
    saida = []
    # Walk the query result fields to separate clock-in entries from clock-out entries
    for field in result_json.json:
        if field['tipo_batida'] == 1:
            entrada.append(field['data_batida'])
        else:
            saida.append(field['data_batida'])
    # Now compute the total worked hours
    horas_total = []
    for i in range(len(saida)):
        # Should not cause problems because we iterate over clock-outs; if the employee clocked in and has not clocked out yet, the clock-out list is automatically shorter than the clock-in list
        # Get the clock-in date
        date_entrada = get_date(entrada[i])
        ano_entrada, mes_entrada, dia_entrada = get_ano_mes_dia(date_entrada[0])
        # Get the clock-out date
        date_saida = get_date(saida[i])
        ano_saida, mes_saida, dia_saida = get_ano_mes_dia(date_saida[0])
        # A few checks to avoid comparisons that make no sense
        if ano_saida != ano_entrada:
            continue
        if mes_saida != mes_entrada:
            continue
        if dia_entrada > dia_saida:
            continue
        # Get the clock-in and clock-out times
        time_entrada = get_horas(entrada[i])
        time_saida = get_horas(saida[i])
        hora_entrada, mins_entrada, segs_entrada = get_hora_minutos_segs(time_entrada[0])
        hora_saida, mins_saida, segs_saida = get_hora_minutos_segs(time_saida[0])
        if (dia_saida - dia_entrada) == 1:
            # Overnight shift case
            if hora_entrada > hora_saida:
                if mins_entrada > mins_saida:
                    # Case where the minute difference does not complete an hour and the count restarts because the hour rolled over
                    # For example from 23:59 to 09:05 there are 6 minutes, and that calculation is possible here
                    minutos_trabalhados = 60 - mins_entrada
                    minutos_trabalhados += mins_saida
                    # subtract one because the hour is not complete
                    hora_saida -= 1
                elif mins_entrada <= mins_saida:
                    # Compute the worked minutes normally
                    minutos_trabalhados = mins_saida - mins_entrada
                # Compute the time until midnight
                hora_entrada_mins = (24*60) - ((hora_entrada*60) + mins_entrada)
                if (hora_entrada_mins + minutos_trabalhados) >= 60:
                    while((hora_entrada_mins + minutos_trabalhados) >= 60):
                        # Convert the time until midnight into whole hours; the leftover minutes are counted as a fraction of an hour
                        minutos_trabalhados -= 60
                        hora_saida += 1
                # Compute the worked hours plus the fraction of an hour
                horas_trabalhadas = hora_saida + (minutos_trabalhados/60)
                horas_total.append(horas_trabalhadas)
            else:
                # Then it happened on the same day and the loop below still handles it
                continue
        if dia_saida == dia_entrada:
            # Clock-in and clock-out on the same day
            if mins_entrada > mins_saida:
                # Case where the minute difference does not complete an hour and the count restarts because the hour rolled over
                # For example from 10:45 to 11:10 there are 25 minutes, and that calculation is possible here
                minutos_trabalhados = 60 - mins_entrada
                minutos_trabalhados += mins_saida
                # subtract one because the hour is not complete
                hora_saida -= 1
            elif mins_entrada <= mins_saida:
                minutos_trabalhados = mins_saida - mins_entrada
            # Compute the worked hours by subtracting the clock-in hour from the clock-out hour plus the minute fraction
            horas_trabalhadas = (hora_saida-hora_entrada) + (minutos_trabalhados/60)
            horas_total.append(horas_trabalhadas)
    soma_horas = 0.0
    for horas in horas_total:
        # Sum the total hours across all days or periods
        soma_horas += horas
    return {"horas trabalhadas": ("%.2f horas" % soma_horas)}
@ponto_blueprint.route('/limpar/', methods=['GET'])
def deletar():
"""
Limpa todos os pontos
"""
# Pega todas as batidas de ponto e deleta
Ponto.query.filter().delete()
# Salva as alteracoes no banco
current_app.db.session.commit()
return jsonify('Limpa a base')
|
{"/app/checks.py": ["/app/serializer.py", "/app/models.py"], "/app/__init__.py": ["/app/models.py", "/app/serializer.py", "/app/users.py", "/app/checks.py"], "/app/users.py": ["/app/serializer.py", "/app/models.py"], "/app/models.py": ["/app/serializer.py"]}
|
3,047
|
deharahawa/batida-ponto
|
refs/heads/master
|
/app/__init__.py
|
from flask import Flask
from flask_migrate import Migrate
from .models import configure as config_db
from .serializer import configure as config_ma
def create_app():
app = Flask(__name__)
# sqlite db uri configuration
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/users.db'
# remove error from track mod
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    # Configure the DB
    config_db(app)
    # Configure Marshmallow
    config_ma(app)
    # Run the migration setup
    Migrate(app, app.db)
    # Import the blueprints
from .users import user_blueprint
app.register_blueprint(user_blueprint)
from .checks import ponto_blueprint
app.register_blueprint(ponto_blueprint)
return app
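
# Hedged usage sketch (not in the original file): run the factory directly for local development.
if __name__ == '__main__':
    create_app().run(debug=True)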
|
{"/app/checks.py": ["/app/serializer.py", "/app/models.py"], "/app/__init__.py": ["/app/models.py", "/app/serializer.py", "/app/users.py", "/app/checks.py"], "/app/users.py": ["/app/serializer.py", "/app/models.py"], "/app/models.py": ["/app/serializer.py"]}
|
3,048
|
deharahawa/batida-ponto
|
refs/heads/master
|
/app/users.py
|
from flask import Blueprint, request, jsonify, current_app
from marshmallow import ValidationError
from .serializer import UserSchema
from .models import User
from datetime import datetime
user_blueprint = Blueprint('usuarios', __name__)
@user_blueprint.route('/cadastrar', methods=['POST'])
def cadastrar():
    """
    Registers a user in the database
    """
    # Instantiate the schema
    user_schema = UserSchema()
    # Load the request data (marshmallow 3 raises ValidationError on invalid input)
    try:
        user = user_schema.load(request.json)
    except ValidationError as error:
        return jsonify(error.messages), 401
    # Get the current date in UTC format
    now = datetime.now()
    # Create the user
    user = User(nome_completo=user['nome_completo'], cpf=user['cpf'], email=user['email'], data_cadastro=now)
    # Save the changes to the database
    current_app.db.session.add(user)
    current_app.db.session.commit()
    return user_schema.jsonify(user), 201
@user_blueprint.route('/mostrar', methods=['GET'])
def mostrar():
"""
    Show all registered users
    """
    # Query all users
result = User.query.all()
return UserSchema(many=True).jsonify(result), 200
@user_blueprint.route('/modificar/<identificador>', methods=['POST'])
def modificar(identificador):
"""
    Allows modifying a user without touching the id and the registration date
    """
    # Instantiate the schema
    user_schema = UserSchema()
    # Query the specific user
    query = User.query.filter(User.id == identificador)
    # Apply the update
    query.update(request.json)
    # Save the change
current_app.db.session.commit()
return user_schema.jsonify(query.first())
@user_blueprint.route('/deletar/<identificador>', methods=['GET'])
def deletar(identificador):
"""
    Delete a user
    """
    # Query for the specific user and delete it
    User.query.filter(User.id == identificador).delete()
    # Save the changes
current_app.db.session.commit()
return jsonify('Deletado')
|
{"/app/checks.py": ["/app/serializer.py", "/app/models.py"], "/app/__init__.py": ["/app/models.py", "/app/serializer.py", "/app/users.py", "/app/checks.py"], "/app/users.py": ["/app/serializer.py", "/app/models.py"], "/app/models.py": ["/app/serializer.py"]}
|
3,049
|
deharahawa/batida-ponto
|
refs/heads/master
|
/app/models.py
|
from flask_sqlalchemy import SQLAlchemy
from flask_rest_jsonapi import ResourceDetail, ResourceList
from .serializer import UserSchema
db = SQLAlchemy()
def configure(app):
"""
Factory para poder configurar
"""
# Inicializa o app
db.init_app(app)
with app.app_context():
# Cria as alteracoes usando o contexto
db.create_all()
app.db = db
class User(db.Model):
"""
Define a class que reprenta o model do User
"""
id = db.Column(db.Integer, primary_key=True)
nome_completo = db.Column(db.String(255))
cpf = db.Column(db.String(11))
email = db.Column(db.String(255))
data_cadastro = db.Column(db.DateTime)
class Ponto(db.Model):
"""
Define a class que reprenta o model do Ponto
"""
id = db.Column(db.Integer, primary_key=True)
# Define a chave estrangeira do relacionamento 1 para muitos
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# Define o relacionamento entre user e pontos
user = db.relationship('User', backref='checks')
data_batida = db.Column(db.DateTime)
tipo_batida = db.Column(db.Integer)
|
{"/app/checks.py": ["/app/serializer.py", "/app/models.py"], "/app/__init__.py": ["/app/models.py", "/app/serializer.py", "/app/users.py", "/app/checks.py"], "/app/users.py": ["/app/serializer.py", "/app/models.py"], "/app/models.py": ["/app/serializer.py"]}
|
3,058
|
raulezama/bookstore
|
refs/heads/master
|
/frontend.py
|
from tkinter import *
import backend
def view_command():
    list1.delete(0,END) #placed before the for loop iterates the list so the operation is not repeated; if placed after, the list gets wiped.
for row in backend.view():
        list1.insert(END, row) #"""pyinstaller --onefile --windowed frontend.py / INSTALL PYINSTALLER"""
def search_command():
list1.delete(0,END)
for row in backend.search(entry_title.get(),entry_author.get(), entry_year.get(), entry_id.get()):
list1.insert(END, row)
def add_command():
backend.insert(entry_title.get(),entry_author.get(), entry_year.get(), entry_id.get())
    list1.delete(0,END) #clears the list
list1.insert(END,entry_title.get(),entry_author.get(), entry_year.get(), entry_id.get())
def get_selected_row(event): #function to link the listbox selection with the delete button
    global selected_tuple #declared global so it can be used in the delete function
    index=list1.curselection()[0] #cursor position within the selected list, index starts at 0
    selected_tuple=list1.get(index) #extract all the information for that id
e1.delete(0, END)
e1.insert(END, selected_tuple[1]) #title index 1
e2.delete(0, END)
e2.insert(END, selected_tuple[2])
e3.delete(0, END)
e3.insert(END, selected_tuple[3])
e4.delete(0, END)
e4.insert(END, selected_tuple[4])
def delete_command():
backend.delete(selected_tuple[0])
def update_command():
backend.update(selected_tuple[0], entry_title.get(),entry_author.get(), entry_year.get(), entry_id.get())
window= Tk()
window.wm_title("BookStore")
la1= Label(window, text="Title")
la1.grid(row=0, column=0)
la2= Label(window, text="Year")
la2.grid(row=1, column=0)
la3= Label(window, text="Author")
la3.grid(row=0, column=2)
la4= Label(window, text="ISBN")
la4.grid(row=1, column=2)
entry_title=StringVar()
e1= Entry(window, textvariable=entry_title)
e1.grid(row=0, column=1)
entry_author=StringVar()
e2= Entry(window, textvariable=entry_author)
e2.grid(row=0, column=3)
entry_year=StringVar()
e3= Entry(window, textvariable=entry_year)
e3.grid(row=1, column=1)
entry_id=StringVar()
e4= Entry(window, textvariable=entry_id)
e4.grid(row=1, column=3)
list1=Listbox(window, height=6, width=35)
list1.grid(row=2, column=0, rowspan=6, columnspan=2)
scbar=Scrollbar(window)
scbar.grid(row=2, column=2, rowspan=6)
list1.configure(yscrollcommand=scbar.set)
scbar.configure(command=list1.yview)
list1.bind('<<ListboxSelect>>', get_selected_row) #Bind the selection event to the listbox
b1=Button(window, text="View all", width=12, command=view_command)
b1.grid(row=2, column=3)
b2=Button(window, text="Search entry", width=12, command=search_command)
b2.grid(row=3, column=3)
b3=Button(window, text="Add entry", width=12, command=add_command)
b3.grid(row=4, column=3)
b4=Button(window, text="Update selected", width=12, command=update_command)
b4.grid(row=5, column=3)
b5=Button(window, text="Delete selected", width=12, command=delete_command)
b5.grid(row=6, column=3)
b6=Button(window, text="Close", width=12, command=window.destroy)
b6.grid(row=7, column=3)
window.mainloop()
|
{"/frontend.py": ["/backend.py"]}
|
3,059
|
raulezama/bookstore
|
refs/heads/master
|
/backend.py
|
import sqlite3
def connect_db():
conn= sqlite3.connect("books.db")
cur= conn.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS book (id INTEGER PRIMARY KEY, title TEXT, author TEXT, year INTEGER, isbn INTEGER)")
conn.commit()
conn.close()
def insert(title, author, year, isbn):
conn= sqlite3.connect("books.db")
cur= conn.cursor()
cur.execute("INSERT INTO book VALUES (NULL, ?, ?, ?, ?) ", (title, author, year, isbn))
conn.commit()
conn.close()
def view():
conn= sqlite3.connect("books.db")
cur= conn.cursor()
cur.execute("SELECT * FROM book")
rows=cur.fetchall()
conn.close()
return rows
def search(title="", author="", year="", isbn=""): #Le pasamos parametros de busqueda para el filtro y cadenas vacias para que no retorne ningun error
conn=sqlite3.connect("books.db")
cur=conn.cursor()
cur.execute("SELECT * FROM book WHERE title=? OR author=? OR year=? OR isbn=?", (title, author, year, isbn))
rows=cur.fetchall()
conn.close()
return rows
def delete(id): #The id parameter is passed because the record is deleted by that argument
conn= sqlite3.connect("books.db")
cur= conn.cursor()
cur.execute("DELETE FROM book WHERE id=?",(id,))
conn.commit()
conn.close()
def update(id, title, author, year, isbn):
conn= sqlite3.connect("books.db")
cur= conn.cursor()
cur.execute("UPDATE book SET title=?, author=?, year=?, isbn=? WHERE id=?",(title, author, year, isbn, id))
conn.commit()
conn.close()
connect_db()
#insert("The Lord of the Rings", "J.R.R Tolkien", 1942, 3344348)
#delete(3)
#update(2, "The lord of the rings", "JRR Tolkien", 1956, 64646)
#print(view())
#print(search("The Vampire Diaries"))
|
{"/frontend.py": ["/backend.py"]}
|
3,062
|
joshharper64/frost
|
refs/heads/master
|
/resident_reports/apps.py
|
from django.apps import AppConfig
class ResidentReportsConfig(AppConfig):
name = 'resident_reports'
|
{"/resident_reports/views.py": ["/resident_reports/models.py"], "/resident_reports/admin.py": ["/resident_reports/models.py"]}
|
3,063
|
joshharper64/frost
|
refs/heads/master
|
/resident_reports/migrations/0003_auto_20170517_0033.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-17 00:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('resident_reports', '0002_report'),
]
operations = [
migrations.RemoveField(
model_name='report',
name='topic',
),
migrations.DeleteModel(
name='Topic',
),
]
|
{"/resident_reports/views.py": ["/resident_reports/models.py"], "/resident_reports/admin.py": ["/resident_reports/models.py"]}
|
3,064
|
joshharper64/frost
|
refs/heads/master
|
/resident_reports/urls.py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^allreports/$', views.allreports, name='allreports'),
url(r'^new_report/$', views.new_report, name='new_report'),
url(r'^edit_report/(?P<report_id>\d+)/$', views.edit_report, name='edit_report'),
]
|
{"/resident_reports/views.py": ["/resident_reports/models.py"], "/resident_reports/admin.py": ["/resident_reports/models.py"]}
|
3,065
|
joshharper64/frost
|
refs/heads/master
|
/resident_reports/views.py
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
from .models import Report
from .forms import ReportForm
def allreports(request):
""" Show list of all reports, regardless of topic """
reports = Report.objects.order_by('-date_added')
context = {'reports': reports}
return render(request, 'resident_reports/allreports.html', context)
@login_required
def new_report(request):
""" Add new report """
if request.method != 'POST':
form = ReportForm()
else:
form = ReportForm(data=request.POST)
if form.is_valid():
new_entry = form.save(commit=False)
new_entry.user_name = request.user
            new_entry.save()
return HttpResponseRedirect(reverse('resident_reports:allreports'))
context = {'form': form}
return render(request, 'resident_reports/new_report.html', context)
@login_required
def edit_report(request, report_id):
    """ Edit an existing report """
    report = Report.objects.get(id=report_id)
    # Only the author of a report may edit it
    if report.user_name != request.user:
        return HttpResponseRedirect(reverse('resident_reports:allreports'))
    if request.method != 'POST':
        form = ReportForm(instance=report)
    else:
        form = ReportForm(instance=report, data=request.POST)
        if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('resident_reports:allreports'))
context = {'report': report, 'form': form}
return render(request, 'resident_reports/edit_report.html', context)
|
{"/resident_reports/views.py": ["/resident_reports/models.py"], "/resident_reports/admin.py": ["/resident_reports/models.py"]}
|
3,066
|
joshharper64/frost
|
refs/heads/master
|
/resident_reports/models.py
|
from django.db import models
from django.contrib.auth.models import User
class Report(models.Model):
""" Report by User """
text = models.TextField()
date_added = models.DateTimeField(auto_now_add=True)
user_name = models.ForeignKey(User)
class Meta:
verbose_name_plural = 'reports'
def __str__(self):
return self.text[:50] + "..."
|
{"/resident_reports/views.py": ["/resident_reports/models.py"], "/resident_reports/admin.py": ["/resident_reports/models.py"]}
|
3,067
|
joshharper64/frost
|
refs/heads/master
|
/resident_reports/admin.py
|
from django.contrib import admin
from resident_reports.models import Report
admin.site.register(Report)
|
{"/resident_reports/views.py": ["/resident_reports/models.py"], "/resident_reports/admin.py": ["/resident_reports/models.py"]}
|
3,068
|
joshharper64/frost
|
refs/heads/master
|
/homepage/views.py
|
from django.shortcuts import render
def index(request):
"""Homepage"""
return render(request, 'homepage/index.html')
def about(request):
"""About Section"""
return render(request, 'homepage/about.html')
|
{"/resident_reports/views.py": ["/resident_reports/models.py"], "/resident_reports/admin.py": ["/resident_reports/models.py"]}
|
3,099
|
GitGude/NBA-webscrape
|
refs/heads/master
|
/[sample] basketballref - example.py
|
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import string
import datetime
import time
def player_info():
players = []
base_url = 'http://www.basketball-reference.com/players/'
for letter in string.ascii_lowercase:
page_request = requests.get(base_url + letter)
soup = BeautifulSoup(page_request.text, 'html.parser')
table = soup.find('table')
# Testing if data is coming through..
# print(table)
if table:
table_body = table.find('tbody')
for row in table_body.findAll('tr'):
# print(row)
player_url = row.find('a')
player_names = player_url.text
player_pages = player_url['href']
print(player_url)
print(player_names)
print(player_pages)
# cells = row.findAll('td')
# active_from = int(cells[0].text)
# active_to = int(cells[1].text)
# position = cells[2].text
# height = cells[3].text
# weight = cells[4].text
# birth_date = cells[5].text
# college = cells[6].text
#
# player_entry = {'url': player_pages,
# 'name': player_names,
# 'active_from': active_from,
# 'active_to': active_to,
# 'position': position,
# 'college': college,
# 'height': height,
# 'weight': weight,
# 'birth_date': birth_date}
#
# players.append(player_entry)
#
# return pd.DataFrame(players)
#
# players_general_info = player_info()
print(player_info())
# print(players_general_info.head())
|
{"/Basketball-Ref - Seasons.Schedule&Resuls - scraper.py": ["/month_loop.py"]}
|
3,100
|
GitGude/NBA-webscrape
|
refs/heads/master
|
/NBA-Stats-Reader.py
|
import pandas as pd
import csv
import numpy as np
# to find the path of csv files
# import os
# print(os.getcwd())
# out: ../Users/Docs/etc
# print(os.listdir(os.getcwd())
# out: ['Names of csv files]
# Reading content from the csv
# Reading a csv file
df = pd.read_csv('../PythonProjects/NBA_TeamSchedule.csv')
# print(df.dtypes)
# print(df.columns)
# print(df.describe())
# List of NBA teams
ATL = 'Atlanta Hawks'
BOS = 'Boston Celtics'
BKN = 'Brooklyn Nets'
CHA = 'Charlotte Hornets'
CHI = 'Chicago Bulls'
CLE = 'Cleveland Cavaliers'
DAL = 'Dallas Mavericks'
DEN = 'Denver Nuggets'
DET = 'Detroit Pistons'
GSW = 'Golden State Warriors'
HOU = 'Houston Rockets'
IND = 'Indiana Pacers'
LAC = 'Los Angeles Clippers'
LAL = 'Los Angeles Lakers'
MEM = 'Memphis Grizzlies'
MIA = 'Miami Heat'
MIL = 'Milwaukee Bucks'
MIN = 'Minnesota Timberwolves'
NOR = 'New Orleans Pelicans'
OKC = 'Oklahoma City Thunder'
ORL = 'Orlando Magic'
PHI = 'Philadelphia 76ers'
PHO = 'Phoenix Suns'
POR = 'Portland Trail Blazers'
SAC = 'Sacramento Kings'
SAS = 'San Antonio Spurs'
TOR = 'Toronto Raptors'
UTA = 'Utah Jazz'
WAS = 'Washington Wizards'
# print(df)
# Creating a new column 'Difference'
df['Difference'] = df['Away Pts'] - df['Home Pts']
# print(df['Difference'])
p = df[df['Difference'] > 200][['Home Team', 'Away Team', 'Difference']]
print(p)
# if df['Difference'] > 200:
# print(['Away Team' + 'vs' + 'Home Team'])
# Things to find:
# - Matchups with the largest differentials
# - Which team wins at home more
# - Which team wins away more
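# Hedged sketch for the last two items above (column names come from the frame built earlier):
# a home win is a row where Home Pts exceeds Away Pts, and vice versa for away wins.
home_wins = df[df['Home Pts'] > df['Away Pts']]['Home Team'].value_counts()
away_wins = df[df['Away Pts'] > df['Home Pts']]['Away Team'].value_counts()
print(home_wins.head())
print(away_wins.head())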
# Writing the contents out of the csv
# with open('../PythonProjects/NBA_TeamSchedule.csv', newline='') as csvfile:
# b_reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
# for row in b_reader:
# print(','.join(row))
# df = pd.read_csv('../PythonProjects/[working] basketballref_teams.py', sep=',', keep_default_na=False)
|
{"/Basketball-Ref - Seasons.Schedule&Resuls - scraper.py": ["/month_loop.py"]}
|
3,101
|
GitGude/NBA-webscrape
|
refs/heads/master
|
/Basketball-Ref - Seasons.Schedule&Resuls - scraper.py
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
import string
import month_loop
# # 2018 Stat urls
# list = {'NBA_2018_games-october.html',
# 'NBA_2018_games-november.html',
# 'NBA_2018_games-december.html',
# 'NBA_2018_games-january.html''
# 'NBA_2018_games-february.html',
# 'NBA_2018_games-march.html'
# g = 'NBA_2018_games-april.html'
# h = 'NBA_2018_games-may.html'
# i = 'NBA_2018_games-june.html'
#
# # 2019 Stat urls
# j = 'NBA_2019_games-october.html'
# k = 'NBA_2019_games-november.html'
# l = 'NBA_2019_games-december.html'
# m = 'NBA_2019_games-january.html'
# n = 'NBA_2019_games-february.html'
# o = 'NBA_2019_games-march.html'
# p = 'NBA_2019_games-april.html'
def get_team_info():
# for letter in list:
teams = []
base_url = 'https://www.basketball-reference.com/leagues/'
for u in month_loop.get_url():
page_request = requests.get(base_url + u)
# print(page_request)
soup = BeautifulSoup(page_request.text, 'html.parser')
table = soup.find('table')
# print(soup)
# Looks at the table element..
if table:
table_body = table.find('tbody')
# Loops on all 'tr' elements on the table (rows)..
for row in table_body.findAll('tr'):
pl = row.findAll('th')
# print(pl)
data1 = row.findAll('td')
if not len(data1) <= 0:
home_pts = data1[4].text
# print(home_pts)
if not len(home_pts) <= 0:
cells = row.findAll('a')
# url = row.findAll['href']
date = cells[0].text
away_team = cells[1].text
home_team = cells[2].text
home_pts = int(data1[4].text)
away_pts = int(data1[2].text)
# boxscore_url = cells[3].text
# Testing the data that is pulled through from the above..
# print(date)
# print(away_team)
# print(home_team)
# print(home_pts)
# print(away_pts)
team_entry = {"Away Team": away_team,
"Away Pts": away_pts,
"Home Team": home_team,
"Home Pts": home_pts,
"xDate": date}
# "Boxscore URL": boxscre_url}
teams.append(team_entry)
# return pd.DataFrame(teams)
# Need to set the Date, Team Names and URL on another loop as it
# they are all under element 'a'...
# team_url = row.findAll('a')
# team_names = team_url.text
# team_pages = team_url['href']
# print(team_names)
# print(team_pages)
# This code aligns each iteration/index with a column header in a DataFrame..
# Currently works.. although we need to convert pts to intergesr.. currently all strings
# cells = row.findAll('td')
# date = cells[0].text
# start_time = cells[1].text
# away_team = cells[2].text
# away_pts = int(cells[3].text)
# home_team = cells[4].text
# home_pts = int(cells[5].text)
# boxscore = cells[6].text
# overtime = cells[7].text
# # attendance = cells[8].text
# # notes = cells[9].text
#
# team_entry = {"Date": date,
# "Start Time": start_time,
# "Away": away_team,
# "Away Pts": away_pts,
# "Home Team": home_team,
# "Home Pts": home_pts,
# "Boxscore url": boxscore,
# "Overtime": overtime}
# # "Attendance": attendance}
# # "Notes": notes}
#
# teams.append(team_entry)
#
return pd.DataFrame(teams)
teams_general_info = get_team_info()
# print(get_team_info())
# print(teams_general_info.head())
teams_general_info.to_csv("NBA_TeamSchedule.csv", sep=',', encoding='utf-8', index=False)
print("Saved NBA_TeamSchedule.csv")
# Writing to CSV
# def import_to_csv():
# wks = teams_general_info.to_csv("NBA_TeamSchedule.csv", sep='\t', encoding='utf-8')
# return wks
|
{"/Basketball-Ref - Seasons.Schedule&Resuls - scraper.py": ["/month_loop.py"]}
|
3,102
|
GitGude/NBA-webscrape
|
refs/heads/master
|
/Basketball-ref_TeamScore.py
|
from bs4 import BeautifulSoup
import requests
base_url = 'https://www.basketball-reference.com/leagues/'
Oct_2019 = 'NBA_2019_games-october.html'
Nov_2019 = 'NBA_2019_games-november.html'
Dec_2019 = 'NBA_2019_games-december.html'
Jan_2019 = 'NBA_2019_games-january.html'
Feb_2019 = 'NBA_2019_games-february.html'
Mar_2019 = 'NBA_2019_games-march.html'
Apr_2019 = 'NBA_2019_games-april.html'
|
{"/Basketball-Ref - Seasons.Schedule&Resuls - scraper.py": ["/month_loop.py"]}
|
3,103
|
GitGude/NBA-webscrape
|
refs/heads/master
|
/NBAProjekt2.py
|
import pygsheets
import pandas as pd
gc = pygsheets.authorize(service_account_file=r'\PythonProjects\venv\NBA Project 1-a1b8594c93d2.json')  # raw string so the backslashes are not treated as escapes
df = pd.DataFrame()
df['name'] = ['Kyle', 'Mel', 'Moochie']
sh = gc.open('NBAPython')
wks = sh[0]
wks.set_dataframe(df, (1, 1))  # write the DataFrame starting at cell (1, 1)
|
{"/Basketball-Ref - Seasons.Schedule&Resuls - scraper.py": ["/month_loop.py"]}
|
3,104
|
GitGude/NBA-webscrape
|
refs/heads/master
|
/month_loop.py
|
import calendar
# Working...
def get_month():
m = [] #Creating a list to store each month
for month in range(1, 13):
m.append(calendar.month_name[month].lower())
return m
def get_year():
year = []
base_year = 2016
while base_year < 2019:
base_year += 1
year.append(base_year)
return year
def get_url():
url = []
for i in get_year():
for m in get_month():
url.append('NBA_' + str(i) + '_games-' + str(m) + '.html')
return url
if __name__ == '__main__':
    print(get_url())
|
{"/Basketball-Ref - Seasons.Schedule&Resuls - scraper.py": ["/month_loop.py"]}
|
3,105
|
nforsch/SSCP19-mechanics-project7
|
refs/heads/master
|
/lhs.py
|
from pyDOE import *
from scipy.stats.distributions import norm
# Latin Hypercube Sampling
# see: https://pythonhosted.org/pyDOE/randomized.html
# Run LHS for n factors
X = lhs(4, samples=100) # lhs(n, [samples, criterion, iterations])
# Transform factors to normal distributions with means and standard deviations
means = [1, 2, 3, 4]
stdvs = [0.1, 0.5, 1, 0.25]
for i in range(4):
X[:, i] = norm(loc=means[i], scale=stdvs[i]).ppf(X[:, i])
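
# Hedged sanity check (not part of the original script): the transformed design should
# have column means and standard deviations close to the requested values.
print("column means:", X.mean(axis=0))   # expected near [1, 2, 3, 4]
print("column stds: ", X.std(axis=0))    # expected near [0.1, 0.5, 1, 0.25]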
|
{"/compute_displacement_subset.py": ["/demo.py"], "/compute_surface_nodes.py": ["/demo.py"]}
|
3,106
|
nforsch/SSCP19-mechanics-project7
|
refs/heads/master
|
/demo.py
|
import os
import numpy as np
import dolfin as df
import pulse
import ldrb
import matplotlib.pyplot as plt
def create_geometry(h5name):
"""
Create an lv-ellipsoidal mesh and fiber fields using LDRB algorithm
An ellipsoid is given by the equation
.. math::
\frac{x^2}{a} + \frac{y^2}{b} + \frac{z^2}{c} = 1
We create two ellipsoids, one for the endocardium and one
for the epicardium and subtract them and then cut the base.
For simplicity we assume that the longitudinal axis is in
in :math:`x`-direction and as default the base is located
at the :math:`x=0` plane.
"""
# Number of subdivision (higher -> finer mesh)
N = 13
# Parameter for the endo ellipsoid
a_endo = 1.5
b_endo = 0.5
c_endo = 0.5
# Parameter for the epi ellipsoid
a_epi = 2.0
b_epi = 1.0
c_epi = 1.0
# Center of the ellipsoid (same of endo and epi)
center = (0.0, 0.0, 0.0)
# Location of the base
base_x = 0.0
# Create a lv ellipsoid mesh with longitudinal axis along the x-axis
geometry = ldrb.create_lv_mesh(
N=N,
a_endo=a_endo,
b_endo=b_endo,
c_endo=c_endo,
a_epi=a_epi,
b_epi=b_epi,
c_epi=c_epi,
center=center,
base_x=base_x
)
# Select fiber angles for rule based algorithm
angles = dict(alpha_endo_lv=60, # Fiber angle on the endocardium
alpha_epi_lv=-60, # Fiber angle on the epicardium
beta_endo_lv=0, # Sheet angle on the endocardium
beta_epi_lv=0) # Sheet angle on the epicardium
fiber_space = 'Lagrange_1'
    # Compute the microstructure
fiber, sheet, sheet_normal = ldrb.dolfin_ldrb(mesh=geometry.mesh,
fiber_space=fiber_space,
ffun=geometry.ffun,
markers=geometry.markers,
**angles)
# Compute focal point
focal = np.sqrt(a_endo**2 - (0.5 * (b_endo + c_endo))**2)
    # Mark the mesh according to AHA zones
# pulse.geometry_utils.mark_strain_regions(mesh=geometry.mesh, foc=focal)
pulse.geometry_utils.mark_strain_regions(mesh=geometry.mesh,
foc=focal,
nsectors=(15, 15, 15, 5))
mapper = {'lv': 'ENDO', 'epi': 'EPI', 'rv': 'ENDO_RV', 'base': 'BASE'}
m = {mapper[k]: (v, 2) for k, v in geometry.markers.items()}
pulse.geometry_utils.save_geometry_to_h5(
geometry.mesh, h5name, markers=m,
fields=[fiber, sheet, sheet_normal],
overwrite_file=True
)
def load_geometry(h5name='ellipsoid.h5', recreate=False):
if not os.path.exists(h5name) or recreate:
create_geometry(h5name)
geo = pulse.HeartGeometry.from_file(h5name)
# Scale mesh to a realistic size
geo.mesh.coordinates()[:] *= 4.5
return geo
def save_geometry_vis(geometry, folder='geometry'):
"""
Save the geometry as well as markers and fibers to files
that can be visualized in paraview
"""
if not os.path.isdir(folder):
os.makedirs(folder)
for attr in ['mesh', 'ffun', 'cfun']:
print('Save {}'.format(attr))
df.File('{}/{}.pvd'.format(folder, attr)) << getattr(geometry, attr)
for attr in ['f0', 's0', 'n0']:
ldrb.fiber_to_xdmf(getattr(geometry, attr),
'{}/{}'.format(folder, attr))
def get_strains(u, v, dx):
F = pulse.kinematics.DeformationGradient(u)
E = pulse.kinematics.GreenLagrangeStrain(F, isochoric=False)
return df.assemble(df.inner(E*v, v) * dx) \
/ df.assemble(df.Constant(1.0) * dx)
def get_nodal_coordinates(u):
mesh = df.Mesh(u.function_space().mesh())
V = df.VectorFunctionSpace(mesh, "CG", 1)
df.ALE.move(mesh, df.interpolate(u, V))
return mesh.coordinates()
def postprocess(geometry):
"""
Get strain at nodal values
Arguments
---------
    geometry : pulse.HeartGeometry
        Geometry with the mesh, fiber field and AHA region markers
"""
coords = [geometry.mesh.coordinates()]
V = df.VectorFunctionSpace(geometry.mesh, "CG", 2)
Ef = np.zeros((3, 17))
u_ED = df.Function(V, "ED_displacement.xml")
coords.append(get_nodal_coordinates(u_ED))
for i in range(17):
Ef[1, i] = get_strains(u_ED, geometry.f0, geometry.dx(i+1))
EDV = geometry.cavity_volume(u=u_ED)
u_ES = df.Function(V, "ES_displacement.xml")
coords.append(get_nodal_coordinates(u_ES))
for i in range(17):
Ef[2, i] = get_strains(u_ES, geometry.f0, geometry.dx(i+1))
ESV = geometry.cavity_volume(u=u_ES)
# Stroke volume
SV = EDV - ESV
# Ejection fraction
EF = SV / EDV
print(("EDV: {EDV:.2f} ml\nESV: {ESV:.2f} ml\nSV: {SV:.2f}"
" ml\nEF: {EF:.2f}").format(EDV=EDV, ESV=ESV, SV=SV, EF=EF))
# Save nodes as txt at ED and ES
np.savetxt('coords_ED.txt',coords[1],fmt='%.4f',delimiter=',')
np.savetxt('coords_ES.txt',coords[2],fmt='%.4f',delimiter=',')
fig, ax = plt.subplots(1, 3, sharex=True, sharey=True)
for i in range(17):
j = i // 6
# from IPython import embed; embed()
# exit()
ax[j].plot(Ef[:, i], label="region {}".format(i+1))
ax[0].set_title("Basal")
ax[1].set_title("Mid")
ax[2].set_title("Apical")
ax[0].set_ylabel("Fiber strain")
for axi in ax:
axi.set_xticks(range(3))
axi.set_xticklabels(["", "ED", "ES"])
axi.legend()
plt.show()
def solve(
geometry,
EDP=1.0,
ESP=15.0,
Ta=60,
material_parameters=None,
):
"""
Arguments
---------
EDP : float
End diastolic pressure
ESP : float
End systolic pressure
Ta : float
Peak active tension (at ES)
material_parameters : dict
        A dictionary with parameters for the Guccione model.
        Default: {'C': 2.0, 'bf': 8.0, 'bt': 2.0, 'bfs': 4.0}
"""
# Create model
activation = df.Function(df.FunctionSpace(geometry.mesh, "R", 0))
matparams = pulse.Guccione.default_parameters()
if material_parameters is not None:
matparams.update(material_parameters)
material = pulse.Guccione(activation=activation,
parameters=matparams,
active_model="active_stress",
f0=geometry.f0,
s0=geometry.s0,
n0=geometry.n0)
lvp = df.Constant(0.0)
lv_marker = geometry.markers['ENDO'][0]
lv_pressure = pulse.NeumannBC(traction=lvp,
marker=lv_marker, name='lv')
neumann_bc = [lv_pressure]
# Add spring term at the base with stiffness 1.0 kPa/cm^2
base_spring = 1.0
robin_bc = [pulse.RobinBC(value=df.Constant(base_spring),
marker=geometry.markers["BASE"][0])]
# Fix the basal plane in the longitudinal direction
# 0 in V.sub(0) refers to x-direction, which is the longitudinal direction
def fix_basal_plane(W):
V = W if W.sub(0).num_sub_spaces() == 0 else W.sub(0)
bc = df.DirichletBC(V.sub(0),
df.Constant(0.0),
geometry.ffun, geometry.markers["BASE"][0])
return bc
dirichlet_bc = [fix_basal_plane]
# Collect boundary conditions
bcs = pulse.BoundaryConditions(dirichlet=dirichlet_bc,
neumann=neumann_bc,
robin=robin_bc)
# Create the problem
problem = pulse.MechanicsProblem(geometry, material, bcs)
xdmf = df.XDMFFile(df.mpi_comm_world(), 'output.xdmf')
# Solve the problem
print(("Do an initial solve with pressure = 0 kPa "
"and active tension = 0 kPa"))
problem.solve()
u, p = problem.state.split()
xdmf.write(u, 0.0)
print("LV cavity volume = {} ml".format(geometry.cavity_volume(u=u)))
# Solve for ED
print(("Solver for ED with pressure = {} kPa and active tension = 0 kPa"
"".format(EDP)))
pulse.iterate.iterate(problem, lvp, EDP, initial_number_of_steps=20)
u, p = problem.state.split(deepcopy=True)
xdmf.write(u, 1.0)
df.File("ED_displacement.xml") << u
print("LV cavity volume = {} ml".format(geometry.cavity_volume(u=u)))
# Solve for ES
print(("Solver for ES with pressure = {} kPa and active tension = {} kPa"
"".format(ESP, Ta)))
pulse.iterate.iterate(problem, lvp, ESP,
initial_number_of_steps=50)
pulse.iterate.iterate(problem, activation, Ta,
adapt_step=False, max_iters=100,
initial_number_of_steps=40)
u, p = problem.state.split(deepcopy=True)
xdmf.write(u, 2.0)
df.File("ES_displacement.xml") << u
print("LV cavity volume = {} ml".format(geometry.cavity_volume(u=u)))
def main():
geometry = load_geometry(h5name='ellipsoid.h5', recreate=True)
save_geometry_vis(geometry, folder='geometry')
import time
t0 = time.time()
solve(geometry,
EDP=1.0,
ESP=15.0,
Ta=60,
material_parameters=None)
t1 = time.time()
print('Elapsed time = {:.2f} seconds'.format(t1 - t0))
postprocess(geometry)
if __name__ == "__main__":
main()
|
{"/compute_displacement_subset.py": ["/demo.py"], "/compute_surface_nodes.py": ["/demo.py"]}
|
3,107
|
nforsch/SSCP19-mechanics-project7
|
refs/heads/master
|
/compute_displacement_subset.py
|
import os
import numpy as np
import dolfin as df
import pulse
import ldrb
import matplotlib.pyplot as plt
from scipy import spatial
from demo import load_geometry
pi = np.pi
def cart2prolate( focalLength, XYZ ):
# Convert Cartesian XYZ to Prolate TML
# TML[0] = theta, TML[1] = mu, TML[2] = lambda
X = XYZ.T[0]
Y = XYZ.T[1]
Z = XYZ.T[2]
r1 = np.sqrt( Y**2 + Z**2 + (X+focalLength)**2 )
r2 = np.sqrt( Y**2 + Z**2 + (X-focalLength)**2 )
lmbda = np.real( np.arccosh((r1+r2)/(2*focalLength)) )
mu = np.real( np.arccos((r1-r2)/(2*focalLength)) )
theta = np.arctan2(Z,Y)
idx = theta<0
theta[idx] = theta[idx] + 2*np.pi
TML = np.concatenate(([theta], [mu], [lmbda]))
return TML
def prolate2cart( focalLength, TML ):
# Convert Prolate TML to Cartesian XYZ
# XYZ[0] = X, XYZ[1] = Y, XYZ[2] = Z
theta = TML[0]
mu = TML[1]
lmbda = TML[2]
X = focalLength * np.cosh(lmbda) * np.cos(mu)
Y = focalLength * np.sinh(lmbda) * np.sin(mu) * np.cos(theta)
Z = focalLength * np.sinh(lmbda) * np.sin(mu) * np.sin(theta)
XYZ = np.concatenate(([X],[Y],[Z]))
return XYZ
def focal( a, b, c ):
focalLength = np.sqrt( a**2 - (0.5*(b+c))**2 )
return focalLength
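
# Hedged self-check (not in the original script): prolate2cart should invert cart2prolate
# for a point away from the singular long axis; the sample point and focal length are
# illustrative only.
_pt = np.array([[0.5, 0.8, 0.3]])   # one Cartesian point, shape (1, 3)
_f = 1.0                            # sample focal length
_roundtrip = prolate2cart(_f, cart2prolate(_f, _pt))
assert np.allclose(_roundtrip.T, _pt)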
def get_surface_points(marker):
coordinates = []
idxs = []
# Loop over the facets
for facet in df.facets(geometry.mesh):
# If the facet markers matched that of ENDO
if geometry.ffun[facet] == marker:
            # Loop over the vertices of that facet
for vertex in df.vertices(facet):
idxs.append(vertex.global_index())
# coordinates.append(tuple(vertex.midpoint().array()))
# Remove duplicates
idxs = np.array(list(set(idxs)))
coordinates = geometry.mesh.coordinates()[idxs]
return coordinates, idxs
def fit_prolate( P ):
# Sample nodes of mesh using prolate coordinates to get displacements for
# same number of points, similar regions across meshes
# input P = TML from mesh endo/epi
mu_max = np.amax(P[1]) # find max mu coordinate from mesh
tree = spatial.KDTree(P[0:2].T) # setup tree for finding nearest point
idx_match = []
sample_points = []
for theta in np.linspace(pi/2,2*pi,4): # theta range
for mu in np.linspace(0,mu_max,5): # mu ranges from 0 to mu_max based on mesh
sample_points.append([theta,mu]) # list of sampled [theta,mu] combinations
distance, index = tree.query([theta,mu]) # find closest point
idx_match.append(index) # store index of point in endo or epi
return idx_match
# Define coordinates of ED mesh for endo and epi
geometry = load_geometry('ellipsoid.h5')
# Get nodes ENDO
marker_endo = geometry.markers['ENDO'][0]
endo_coordinates, endo_idxs = get_surface_points(marker_endo)
# Get nodes EPI
marker_epi = geometry.markers['EPI'][0]
epi_coordinates, epi_idxs = get_surface_points(marker_epi)
# convert Cartesian coordinates to Prolate, find maximum mu value
focalLength_endo = focal(4.1,1.6,1.6) # same parameters [a,b,c] used for mesh
focalLength_epi = focal(5,2.9,2.9) # same parameters [a,b,c] used for mesh
TML_endo = cart2prolate(focalLength_endo, endo_coordinates)
TML_epi = cart2prolate(focalLength_epi, epi_coordinates)
# XYZ_endo = prolate2cart(focalLength_endo,TML_endo) # check return XYZ from TML
# Find fit to closest node by varying theta, mu and fitting lambda (store index of node)
idx_match_endo = fit_prolate(TML_endo)
idx_match_epi = fit_prolate(TML_epi)
idx_node_endo = endo_idxs[idx_match_endo].tolist()
idx_node_epi = epi_idxs[idx_match_epi].tolist()
idx_nodes = idx_node_endo + idx_node_epi
# Get displacement between ES and ED using idx_nodes
print('Loading ED and ES mesh coordinates...')
ed_coordinates = np.loadtxt('coords_ED.txt',delimiter=',')
es_coordinates = np.loadtxt('coords_ES.txt',delimiter=',')
displacement = es_coordinates-ed_coordinates # calculate displacement between ED and ES
disp_out = displacement[idx_nodes] # get displacement for nodes in list idx_nodes
print('Saving displacements for %d points' %(len(idx_nodes)))
np.savetxt('displacement.txt',disp_out,fmt='%.8f',delimiter=',')
# from IPython import embed; embed()
|
{"/compute_displacement_subset.py": ["/demo.py"], "/compute_surface_nodes.py": ["/demo.py"]}
|
3,108
|
nforsch/SSCP19-mechanics-project7
|
refs/heads/master
|
/compute_surface_nodes.py
|
import dolfin as df
from demo import load_geometry
geometry = load_geometry()
endo_coordinates = []
endo_marker = geometry.markers['ENDO'][0]
# Loop over the facets
for facet in df.facets(geometry.mesh):
# If the facet markers matched that of ENDO
if geometry.ffun[facet] == endo_marker:
        # Loop over the vertices of that facet
for vertex in df.vertices(facet):
endo_coordinates.append(tuple(vertex.midpoint().array()))
# Remove duplicates
endo_coordinates = set(endo_coordinates)
|
{"/compute_displacement_subset.py": ["/demo.py"], "/compute_surface_nodes.py": ["/demo.py"]}
|
3,109
|
nforsch/SSCP19-mechanics-project7
|
refs/heads/master
|
/create_ellipsoid.py
|
import os
import numpy as np
import dolfin as df
import pulse
import ldrb
def create_geometry(h5name):
"""
Create an lv-ellipsoidal mesh and fiber fields using LDRB algorithm
An ellipsoid is given by the equation
.. math::
\frac{x^2}{a} + \frac{y^2}{b} + \frac{z^2}{c} = 1
We create two ellipsoids, one for the endocardium and one
for the epicardium and subtract them and then cut the base.
For simplicity we assume that the longitudinal axis is in
in :math:`x`-direction and as default the base is located
at the :math:`x=0` plane.
"""
# Number of subdivision (higher -> finer mesh)
N = 13
# Parameter for the endo ellipsoid
a_endo = 1.5
b_endo = 0.5
c_endo = 0.5
# Parameter for the epi ellipsoid
a_epi = 2.0
b_epi = 1.0
c_epi = 1.0
# Center of the ellipsoid (same of endo and epi)
center = (0.0, 0.0, 0.0)
# Location of the base
base_x = 0.0
# Create a lv ellipsoid mesh with longitudinal axis along the x-axis
geometry = ldrb.create_lv_mesh(
N=N,
a_endo=a_endo,
b_endo=b_endo,
c_endo=c_endo,
a_epi=a_epi,
b_epi=b_epi,
c_epi=c_epi,
center=center,
base_x=base_x
)
# Select fiber angles for rule based algorithm
angles = dict(alpha_endo_lv=60, # Fiber angle on the endocardium
alpha_epi_lv=-60, # Fiber angle on the epicardium
beta_endo_lv=0, # Sheet angle on the endocardium
beta_epi_lv=0) # Sheet angle on the epicardium
fiber_space = 'Lagrange_1'
    # Compute the microstructure
fiber, sheet, sheet_normal = ldrb.dolfin_ldrb(mesh=geometry.mesh,
fiber_space=fiber_space,
ffun=geometry.ffun,
markers=geometry.markers,
**angles)
# Compute focal point
focal = np.sqrt(a_endo**2 - (0.5 * (b_endo + c_endo))**2)
    # Mark the mesh according to AHA zones
pulse.geometry_utils.mark_strain_regions(mesh=geometry.mesh, foc=focal)
mapper = {'lv': 'ENDO', 'epi': 'EPI', 'rv': 'ENDO_RV', 'base': 'BASE'}
m = {mapper[k]: (v, 2) for k, v in geometry.markers.items()}
pulse.geometry_utils.save_geometry_to_h5(
geometry.mesh, h5name, markers=m,
fields=[fiber, sheet, sheet_normal]
)
create_geometry('ellipsoid.h5')
|
{"/compute_displacement_subset.py": ["/demo.py"], "/compute_surface_nodes.py": ["/demo.py"]}
|
3,110
|
nforsch/SSCP19-mechanics-project7
|
refs/heads/master
|
/pca.py
|
# PCA demo
# Uses PCA from sklearn.decomposition: http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import numpy as np
import seaborn as sns; sns.set()
# Data
X_train = []   # fill with training observations (rows = samples) before running
X_sample = []  # fill with observations to project onto the fitted components
# PCA
pca = PCA(n_components=2)
pca.fit(X_train)
# pca.explained_variance_
# pca.explained_variance_ratio_
# pca.components_
# pca.mean_
# pca.singular_values_
# Transform sample data
sample_weights = pca.transform(X_sample)
# Recreate from component weights
X_recreate = pca.mean_ + sample_weights.dot(pca.components_)
# OR
# X_recreate = pca.inverse_transform(sample_weights)
# Plot explained variance per PC and cumulative
var_ratio = pca.explained_variance_ratio_
cumsum_var = np.cumsum(var_ratio)
plt.figure(figsize=(8, 6))
plt.bar(range(1, len(var_ratio) + 1), var_ratio, color='r', alpha=0.5, align='center', label='individual explained variance')
plt.step(range(1, len(cumsum_var) + 1), cumsum_var, where='mid', label='cumulative explained variance')
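
# Hedged sketch on synthetic data (not from the original notebook): verifies that the
# manual reconstruction mean_ + weights.dot(components_) matches pca.inverse_transform.
_X_demo = np.random.rand(50, 5)
_pca_demo = PCA(n_components=2).fit(_X_demo)
_w_demo = _pca_demo.transform(_X_demo[:3])
assert np.allclose(_pca_demo.mean_ + _w_demo.dot(_pca_demo.components_),
                   _pca_demo.inverse_transform(_w_demo))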
|
{"/compute_displacement_subset.py": ["/demo.py"], "/compute_surface_nodes.py": ["/demo.py"]}
|
3,112
|
automation-monkey/Stock-Tracker-App-Test-Framework
|
refs/heads/main
|
/utils.py
|
import json
import requests
class BaseTest:
BASE_URL = 'http://localhost:8080/api/'
@classmethod
def _get_request(cls, url=None, headers=None, params=None):
request_url = '{}'.format(url)
response = requests.get(url=request_url,
headers=headers,
params=params)
print('Get request sent to {}'.format(request_url))
print('Request headers {}'.format(headers))
print('Content of the request {}'.format(response.content))
print('Status code of the request {}'.format(response.status_code))
print('*' * 100)
return response
@classmethod
def _post_request(cls, url=None, headers=None, cookies=None, data=None):
request_url = '{}'.format(url)
response = requests.post(url, headers=headers, cookies=cookies, data=data)
print('Post request sent to {}'.format(url))
print('Request headers {}'.format(headers))
print('Request data {}'.format(data))
print('Content of the request {}'.format(response.content))
print('Status code of the request {}'.format(response.status_code))
print('*' * 100)
return response
@classmethod
def _delete_request(cls, url=None, headers=None, cookies=None, data=None):
request_url = '{}'.format(url)
response = requests.delete(request_url, headers=headers, cookies=cookies, data=data)
print('Delete request sent to {}'.format(request_url))
print('Request headers {}'.format(headers))
print('Request data {}'.format(data))
print('Content of the request {}'.format(response.content))
print('Status code of the request {}'.format(response.status_code))
print('*' * 100)
return response
@classmethod
def _put_request(cls, url=None, headers=None, cookies=None, data=None):
request_url = '{}'.format(url)
        response = requests.put(request_url, headers=headers, cookies=cookies, data=data)
print('Put request sent to {}'.format(request_url))
print('Request headers {}'.format(headers))
print('Request data {}'.format(data))
print('Content of the request {}'.format(response.content))
print('Status code of the request {}'.format(response.status_code))
print('*' * 100)
return response
@classmethod
def _get_user_portfolio(cls):
r = cls._get_request(url=cls.BASE_URL+'portfolio')
portfolio = json.loads(r.content)
return portfolio
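
# Hedged usage sketch (not part of the original module): when executed directly, fetch the
# user portfolio from the locally running API and print it (requires the API to be up).
if __name__ == '__main__':
    print(BaseTest._get_user_portfolio())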
|
{"/tests/test_stock_tracker_app.py": ["/utils.py"]}
|
3,113
|
automation-monkey/Stock-Tracker-App-Test-Framework
|
refs/heads/main
|
/tests/test_stock_tracker_app.py
|
import json
import pytest
from utils import BaseTest
class TestStockTrackerApp(BaseTest):
VALUATION = {'valuation': float}
USER_PORTFOLIO_EXPECTED = {'AMZN': 1}
# Initiate portfolio with expected test data
@pytest.fixture(autouse=True, scope='session')
def create_user_portfolio(self):
self._post_request(url='http://localhost:8080/api/holding', data={'ticker': 'AMZN', 'units': 1})
@classmethod
def setup_class(cls):
cls.tracker_endpoint_url = BaseTest.BASE_URL
cls.holding_endpoint = cls.tracker_endpoint_url + 'holding'
cls.portfolio_endpoint = cls.tracker_endpoint_url + 'portfolio'
cls.valuation_endpoint = cls.tracker_endpoint_url + 'valuation'
def test_get_portfolio_check_data_type(self):
portfolio = self._get_user_portfolio()
# Compare response portfolio to expected result
assert portfolio == self.USER_PORTFOLIO_EXPECTED
def test_get_valuation_check_type_and_structure(self):
r = self._get_request(url=self.valuation_endpoint)
valuation = json.loads(r.content)
assert r.status_code == 200
assert valuation['valuation'] > 0
for key in valuation:
# Verify returned valuation dict types and structure
assert isinstance(valuation[key], self.VALUATION.get(key)), '{} key incorrect format'.format(key)
assert all(key in valuation for key in self.VALUATION), '{} key is missing'.format(key)
def test_add_update_and_remove_holding(self):
# This test adds the twitter stock to the portfolio,
# updates and deletes it. Verification is made for the whole flow.
ticker = 'TWTR'
# Add new ticker
r_add_ticker = self._post_request(url=self.holding_endpoint, data={'ticker': ticker, 'units': 5})
assert r_add_ticker.status_code == 201
# Check ticker is created
user_portfolio = self._get_user_portfolio()
assert ticker in user_portfolio and user_portfolio[ticker] == 5
# Update ticker value
r_update_ticker = self._post_request(url=self.holding_endpoint, data={'ticker': ticker, 'units': 6})
assert r_update_ticker.status_code == 201
# Check ticker is updated
user_portfolio = self._get_user_portfolio()
assert ticker in user_portfolio and user_portfolio[ticker] == 6
# # Delete ticker
r_del_ticker = self._delete_request(url=self.holding_endpoint, data={'ticker': ticker})
assert r_del_ticker.status_code == 204
# Check ticker is deleted
user_portfolio = self._get_user_portfolio()
assert ticker not in user_portfolio
@pytest.mark.parametrize('ticker', ('A A P L', '!@#$%', ' ', 'VOW3.DE', '1234'))
    def test_add_new_stock_using_invalid_ticker(self, ticker):
r = self._post_request(url=self.holding_endpoint, data={'ticker': ticker, 'units': 1})
assert r.status_code == 400
@pytest.mark.parametrize('units', ('A', 'a', '@', ' ', '.'))
def test_add_new_stock_using_invalid_units(self, units):
r = self._post_request(url=self.holding_endpoint, data={'ticker': 'AAPL', 'units': units})
assert r.status_code == 400
|
{"/tests/test_stock_tracker_app.py": ["/utils.py"]}
|
3,145
|
aten2001/CV_assignment_2
|
refs/heads/master
|
/smaller_hough.py
|
import numpy as np
import skimage.feature
import skimage.color
import matplotlib.pyplot as plt
import scipy.misc
theta_pace_detect_offset = 80
threshold_no_gradient = 0.8
small_factor = 5
min_distance_between_centers = 10 / small_factor
# Hough with smaller vote space
# Does not include the use-gradient option; be careful.
def detectCircles(im, radius):
edge = skimage.feature.canny(skimage.color.rgb2gray(im), sigma=3)
plt.imshow(edge)
plt.show()
h, w, _ = im.shape
acc = dict()
acc_mat = np.zeros((h // small_factor, w // small_factor))
pace = int(radius * 0.5) + theta_pace_detect_offset
for i in range(h):
for j in range(w):
if edge[i, j]:
for div in range(pace):
theta = 2 * np.pi * div / pace
a = int((-radius * np.cos(theta) + i) / small_factor)
b = int((radius * np.sin(theta) + j) / small_factor)
if isValid(h, w, a, b):
acc[(a, b)] = acc.get((a, b), 0) + 1
acc_mat[a, b] += 1
# Getting centers of the circle + post-processing
threshold = np.max(acc_mat) * threshold_no_gradient
print(np.max(acc_mat))
plt.imshow(acc_mat)
plt.title('Smaller vote space accumulator - Radius = ' + str(radius))
plt.show()
acc_sorted = sorted(acc.items(), key=lambda kv: kv[1], reverse=True)
qualified_center = []
for k, v in acc_sorted:
if v < threshold:
break
else:
if not_close_center(k, qualified_center):
qualified_center.append((k[0] * small_factor, k[1] * small_factor))
    # Centers are returned in full-resolution image coordinates, for drawing circles on the image
return qualified_center
def not_close_center(pos, set):
for s in set:
if (pos[0] - s[0]) ** 2 + (pos[1] - s[1]) ** 2 <= min_distance_between_centers ** 2:
return False
return True
def isValid(h, w, a, b):
if a < 0 or a >= h // small_factor:
return False
if b < 0 or b >= w // small_factor:
return False
return True
|
{"/hough_test.py": ["/detectCircles.py", "/smaller_hough.py"], "/colorQuantizeMain.py": ["/quantizeRGB.py", "/quantizeHSV.py", "/computeQuantizationError.py"]}
|
3,146
|
aten2001/CV_assignment_2
|
refs/heads/master
|
/quantizeRGB.py
|
import scipy.cluster.vq
import scipy.misc
import numpy as np
import matplotlib.pyplot as plt
def quantizeRGB(origImg, k):
h,w,d = origImg.shape
processed = np.reshape(origImg, (w*h, d))
processed = np.array(processed, dtype=np.float64)
centroid, labels = scipy.cluster.vq.kmeans2(processed, k)
for i in range(h*w):
processed[i] = centroid[labels[i]]
res = np.reshape(processed, (h,w,d))
res = res.astype(np.uint8)
return res, centroid
|
{"/hough_test.py": ["/detectCircles.py", "/smaller_hough.py"], "/colorQuantizeMain.py": ["/quantizeRGB.py", "/quantizeHSV.py", "/computeQuantizationError.py"]}
|
3,147
|
aten2001/CV_assignment_2
|
refs/heads/master
|
/detectCircles.py
|
import numpy as np
import skimage.feature
import skimage.color
import matplotlib.pyplot as plt
import scipy.misc
min_distance_between_centers = 10
theta_pace_detect_offset = 80
threshold_no_gradient = 25
threshold_gradient = 8
theta_pace_draw = 100
def detectCircles(im, radius, useGradient):
edge = skimage.feature.canny(skimage.color.rgb2gray(im), sigma=3)
plt.imshow(edge)
plt.show()
h, w, _ = im.shape
acc = dict()
acc_mat = np.zeros((h, w))
pace = int(radius * 0.5) + theta_pace_detect_offset
if useGradient == 0:
for i in range(h):
for j in range(w):
if edge[i, j]:
for div in range(pace):
theta = 2 * np.pi * div / pace
a = int(-radius * np.cos(theta) + i)
b = int(radius * np.sin(theta) + j)
if isValid(h, w, a, b):
acc[(a, b)] = acc.get((a, b), 0) + 1
acc_mat[a, b] += 1
if useGradient == 1:
gradient_map = np.gradient(skimage.color.rgb2gray(im))
theta_map = np.arctan(-gradient_map[1]/gradient_map[0])
for i in range(h):
for j in range(w):
if edge[i, j]:
theta = theta_map[i,j]
                    if np.isnan(theta):  # NaN from a 0/0 gradient: fall back to a vertical direction
                        theta = np.pi / 2
a = int(-radius * np.cos(theta) + i)
b = int(radius * np.sin(theta) + j)
for augmented_a_b in augment_a_b(a,b):
a_aug = augmented_a_b[0]
b_aug = augmented_a_b[1]
if isValid(h, w, a_aug, b_aug):
acc[(a_aug, b_aug)] = acc.get((a_aug, b_aug), 0) + 1
acc_mat[a_aug, b_aug] += 1
# Getting centers of the circle + post-processing
threshold = np.max(acc_mat) * 0.9
print(np.max(acc_mat))
plt.imshow(acc_mat)
plt.title('Accumulator - Use gradient = '+str(useGradient)+' Radius = '+str(radius))
plt.show()
acc_sorted = sorted(acc.items(), key=lambda kv: kv[1], reverse=True)
qualified_center = []
for k, v in acc_sorted:
if v < threshold:
break
else:
if not_close_center(k, qualified_center):
qualified_center.append(k)
return qualified_center
def not_close_center(pos, set):
for s in set:
if (pos[0] - s[0]) ** 2 + (pos[1] - s[1]) ** 2 <= min_distance_between_centers ** 2:
return False
return True
def isValid(h, w, a, b):
if a < 0 or a >= h:
return False
if b < 0 or b >= w:
return False
return True
def augment_a_b(a,b):
res = []
augment = [[-1,-1],[-1,0],[-1,1],
[0,-1],[0,0],[0,1],
[1,-1],[1,0],[1,1]]
for aug in augment:
res.append((a+aug[0], b+aug[1]))
return res
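# Illustrative sketch (added, not part of the original file): overlay the detected centers
# as true-radius circles instead of fixed-size scatter markers. The use of matplotlib.patches
# here is an assumption about how one might visualise the result, not project code.
def drawDetectedCircles(im, centers, radius):
    import matplotlib.patches as patches
    fig, ax = plt.subplots()
    ax.imshow(im)
    for (row, col) in centers:
        # Centers are stored as (row, col); Circle expects (x, y) = (col, row).
        ax.add_patch(patches.Circle((col, row), radius, fill=False, edgecolor='r'))
    plt.show()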
|
{"/hough_test.py": ["/detectCircles.py", "/smaller_hough.py"], "/colorQuantizeMain.py": ["/quantizeRGB.py", "/quantizeHSV.py", "/computeQuantizationError.py"]}
|
3,148
|
aten2001/CV_assignment_2
|
refs/heads/master
|
/hough_test.py
|
import scipy.misc
import detectCircles
import matplotlib.pyplot as plt
import smaller_hough
im = scipy.misc.imread('egg.jpg')
radius = 15
use_gradient = 1
centers = detectCircles.detectCircles(im, radius, use_gradient)
print('detected ' + str(len(centers)) + ' centers')
xs = []
ys = []
for center in centers:
xs.append(center[0])
ys.append(center[1])
plt.imshow(im)
plt.scatter(ys, xs, s=radius**2,c='r')
plt.title('Image with detected circle - use gradient = '+str(use_gradient)+" radius = "+str(radius))
plt.show()
im = scipy.misc.imread('jupiter.jpg')
radius = 50
centers = smaller_hough.detectCircles(im, radius)
xs = []
ys = []
for center in centers:
xs.append(center[0])
ys.append(center[1])
plt.imshow(im)
plt.scatter(ys, xs, s=radius**2,c='r')
plt.title('Image with detected circle - use gradient = '+str(use_gradient)+" radius = "+str(radius))
plt.show()
|
{"/hough_test.py": ["/detectCircles.py", "/smaller_hough.py"], "/colorQuantizeMain.py": ["/quantizeRGB.py", "/quantizeHSV.py", "/computeQuantizationError.py"]}
|
3,149
|
aten2001/CV_assignment_2
|
refs/heads/master
|
/computeQuantizationError.py
|
import numpy as np
def computeQuantizationError(origImg, quantizedImg):
h, w, d = origImg.shape
sum = 0
sum = np.int64(sum)
for i in range(h):
for j in range(w):
            # Cast to int so the uint8 subtraction cannot wrap around.
            error = (int(origImg[i, j, 0]) - int(quantizedImg[i, j, 0])) ** 2 + \
                    (int(origImg[i, j, 1]) - int(quantizedImg[i, j, 1])) ** 2 + \
                    (int(origImg[i, j, 2]) - int(quantizedImg[i, j, 2])) ** 2
            sum += error
return sum
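# Illustrative alternative (added, not part of the original file): the same SSD computed
# with numpy broadcasting, avoiding the per-pixel Python loop. It assumes both images have
# the same shape; the function name is ours, not part of the assignment interface.
def computeQuantizationErrorVectorized(origImg, quantizedImg):
    diff = origImg.astype(np.int64) - quantizedImg.astype(np.int64)
    return np.sum(diff ** 2)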
|
{"/hough_test.py": ["/detectCircles.py", "/smaller_hough.py"], "/colorQuantizeMain.py": ["/quantizeRGB.py", "/quantizeHSV.py", "/computeQuantizationError.py"]}
|
3,150
|
aten2001/CV_assignment_2
|
refs/heads/master
|
/colorQuantizeMain.py
|
import scipy
import quantizeRGB
import quantizeHSV
import matplotlib.pyplot as plt
import computeQuantizationError
img = scipy.misc.imread('fish.jpg')
# Begin test k=3
for k in [3, 6, 15]:
rgb_quantized_img, rgb_centroids = quantizeRGB.quantizeRGB(img, k)
hsv_quantized_img, hsv_centroids = quantizeHSV.quantizeHSV(img, k)
plt.imshow(rgb_quantized_img)
plt.title('RGB quantized image with k = ' + str(k))
plt.show()
plt.imshow(hsv_quantized_img)
plt.title('HSV quantized image with k = ' + str(k))
plt.show()
rgb_error = computeQuantizationError.computeQuantizationError(img, rgb_quantized_img)
hsv_error = computeQuantizationError.computeQuantizationError(img, hsv_quantized_img)
print('RGB SSD error with k = ', str(k), ' : ', str(rgb_error))
print('HSV SSD error with k = ', str(k), ' : ', str(hsv_error))
|
{"/hough_test.py": ["/detectCircles.py", "/smaller_hough.py"], "/colorQuantizeMain.py": ["/quantizeRGB.py", "/quantizeHSV.py", "/computeQuantizationError.py"]}
|
3,151
|
aten2001/CV_assignment_2
|
refs/heads/master
|
/quantizeHSV.py
|
import scipy.cluster.vq
import scipy.misc
import numpy as np
import matplotlib.pyplot as plt
import skimage.color
def quantizeHSV(origImg, k):
origImg = skimage.color.rgb2hsv(origImg)
h,w,d = origImg.shape
processed = np.reshape(origImg, (w*h, d))
processed = np.array(processed[:,0], dtype=np.float64)
centroid, labels = scipy.cluster.vq.kmeans2(processed, k)
for i in range(h*w):
processed[i] = centroid[labels[i]]
processed = np.reshape(processed, (h,w))
res = np.zeros((h,w,d))
for i in range(h):
for j in range(w):
res[i][j][0] = processed[i][j]
res[i][j][1] = origImg[i][j][1]
res[i][j][2] = origImg[i][j][2]
    # hsv2rgb returns floats in [0, 1]; rescale to uint8 so the result is comparable
    # to the original image (and to the RGB quantization) when computing the error.
    res = (skimage.color.hsv2rgb(res) * 255).astype(np.uint8)
return res, centroid
|
{"/hough_test.py": ["/detectCircles.py", "/smaller_hough.py"], "/colorQuantizeMain.py": ["/quantizeRGB.py", "/quantizeHSV.py", "/computeQuantizationError.py"]}
|
3,154
|
mazelife/figgy
|
refs/heads/master
|
/storage/admin.py
|
from django.contrib import admin
from storage.models import Book, Alias, Edition
class InlineAliasAdmin(admin.StackedInline):
model = Alias
extra = 0
class InlineEditionAdmin(admin.StackedInline):
model = Edition
extra = 0
class BookAdmin(admin.ModelAdmin):
inlines = [InlineEditionAdmin, InlineAliasAdmin]
list_display = ['id', 'title', 'number_of_editions']
def number_of_editions(self, obj):
return obj.edition_set.count()
admin.site.register(Book, BookAdmin)
|
{"/storage/tools.py": ["/storage/exceptions.py"]}
|
3,155
|
mazelife/figgy
|
refs/heads/master
|
/storage/tests/test_models.py
|
# encoding: utf-8
'''
Copyright (c) 2013 Safari Books Online. All rights reserved.
'''
import uuid
from django.test import TestCase
from storage import models
class TestModels(TestCase):
def setUp(self):
self.book = models.Book.objects.create(pk=str(uuid.uuid4()))
self.edition = models.Edition.objects.create(book=self.book, title="The Title", version="1.0")
def test_book_have_unicode_method(self):
'''The Book should have a __unicode__ method.'''
expected = 'Book {}'.format(self.book.pk)
self.assertEquals(expected, unicode(self.book))
|
{"/storage/tools.py": ["/storage/exceptions.py"]}
|
3,156
|
mazelife/figgy
|
refs/heads/master
|
/storage/exceptions.py
|
class BadDataFile(Exception):
"""
This exception is raised when a bad data file (XML) is encountered.
"""
|
{"/storage/tools.py": ["/storage/exceptions.py"]}
|
3,157
|
mazelife/figgy
|
refs/heads/master
|
/storage/tools.py
|
# encoding: utf-8
# Created by David Rideout <drideout@safaribooksonline.com> on 2/7/14 4:58 PM
# Copyright (c) 2013 Safari Books Online, LLC. All rights reserved.
from decimal import Decimal, InvalidOperation
from storage.models import Alias, Book, Edition
from storage.exceptions import BadDataFile
def process_book_element(book_element):
"""
Process a book element into the database. Operates on the following assumptions:
    1. A book ID may have a bad value, but if any of its aliases match a single existing book then an update
operation on that book can be done safely using the data in the <book> element.
2. For any given <book> element, if the aliases match more than one book, then one or more of them are incorrect and
an exception should be raised.
    3. If the book is missing a <version> or if it's not a number, an exception should be raised.
    :param book_element: book element
:returns:
:raises: BadDataFile
"""
book_id = book_element.get('id')
aliases = [(a.get('scheme'), a.get('value')) for a in book_element.xpath('aliases/alias')]
edition_version = book_element.findtext('version')
try:
edition_version = Decimal(edition_version)
except InvalidOperation:
raise BadDataFile("Invalid version data: {} is not a decimal number.".format(edition_version))
except TypeError: # Raised when there is no <version> element.
raise BadDataFile("The version number is missing from this file.")
try:
book = Book.objects.get(pk=book_id)
except Book.DoesNotExist:
book = None
# Try to match on aliases, all of which must agree.
books_matched = {}
for scheme, value in aliases:
for alias in Alias.objects.filter(scheme=scheme, value=value):
if alias.book_id not in books_matched:
books_matched[alias.book_id] = alias.book
if len(books_matched) > 1:
raise BadDataFile("The aliases in this file match more than one book.")
    # If no book matched by ID, use the alias match if there was one, or create a new book.
if book is None:
if len(books_matched) == 1:
book = books_matched.values()[0]
else:
book = Book.objects.create(pk=book_id)
# Handle create/update of the book's edition.
edition, created = Edition.objects.get_or_create(book_id=book.pk, version=edition_version)
edition.title = book_element.findtext('title')
edition.description = book_element.findtext('description')
edition.save()
# Handle create/update of the book's aliases.
for scheme, value in aliases:
book.aliases.get_or_create(scheme=scheme, value=value)
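# Illustrative helper (added, not part of the original module): drive process_book_element
# over a whole XML file. It assumes lxml is available and that book records can be located
# with an '//book' xpath; both are assumptions rather than documented project conventions.
def process_book_file(path):
    from lxml import etree  # local import so the original module-level imports stay unchanged
    tree = etree.parse(path)
    for book_element in tree.xpath('//book'):
        try:
            process_book_element(book_element)
        except BadDataFile as exc:
            # Skip the bad record but keep processing the rest of the file.
            print('Skipping bad <book> element: {}'.format(exc))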
|
{"/storage/tools.py": ["/storage/exceptions.py"]}
|
3,171
|
smartgang/KViewer
|
refs/heads/master
|
/ChildGraph.py
|
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from Indexer import *
import pyqtgraph as pg
import pandas as pd
class ChildGraph(QWidget):
main_child_plt_changed = pyqtSignal(name='main_child_plt_changed')
def __init__(self, child=True):
super(ChildGraph, self).__init__()
self.child = child
self.frame_layout = QVBoxLayout(self)
self.para_setting_btn = QPushButton("参数设置")
self.para_setting_btn.setFixedWidth(100)
self.indexer_label = QLabel(self)
self.vLine = None
self.frame_layout.addLayout(self.header_layout())
self.raw_data = None
self.open_list = []
        self.high_list = []  # when this is the main graph (child=False), keep raw_data's high/low for computing the Y-axis range
self.low_list = []
self.close_list = []
self.time_list = []
self.plt = None
self.indexer_class = None
self.indexer_name = ''
self.indexer_widget = None
def set_raw_data(self, raw_data):
        # Called externally: the main window passes the data in once it has been fetched.
        # As soon as data arrives, (re)build the plot; the main graph also loads the OHLC candlesticks.
self.raw_data = raw_data
if not self.child:
self.open_list = self.raw_data['open'].tolist()
self.high_list = self.raw_data['high'].tolist()
self.low_list = self.raw_data['low'].tolist()
self.close_list = self.raw_data['close'].tolist()
self.time_list = self.raw_data['strtime'].tolist()
self._setup_plt()
def _setup_candlestick(self):
        # Build the candlestick item and date axis for the main graph
csitem = CandlestickItem(self.raw_data)
axis = DateAxis(date_strings=self.time_list, orientation='bottom')
return csitem, axis
def _setup_plt(self):
if self.plt:
self.plt.close()
if not self.child:
            # Main graph: load the candlesticks
item, axis = self._setup_candlestick()
self.plt = pg.PlotWidget(axisItems={'bottom': axis})
self.plt.addItem(item, )
self.plt.showGrid(x=True, y=True)
self.main_child_plt_changed.emit()
else:
self.plt = pg.PlotWidget()
self.plt.showGrid(x=True, y=True)
self.vLine = pg.InfiniteLine(angle=90, movable=False)
self.plt.addItem(self.vLine)
self.frame_layout.addWidget(self.plt)
def header_layout(self):
hbox = QHBoxLayout(self)
self.para_setting_btn.clicked.connect(self.set_indexer_parameter)
hbox.addWidget(self.indexer_label)
hbox.addWidget(self.para_setting_btn)
return hbox
def set_indexer_label(self, xpos):
        # Update the indicator label text and move the vertical cursor line to xpos
if self.indexer_class:
if xpos >= self.indexer_class.value_num:
return
value_str = self.indexer_class.get_indexer_value_text(xpos)
if not self.child:
                # The main graph also appends the OHLC values
open = self.open_list[xpos]
close = self.close_list[xpos]
if open > close:
c = 'green'
elif open < close:
c = 'red'
else:
c = 'black'
value_str += \
" <span style='color: %s'>open=%0.1f,high=%0.1f,low=%0.1f,close=%0.1f</span>,%s" % (
c, open, self.high_list[xpos], self.low_list[xpos], close, self.time_list[xpos])
self.indexer_label.setText(value_str)
self.vLine.setPos(xpos)
def set_indexer_parameter(self):
        # Entry point for the user to configure indicator parameters: pops up the indicator settings dialog.
        # The currently configured indicator keeps its existing parameters; all other indicators show their defaults.
all_indexer_para_dic = get_all_indexer_para_dic()
current_indexer_name = 'MA'
if self.indexer_class:
all_indexer_para_dic[self.indexer_name] = self.indexer_class.get_para_dic()
current_indexer_name = self.indexer_class.indexer_name
self.indexer_widget = IndexerWidget(all_indexer_para_dic, current_indexer_name)
self.indexer_widget.signal_para_changed.connect(self.indexer_parameter_changed)
self.indexer_widget.show()
def indexer_parameter_changed(self, selected_indexer, para_dic):
        # Receive the new parameters chosen by the user and refresh the display
if selected_indexer == self.indexer_name:
            # Same indicator as before: just update its parameters
self.indexer_class.update_parameter(para_dic[selected_indexer])
else:
            # A different indicator was selected: load the new one
if self.indexer_class:
#self.plt.clear()
self._setup_plt()
indexer_class = indexer_mapping_dic[selected_indexer](self.raw_data, self.plt)
indexer_class.set_para_dic(para_dic[selected_indexer])
indexer_class.calculate_indexer_value()
indexer_class.draw_indexer()
self.indexer_class = indexer_class
self.indexer_name = selected_indexer
self.update_visual_range(200, 400)
self.set_indexer_label(200)
def update_visual_range(self, start_pos, end_pos):
if self.plt and self.indexer_class:
            # Auto-fit the Y axis
value_n = self.indexer_class.value_num
start_pos = max(0, start_pos)
start_pos = min(start_pos, value_n)
end_pos = max(1, end_pos)
end_pos = min(end_pos, value_n)
if not self.child:
minY = min(self.low_list[start_pos:end_pos])
maxY = max(self.high_list[start_pos:end_pos])
else:
minY = 999999
maxY = 0
indexer_max_value, indexer_min_value = self.indexer_class.get_polar_value(start_pos, end_pos)
minY = min(minY, indexer_min_value)
maxY = max(maxY, indexer_max_value)
self.plt.setYRange(minY, maxY)
self.plt.setXRange(start_pos, end_pos, padding=0)
class DateAxis(pg.AxisItem):
def __init__(self, date_strings, orientation):
pg.AxisItem.__init__(self, orientation)
self.date_strings = date_strings
self.len = len(self.date_strings)
def tickStrings(self, values, scale, spacing):
strns = []
for x in values:
x1 = int(x)
if 0 <= x1 < self.len:
strns.append(self.date_strings[x1])
else:
strns.append('')
return strns
## Create a subclass of GraphicsObject.
## The only required methods are paint() and boundingRect()
## (see QGraphicsItem documentation)
class CandlestickItem(pg.GraphicsObject):
def __init__(self, data):
pg.GraphicsObject.__init__(self)
t = range(data.shape[0])
open = data.open.tolist()
high = data.high.tolist()
low = data.low.tolist()
close = data.close.tolist()
self.data = zip(t, open, close, low, high)
## data must have fields: time, open, close, min, max
self.generatePicture()
def generatePicture(self):
## pre-computing a QPicture object allows paint() to run much more quickly,
## rather than re-drawing the shapes every time.
self.picture = QPicture()
p = QPainter(self.picture)
p.setPen(pg.mkPen('w'))
w = (self.data[1][0] - self.data[0][0]) / 3.
for (t, open, close, min, max) in self.data:
p.drawLine(QPointF(t, min), QPointF(t, max))
if open > close:
p.setBrush(pg.mkBrush('g'))
else:
p.setBrush(pg.mkBrush('r'))
p.drawRect(QRectF(t - w, open, w * 2, close - open))
p.end()
def paint(self, p, *args):
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
## boundingRect _must_ indicate the entire area that will be drawn on
## or else we will get artifacts and possibly crashing.
        ## (in this case, QPicture does all the work of computing the bounding rect for us)
return QRectF(self.picture.boundingRect())
if __name__ == '__main__':
app = QApplication(sys.argv)
demo = ChildGraph(False)
# demo.update_visual_range(200, 300)
    demo.set_raw_data(1)  # placeholder argument; real use passes a DataFrame with open/high/low/close/strtime columns
demo.show()
sys.exit(app.exec_())
|
{"/ChildGraph.py": ["/Indexer/__init__.py"], "/complexExample.py": ["/complex2.py"], "/decouple_window.py": ["/nullWindow.py"], "/kviewer_app.py": ["/kviewer2.py", "/indexer.py", "/parameter2.py"], "/Indexer/IndexerWidget.py": ["/Indexer/__init__.py"], "/MainFrame.py": ["/KViewer_new.py"], "/KViewer_new.py": ["/Indexer/__init__.py", "/ChildGraph.py", "/DataInterface/DataInterface.py"]}
|
3,172
|
smartgang/KViewer
|
refs/heads/master
|
/kviewer1.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'kviewer.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.tab_para = QtWidgets.QWidget()
self.tab_para.setObjectName("tab_para")
self.groupBox_3 = QtWidgets.QGroupBox(self.tab_para)
self.groupBox_3.setGeometry(QtCore.QRect(400, 110, 361, 121))
self.groupBox_3.setObjectName("groupBox_3")
self.label = QtWidgets.QLabel(self.groupBox_3)
self.label.setGeometry(QtCore.QRect(30, 60, 41, 16))
self.label.setObjectName("label")
self.lineEdit_macd_short = QtWidgets.QLineEdit(self.groupBox_3)
self.lineEdit_macd_short.setEnabled(False)
self.lineEdit_macd_short.setGeometry(QtCore.QRect(80, 60, 41, 20))
self.lineEdit_macd_short.setObjectName("lineEdit_macd_short")
self.label_2 = QtWidgets.QLabel(self.groupBox_3)
self.label_2.setGeometry(QtCore.QRect(150, 60, 31, 16))
self.label_2.setObjectName("label_2")
self.lineEdit_macd_long = QtWidgets.QLineEdit(self.groupBox_3)
self.lineEdit_macd_long.setEnabled(False)
self.lineEdit_macd_long.setGeometry(QtCore.QRect(190, 60, 41, 20))
self.lineEdit_macd_long.setObjectName("lineEdit_macd_long")
self.label_3 = QtWidgets.QLabel(self.groupBox_3)
self.label_3.setGeometry(QtCore.QRect(260, 60, 21, 16))
self.label_3.setObjectName("label_3")
self.lineEdit_macd_m = QtWidgets.QLineEdit(self.groupBox_3)
self.lineEdit_macd_m.setEnabled(False)
self.lineEdit_macd_m.setGeometry(QtCore.QRect(280, 60, 41, 20))
self.lineEdit_macd_m.setObjectName("lineEdit_macd_m")
self.checkBox_macd = QtWidgets.QCheckBox(self.groupBox_3)
self.checkBox_macd.setGeometry(QtCore.QRect(30, 30, 71, 16))
self.checkBox_macd.setObjectName("checkBox_macd")
self.groupBox_2 = QtWidgets.QGroupBox(self.tab_para)
self.groupBox_2.setGeometry(QtCore.QRect(20, 110, 371, 121))
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayoutWidget = QtWidgets.QWidget(self.groupBox_2)
self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 40, 351, 80))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_5 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_5.setObjectName("label_5")
self.gridLayout_2.addWidget(self.label_5, 0, 0, 1, 1)
self.label_7 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_7.setObjectName("label_7")
self.gridLayout_2.addWidget(self.label_7, 0, 4, 1, 1)
self.lineEdit_ma_n3 = QtWidgets.QLineEdit(self.gridLayoutWidget)
self.lineEdit_ma_n3.setEnabled(False)
self.lineEdit_ma_n3.setObjectName("lineEdit_ma_n3")
self.gridLayout_2.addWidget(self.lineEdit_ma_n3, 0, 5, 1, 1)
self.lineEdit_ma_n2 = QtWidgets.QLineEdit(self.gridLayoutWidget)
self.lineEdit_ma_n2.setEnabled(False)
self.lineEdit_ma_n2.setObjectName("lineEdit_ma_n2")
self.gridLayout_2.addWidget(self.lineEdit_ma_n2, 0, 3, 1, 1)
self.label_6 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_6.setObjectName("label_6")
self.gridLayout_2.addWidget(self.label_6, 0, 2, 1, 1)
self.lineEdit_ma_n1 = QtWidgets.QLineEdit(self.gridLayoutWidget)
self.lineEdit_ma_n1.setEnabled(False)
self.lineEdit_ma_n1.setObjectName("lineEdit_ma_n1")
self.gridLayout_2.addWidget(self.lineEdit_ma_n1, 0, 1, 1, 1)
self.label_8 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_8.setObjectName("label_8")
self.gridLayout_2.addWidget(self.label_8, 1, 0, 1, 1)
self.lineEdit_ma_n4 = QtWidgets.QLineEdit(self.gridLayoutWidget)
self.lineEdit_ma_n4.setEnabled(False)
self.lineEdit_ma_n4.setObjectName("lineEdit_ma_n4")
self.gridLayout_2.addWidget(self.lineEdit_ma_n4, 1, 1, 1, 1)
self.label_9 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_9.setObjectName("label_9")
self.gridLayout_2.addWidget(self.label_9, 1, 2, 1, 1)
self.lineEdit_ma_n5 = QtWidgets.QLineEdit(self.gridLayoutWidget)
self.lineEdit_ma_n5.setEnabled(False)
self.lineEdit_ma_n5.setObjectName("lineEdit_ma_n5")
self.gridLayout_2.addWidget(self.lineEdit_ma_n5, 1, 3, 1, 1)
self.label_10 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_10.setObjectName("label_10")
self.gridLayout_2.addWidget(self.label_10, 1, 4, 1, 1)
self.comboBox_ma = QtWidgets.QComboBox(self.gridLayoutWidget)
self.comboBox_ma.setEnabled(False)
self.comboBox_ma.setObjectName("comboBox_ma")
self.comboBox_ma.addItem("")
self.comboBox_ma.addItem("")
self.gridLayout_2.addWidget(self.comboBox_ma, 1, 5, 1, 1)
self.checkBox_ma = QtWidgets.QCheckBox(self.groupBox_2)
self.checkBox_ma.setGeometry(QtCore.QRect(20, 20, 71, 16))
self.checkBox_ma.setChecked(False)
self.checkBox_ma.setObjectName("checkBox_ma")
self.groupBox_5 = QtWidgets.QGroupBox(self.tab_para)
self.groupBox_5.setGeometry(QtCore.QRect(400, 10, 361, 91))
self.groupBox_5.setObjectName("groupBox_5")
self.pushButton_opr_file = QtWidgets.QPushButton(self.groupBox_5)
self.pushButton_opr_file.setGeometry(QtCore.QRect(30, 40, 75, 23))
self.pushButton_opr_file.setObjectName("pushButton_opr_file")
self.label_opr = QtWidgets.QLabel(self.groupBox_5)
self.label_opr.setGeometry(QtCore.QRect(130, 40, 54, 12))
self.label_opr.setObjectName("label_opr")
self.groupBox_4 = QtWidgets.QGroupBox(self.tab_para)
self.groupBox_4.setGeometry(QtCore.QRect(20, 10, 371, 90))
self.groupBox_4.setObjectName("groupBox_4")
self.gridLayoutWidget_2 = QtWidgets.QWidget(self.groupBox_4)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(9, 20, 351, 61))
self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
self.gridLayout_3.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.lineEdit_contract = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.lineEdit_contract.setObjectName("lineEdit_contract")
self.gridLayout_3.addWidget(self.lineEdit_contract, 0, 2, 1, 1)
self.label_4 = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.label_4.setObjectName("label_4")
self.gridLayout_3.addWidget(self.label_4, 0, 3, 1, 1)
self.label_12 = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.label_12.setObjectName("label_12")
self.gridLayout_3.addWidget(self.label_12, 0, 0, 1, 1)
self.comboBox_bar = QtWidgets.QComboBox(self.gridLayoutWidget_2)
self.comboBox_bar.setObjectName("comboBox_bar")
self.comboBox_bar.addItem("")
self.comboBox_bar.addItem("")
self.comboBox_bar.addItem("")
self.comboBox_bar.addItem("")
self.comboBox_bar.addItem("")
self.comboBox_bar.addItem("")
self.comboBox_bar.addItem("")
self.gridLayout_3.addWidget(self.comboBox_bar, 0, 4, 1, 1)
self.label_13 = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.label_13.setObjectName("label_13")
self.gridLayout_3.addWidget(self.label_13, 1, 0, 1, 1)
self.label_14 = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.label_14.setObjectName("label_14")
self.gridLayout_3.addWidget(self.label_14, 1, 3, 1, 1)
self.dateEdit_end = QtWidgets.QDateEdit(self.gridLayoutWidget_2)
self.dateEdit_end.setDateTime(QtCore.QDateTime(QtCore.QDate(2018, 6, 30), QtCore.QTime(0, 0, 0)))
self.dateEdit_end.setObjectName("dateEdit_end")
self.gridLayout_3.addWidget(self.dateEdit_end, 1, 4, 1, 1)
self.dateEdit_start = QtWidgets.QDateEdit(self.gridLayoutWidget_2)
self.dateEdit_start.setObjectName("dateEdit_start")
self.gridLayout_3.addWidget(self.dateEdit_start, 1, 2, 1, 1)
self.pushButton_set_para = QtWidgets.QPushButton(self.tab_para)
self.pushButton_set_para.setGeometry(QtCore.QRect(360, 360, 75, 23))
self.pushButton_set_para.setObjectName("pushButton_set_para")
self.groupBox_6 = QtWidgets.QGroupBox(self.tab_para)
self.groupBox_6.setGeometry(QtCore.QRect(20, 240, 371, 80))
self.groupBox_6.setObjectName("groupBox_6")
self.lineEdit_kdj_n = QtWidgets.QLineEdit(self.groupBox_6)
self.lineEdit_kdj_n.setEnabled(False)
self.lineEdit_kdj_n.setGeometry(QtCore.QRect(40, 50, 51, 20))
self.lineEdit_kdj_n.setObjectName("lineEdit_kdj_n")
self.lineEdit_kdj_m1 = QtWidgets.QLineEdit(self.groupBox_6)
self.lineEdit_kdj_m1.setEnabled(False)
self.lineEdit_kdj_m1.setGeometry(QtCore.QRect(160, 50, 51, 20))
self.lineEdit_kdj_m1.setObjectName("lineEdit_kdj_m1")
self.lineEdit_kdj_m2 = QtWidgets.QLineEdit(self.groupBox_6)
self.lineEdit_kdj_m2.setEnabled(False)
self.lineEdit_kdj_m2.setGeometry(QtCore.QRect(270, 50, 51, 20))
self.lineEdit_kdj_m2.setObjectName("lineEdit_kdj_m2")
self.label_11 = QtWidgets.QLabel(self.groupBox_6)
self.label_11.setGeometry(QtCore.QRect(20, 50, 21, 16))
self.label_11.setObjectName("label_11")
self.label_15 = QtWidgets.QLabel(self.groupBox_6)
self.label_15.setGeometry(QtCore.QRect(140, 50, 21, 16))
self.label_15.setObjectName("label_15")
self.label_16 = QtWidgets.QLabel(self.groupBox_6)
self.label_16.setGeometry(QtCore.QRect(250, 50, 21, 16))
self.label_16.setObjectName("label_16")
self.checkBox_kdj = QtWidgets.QCheckBox(self.groupBox_6)
self.checkBox_kdj.setGeometry(QtCore.QRect(20, 20, 71, 16))
self.checkBox_kdj.setObjectName("checkBox_kdj")
self.groupBox_7 = QtWidgets.QGroupBox(self.tab_para)
self.groupBox_7.setGeometry(QtCore.QRect(400, 240, 361, 81))
self.groupBox_7.setObjectName("groupBox_7")
self.lineEdit_dmi_n = QtWidgets.QLineEdit(self.groupBox_7)
self.lineEdit_dmi_n.setEnabled(False)
self.lineEdit_dmi_n.setGeometry(QtCore.QRect(70, 50, 41, 20))
self.lineEdit_dmi_n.setObjectName("lineEdit_dmi_n")
self.lineEdit_dmi_m = QtWidgets.QLineEdit(self.groupBox_7)
self.lineEdit_dmi_m.setEnabled(False)
self.lineEdit_dmi_m.setGeometry(QtCore.QRect(190, 50, 41, 20))
self.lineEdit_dmi_m.setObjectName("lineEdit_dmi_m")
self.label_17 = QtWidgets.QLabel(self.groupBox_7)
self.label_17.setGeometry(QtCore.QRect(40, 50, 31, 16))
self.label_17.setObjectName("label_17")
self.label_18 = QtWidgets.QLabel(self.groupBox_7)
self.label_18.setGeometry(QtCore.QRect(170, 50, 21, 16))
self.label_18.setObjectName("label_18")
self.checkBox_dmi = QtWidgets.QCheckBox(self.groupBox_7)
self.checkBox_dmi.setGeometry(QtCore.QRect(30, 30, 71, 16))
self.checkBox_dmi.setObjectName("checkBox_dmi")
self.tabWidget.addTab(self.tab_para, "")
self.tab_plot = QtWidgets.QWidget()
self.tab_plot.setObjectName("tab_plot")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.tab_plot)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setContentsMargins(-1, -1, -1, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_plot_field = QtWidgets.QHBoxLayout()
self.horizontalLayout_plot_field.setObjectName("horizontalLayout_plot_field")
self.label_para = QtWidgets.QLabel(self.tab_plot)
self.label_para.setFrameShape(QtWidgets.QFrame.Box)
self.label_para.setObjectName("label_para")
self.horizontalLayout_plot_field.addWidget(self.label_para)
self.label_point = QtWidgets.QLabel(self.tab_plot)
self.label_point.setFrameShape(QtWidgets.QFrame.Box)
self.label_point.setObjectName("label_point")
self.horizontalLayout_plot_field.addWidget(self.label_point)
self.label_file = QtWidgets.QLabel(self.tab_plot)
self.label_file.setFrameShape(QtWidgets.QFrame.Box)
self.label_file.setObjectName("label_file")
self.horizontalLayout_plot_field.addWidget(self.label_file)
self.verticalLayout.addLayout(self.horizontalLayout_plot_field)
self.verticalLayout_3.addLayout(self.verticalLayout)
self.tabWidget.addTab(self.tab_plot, "")
self.verticalLayout_2.addWidget(self.tabWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 23))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.groupBox_3.setTitle(_translate("MainWindow", "MACD参数"))
self.label.setText(_translate("MainWindow", "Short"))
self.lineEdit_macd_short.setText(_translate("MainWindow", "12"))
self.label_2.setText(_translate("MainWindow", "Long"))
self.lineEdit_macd_long.setText(_translate("MainWindow", "26"))
self.label_3.setText(_translate("MainWindow", "M"))
self.lineEdit_macd_m.setText(_translate("MainWindow", "9"))
self.checkBox_macd.setText(_translate("MainWindow", "MACD"))
self.groupBox_2.setTitle(_translate("MainWindow", "MA参数"))
self.label_5.setText(_translate("MainWindow", "N1"))
self.label_7.setText(_translate("MainWindow", "N3"))
self.lineEdit_ma_n3.setText(_translate("MainWindow", "20"))
self.lineEdit_ma_n2.setText(_translate("MainWindow", "10"))
self.label_6.setText(_translate("MainWindow", "N2"))
self.lineEdit_ma_n1.setText(_translate("MainWindow", "5"))
self.label_8.setText(_translate("MainWindow", "N4"))
self.lineEdit_ma_n4.setText(_translate("MainWindow", "30"))
self.label_9.setText(_translate("MainWindow", "N5"))
self.lineEdit_ma_n5.setText(_translate("MainWindow", "50"))
self.label_10.setText(_translate("MainWindow", "算法"))
self.comboBox_ma.setItemText(0, _translate("MainWindow", "MA"))
self.comboBox_ma.setItemText(1, _translate("MainWindow", "EMA"))
self.checkBox_ma.setText(_translate("MainWindow", "MA"))
self.groupBox_5.setTitle(_translate("MainWindow", "回测文件"))
self.pushButton_opr_file.setText(_translate("MainWindow", "打开"))
self.label_opr.setText(_translate("MainWindow", "TextLabel"))
self.groupBox_4.setTitle(_translate("MainWindow", "公共参数"))
self.lineEdit_contract.setText(_translate("MainWindow", "RB1810"))
self.label_4.setText(_translate("MainWindow", "周期"))
self.label_12.setText(_translate("MainWindow", "合约"))
self.comboBox_bar.setItemText(0, _translate("MainWindow", "3600"))
self.comboBox_bar.setItemText(1, _translate("MainWindow", "1800"))
self.comboBox_bar.setItemText(2, _translate("MainWindow", "900"))
self.comboBox_bar.setItemText(3, _translate("MainWindow", "600"))
self.comboBox_bar.setItemText(4, _translate("MainWindow", "300"))
self.comboBox_bar.setItemText(5, _translate("MainWindow", "60"))
self.comboBox_bar.setItemText(6, _translate("MainWindow", "0"))
self.label_13.setText(_translate("MainWindow", "开始时间"))
self.label_14.setText(_translate("MainWindow", "结束时间"))
self.pushButton_set_para.setText(_translate("MainWindow", "画图"))
self.groupBox_6.setTitle(_translate("MainWindow", "KDJ参数"))
self.lineEdit_kdj_n.setText(_translate("MainWindow", "9"))
self.lineEdit_kdj_m1.setText(_translate("MainWindow", "3"))
self.lineEdit_kdj_m2.setText(_translate("MainWindow", "3"))
self.label_11.setText(_translate("MainWindow", "N"))
self.label_15.setText(_translate("MainWindow", "M1"))
self.label_16.setText(_translate("MainWindow", "M2"))
self.checkBox_kdj.setText(_translate("MainWindow", "KDJ"))
self.groupBox_7.setTitle(_translate("MainWindow", "DMI参数"))
self.lineEdit_dmi_n.setText(_translate("MainWindow", "14"))
self.lineEdit_dmi_m.setText(_translate("MainWindow", "6"))
self.label_17.setText(_translate("MainWindow", "N"))
self.label_18.setText(_translate("MainWindow", "M"))
self.checkBox_dmi.setText(_translate("MainWindow", "DMI"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_para), _translate("MainWindow", "参数设置"))
self.label_para.setText(_translate("MainWindow", "TextLabel"))
self.label_point.setText(_translate("MainWindow", "TextLabel"))
self.label_file.setText(_translate("MainWindow", "TextLabel"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_plot), _translate("MainWindow", "行情"))
|
{"/ChildGraph.py": ["/Indexer/__init__.py"], "/complexExample.py": ["/complex2.py"], "/decouple_window.py": ["/nullWindow.py"], "/kviewer_app.py": ["/kviewer2.py", "/indexer.py", "/parameter2.py"], "/Indexer/IndexerWidget.py": ["/Indexer/__init__.py"], "/MainFrame.py": ["/KViewer_new.py"], "/KViewer_new.py": ["/Indexer/__init__.py", "/ChildGraph.py", "/DataInterface/DataInterface.py"]}
|
3,173
|
smartgang/KViewer
|
refs/heads/master
|
/indexer.py
|
# -*- coding: utf-8 -*-
"""
指标类,用于管理指标相内容:
1.参数,包括参数控件的内容
para_name:参数名列表
para_dic: 参数字典,键为参数名,值为参数值
para_widgets_dic: 参数按键字典, 键为参数名,值为控件名
2.数据
data_dic:数据字典,键为参数名,值为数据
3.画图
plt:主图控件
plt_dic:子图控件字典,键为参数名,值为子图控件
"""
class IndexerBase(object):
color_list = ['w', 'y', 'c', 'r', 'g']
def __init__(self, plt, ):
self.is_avtived = True
self.plt = plt
self.para_name = []
self.para_dic = {}
self.para_widgets_dic = {}
self.data_dic = {}
self.plt_dic = {}
pass
def draw(self):
pass
def reflesh(self):
pass
def set_data(self):
pass
def set_all_para(self):
for k, v in self.para_widgets_dic.items():
p = self.set_para(v)
if p:
self.para_dic[k] = p
else:
self.para_dic[k] = 0
self.set_data()
def set_para(self, lindEdit_widgets):
t = lindEdit_widgets.text()
if t:
try:
p=int(t)
return p
except:
print (u"请检查输入内容,只接受数字")
return None
def get_indexer_value_text(self, pos):
        # Return a formatted string of the indicator values at the given position
t = ""
i = 0
for pname in self.para_name:
c = self.color_list[i]
t += "<span style='color: %s'>%s=%0.3f </span>" % (c, pname, self.data_dic[pname][pos])
i += 1
return t
class Indexer_MA(IndexerBase):
def __init__(self, plt, rawdata, para_widgets_list):
        super(Indexer_MA, self).__init__(plt)
self.plt = plt
self.is_avtived = True
self.plt = plt
self.para_name = []
self.para_dic = {}
self.para_widgets_dic = {}
self.data_dic = {}
self.plt_dic = {}
self.para_name = ['N1', 'N2', 'N3', 'N4', 'N5']
        # Grab the raw data
self.series_close = rawdata['close']
        # Read the parameters from the widgets
for i in range(len(para_widgets_list)):
para_name = self.para_name[i]
pwidget = para_widgets_list[i]
self.para_widgets_dic[para_name] = pwidget
self.set_all_para()
        # Prepare the data
self.set_data()
pass
def draw(self):
if self.is_avtived:
for i in range(len(self.para_name)):
pname = self.para_name[i]
if pname in self.para_dic.keys():
self.plt_dic[pname]=self.plt.plot(name=pname,pen=self.color_list[i])
self.plt_dic[pname].setData(self.data_dic[pname])
def reflesh(self):
for k, d in self.data_dic.items():
self.plt_dic[k].setData(d)
def set_data(self,):
for k, d in self.para_dic.items():
self.data_dic[k] = self.series_close.rolling(d).mean()
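# Illustrative note (added, not part of the original file): set_data above is just a pandas
# rolling mean per parameter. With the default values from the kviewer1.py line edits
# (N1=5, N2=10, N3=20, N4=30, N5=50 -- the widget wiring is an assumption), the equivalent
# standalone computation would be:
#
#     windows = {'N1': 5, 'N2': 10, 'N3': 20, 'N4': 30, 'N5': 50}
#     data_dic = {name: rawdata['close'].rolling(n).mean() for name, n in windows.items()}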
|
{"/ChildGraph.py": ["/Indexer/__init__.py"], "/complexExample.py": ["/complex2.py"], "/decouple_window.py": ["/nullWindow.py"], "/kviewer_app.py": ["/kviewer2.py", "/indexer.py", "/parameter2.py"], "/Indexer/IndexerWidget.py": ["/Indexer/__init__.py"], "/MainFrame.py": ["/KViewer_new.py"], "/KViewer_new.py": ["/Indexer/__init__.py", "/ChildGraph.py", "/DataInterface/DataInterface.py"]}
|
3,174
|
smartgang/KViewer
|
refs/heads/master
|
/complex2.py
|
# -*- coding: utf-8 -*-
#from PySide import QtCore, QtGui
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(803, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(0, 0, 801, 551))
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.tabWidget_2 = QtWidgets.QTabWidget(self.tab)
self.tabWidget_2.setGeometry(QtCore.QRect(0, 0, 801, 531))
self.tabWidget_2.setObjectName("tabWidget_2")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.treeWidget = QtWidgets.QTreeWidget(self.tab_3)
self.treeWidget.setGeometry(QtCore.QRect(0, 0, 791, 501))
self.treeWidget.setObjectName("treeWidget")
item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
item_1 = QtWidgets.QTreeWidgetItem(item_0)
self.tabWidget_2.addTab(self.tab_3, "")
self.tab_4 = QtWidgets.QWidget()
self.tab_4.setObjectName("tab_4")
self.verticalLayoutWidget = QtWidgets.QWidget(self.tab_4)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 791, 501))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.dateEdit = QtWidgets.QDateEdit(self.verticalLayoutWidget)
self.dateEdit.setObjectName("dateEdit")
self.verticalLayout.addWidget(self.dateEdit)
self.calendarWidget = QtWidgets.QCalendarWidget(self.verticalLayoutWidget)
self.calendarWidget.setObjectName("calendarWidget")
self.verticalLayout.addWidget(self.calendarWidget)
self.tabWidget_2.addTab(self.tab_4, "")
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.groupBox = QtWidgets.QGroupBox(self.tab_2)
self.groupBox.setGeometry(QtCore.QRect(20, 10, 73, 92))
self.groupBox.setObjectName("groupBox")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.radioButton = QtWidgets.QRadioButton(self.groupBox)
self.radioButton.setObjectName("radioButton")
self.verticalLayout_2.addWidget(self.radioButton)
self.radioButton_2 = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_2.setObjectName("radioButton_2")
self.verticalLayout_2.addWidget(self.radioButton_2)
self.radioButton_3 = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_3.setObjectName("radioButton_3")
self.verticalLayout_2.addWidget(self.radioButton_3)
self.groupBox_2 = QtWidgets.QGroupBox(self.tab_2)
self.groupBox_2.setGeometry(QtCore.QRect(440, 30, 321, 151))
self.groupBox_2.setObjectName("groupBox_2")
self.widget = QtWidgets.QWidget(self.groupBox_2)
self.widget.setGeometry(QtCore.QRect(60, 30, 172, 102))
self.widget.setObjectName("widget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.dial = QtWidgets.QDial(self.widget)
self.dial.setObjectName("dial")
self.horizontalLayout.addWidget(self.dial)
self.lcdNumber = QtWidgets.QLCDNumber(self.widget)
self.lcdNumber.setObjectName("lcdNumber")
self.horizontalLayout.addWidget(self.lcdNumber)
self.fontComboBox = QtWidgets.QFontComboBox(self.tab_2)
self.fontComboBox.setGeometry(QtCore.QRect(60, 230, 381, 22))
self.fontComboBox.setObjectName("fontComboBox")
self.label = QtWidgets.QLabel(self.tab_2)
self.label.setGeometry(QtCore.QRect(60, 290, 381, 71))
self.label.setScaledContents(False)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setWordWrap(False)
self.label.setObjectName("label")
self.progressBar = QtWidgets.QProgressBar(self.tab_2)
self.progressBar.setGeometry(QtCore.QRect(60, 480, 661, 23))
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName("progressBar")
self.tabWidget.addTab(self.tab_2, "")
self.tab_5 = QtWidgets.QWidget()
self.tab_5.setObjectName("tab_5")
self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.tab_5)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(-1, -1, 791, 531))
self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.tabWidget.addTab(self.tab_5, "")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 803, 23))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(1)
self.tabWidget_2.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtWidgets.QApplication.translate("MainWindow", "MainWindow"))
self.treeWidget.headerItem().setText(0, QtWidgets.QApplication.translate("MainWindow", u"第一列"))
self.treeWidget.headerItem().setText(1, QtWidgets.QApplication.translate("MainWindow", "New Column"))
__sortingEnabled = self.treeWidget.isSortingEnabled()
self.treeWidget.setSortingEnabled(False)
self.treeWidget.topLevelItem(0).setText(0, QtWidgets.QApplication.translate("MainWindow", u"子条目一"))
self.treeWidget.topLevelItem(0).child(0).setText(0, QtWidgets.QApplication.translate("MainWindow", u"子条目一一"))
self.treeWidget.setSortingEnabled(__sortingEnabled)
self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_3), QtWidgets.QApplication.translate("MainWindow", u"树"))
self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_4), QtWidgets.QApplication.translate("MainWindow", u"日历"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QtWidgets.QApplication.translate("MainWindow", "Tab 1"))
self.groupBox.setTitle(QtWidgets.QApplication.translate("MainWindow", u"功能选择"))
self.radioButton.setText(QtWidgets.QApplication.translate("MainWindow", u"默认"))
self.radioButton_2.setText(QtWidgets.QApplication.translate("MainWindow", u"重置"))
self.radioButton_3.setText(QtWidgets.QApplication.translate("MainWindow", u"选项3"))
self.groupBox_2.setTitle(QtWidgets.QApplication.translate("MainWindow", u"移动刻度盘"))
self.label.setText(QtWidgets.QApplication.translate("MainWindow", "TextLabel"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), QtWidgets.QApplication.translate("MainWindow", "Tab 2"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), QtWidgets.QApplication.translate("MainWindow", "绘图"))
|
{"/ChildGraph.py": ["/Indexer/__init__.py"], "/complexExample.py": ["/complex2.py"], "/decouple_window.py": ["/nullWindow.py"], "/kviewer_app.py": ["/kviewer2.py", "/indexer.py", "/parameter2.py"], "/Indexer/IndexerWidget.py": ["/Indexer/__init__.py"], "/MainFrame.py": ["/KViewer_new.py"], "/KViewer_new.py": ["/Indexer/__init__.py", "/ChildGraph.py", "/DataInterface/DataInterface.py"]}
|
3,175
|
smartgang/KViewer
|
refs/heads/master
|
/complexExample.py
|
# -*- coding: utf-8 -*-
import complex2
from PyQt5 import QtCore, QtWidgets, QtGui
import sys
import time
import pyqtgraph as pg
import pandas as pd
import tushare as ts
import datetime
from matplotlib.pylab import date2num
class MainWindow(object):
def __init__(self):
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
self.ui = complex2.Ui_MainWindow()
self.ui.setupUi(MainWindow)
self.update_date()
self.update_calendar()
self.set_lcd()
self.set_dial()
#self.zero_progress()
#self.click_radio3()
self.update_progressbar()
self.set_font()
        # Prepare the data once so several plots can share it
#hist_data = ts.get_hist_data('600519', start='2010-05-01', end='2017-11-04')
#hist_data.to_csv('hist_data.csv')
hist_data = pd.read_csv('hist_data.csv')
self.t = range(hist_data.shape[0])
self.open = hist_data.open.tolist()
self.high = hist_data.high.tolist()
self.low = hist_data.low.tolist()
self.close = hist_data.close.tolist()
packdate = zip(self.t,self.open, self.close, self.low, self.high)
ma5 = hist_data.close.rolling(5).mean().tolist()
self.plt1 = self.chart(hist_data['date'].tolist(),packdate)
self.plt2 = self.chart2(self.t, self.close)
self.plt1.plot(ma5)
        # Linear region selector on the second (lower) plot; it drives the visible range of the first plot
self.region = pg.LinearRegionItem()
self.region.setZValue(10)
self.region.sigRegionChanged.connect(self.update_plt1)
self.plt1.sigRangeChanged.connect(self.updateRegion)
self.region.setRegion([0, 100])
# Add the LinearRegionItem to the ViewBox, but tell the ViewBox to exclude this
# item when doing auto-range calculations.
self.plt2.addItem(self.region, ignoreBounds=True)
self.ui.verticalLayout_3.addWidget(self.plt1)
self.ui.verticalLayout_3.addWidget(self.plt2)
MainWindow.show()
sys.exit(app.exec_())
def update_date(self):
self.ui.dateEdit.setDate(self.ui.calendarWidget.selectedDate())
def update_calendar(self):
self.ui.calendarWidget.selectionChanged.connect(self.update_date)
def set_lcd(self):
self.ui.lcdNumber.display(self.ui.dial.value())
def set_dial(self):
self.ui.dial.valueChanged['int'].connect(self.set_lcd)
    # Radio button 2 resets the progress bar
def zero_progress(self):
self.ui.radioButton_2.clicked.connect(self.ui.progressBar.reset)
def update_progress(self):
value = self.ui.lcdNumber.value()
self.ui.progressBar.setValue(value)
def click_radio3(self):
self.ui.radioButton_3.clicked.connect(self.update_progress)
def set_font(self):
self.ui.fontComboBox.activated['QString'].connect(self.ui.label.setText)
def progressBar_counter(self, start_value=0):
self.run_thread = RunThread(parent=None, counter_start=start_value)
self.run_thread.start()
self.run_thread.counter_value.connect(self.set_progressbar)
def set_progressbar(self, counter):
if not self.stop_progress:
self.ui.progressBar.setValue(counter)
    # Drive the progressBar from a worker thread.
    # RunThread keeps counting and emits an int-valued signal on every tick.
    # start_progressbar first reads the progress bar's current value and counts up from there,
    # so in the UI the bar appears to resume from where it last stopped.
    # In reality the RunThread has already terminated when Stop is clicked; restarting creates a brand-new thread.
def update_progressbar(self):
self.ui.radioButton.clicked.connect(self.start_progressbar)
self.ui.radioButton_2.clicked.connect(self.stop_progressbar)
self.ui.radioButton_3.clicked.connect(self.reset_progressbar)
self.progress_value = 0
self.stop_progress = False
def start_progressbar(self):
self.stop_progress = False
self.progress_value = self.ui.progressBar.value()
self.progressBar_counter(self.progress_value)
def stop_progressbar(self):
self.stop_progress = True
try:
self.run_thread.stop()
except:
pass
def reset_progressbar(self):
self.progress_value = 0
self.ui.progressBar.reset()
#self.stop_progress = False
self.stop_progressbar()
def chart(self,date_list, data_list):
"""
data_list = []
i = 0
for dates, row in hist_data.iterrows():
#date_time = datetime.datetime.strptime(dates, "%Y-%m-%d")
#t = date2num(date_time)
open, high, close, low = row[:4]
datas = (i, open, close, low, high)
i+=1
data_list.append(datas)
# axis_dic = dict(enumerate(axis))
#print (data_list)
"""
item = CandlestickItem(data_list)
axis = DateAxis(date_strings=date_list, orientation='bottom')
plt = pg.PlotWidget(axisItems={'bottom': axis})
#plt = pg.PlotWidget()
plt.addItem(item, )
# plt.setXRange()
plt.showGrid(x=True, y=True)
return plt
def chart2(self,x,y):
#y = hist_data['close'].tolist()
#x_datas =hist_data.index.tolist()
#x=range(len(y))
#for x1 in x_datas:
# date_time = datetime.datetime.strptime(x1, "%Y-%m-%d")
# x.append(date2num(date_time))
# axis_dic = dict(enumerate(axis))
#print (close_list)
plt = pg.PlotWidget()
plt.addLegend() # 加上图标
plt.plot(x=x,y=y, pen="w", name='close')
#plt.addItem(item, )
# plt.setXRange()
#plt.showGrid(x=True, y=True)
return plt
def update_plt1(self):
self.region.setZValue(10)
minX, maxX = self.region.getRegion()
        # Auto-fit the Y axis
int_minY = max(0,int(minX))
int_maxY = max(1, int(maxX))
minY = min(self.low[int_minY:int_maxY]) - 5
maxY = max(self.high[int_minY:int_maxY]) +5
self.plt1.setYRange(minY, maxY)
self.plt1.setXRange(minX, maxX, padding=0)
def updateRegion(self,window, viewRange):
rgn = viewRange[0]
self.region.setRegion(rgn)
class RunThread(QtCore.QThread):
    # Define a signal that carries an int
counter_value = QtCore.pyqtSignal(int)
def __init__(self, parent=None, counter_start=0):
super(RunThread, self).__init__(parent)
self.counter = counter_start
self.is_running = True
def run(self):
while self.counter < 100 and self.is_running == True:
time.sleep(0.1)
self.counter += 1
print (self.counter)
            self.counter_value.emit(self.counter)  # emit the signal
def stop(self):
self.is_running = False
print ("线程停止中...")
self.terminate()
class DateAxis(pg.AxisItem):
def __init__(self, date_strings, orientation):
pg.AxisItem.__init__(self,orientation)
self.date_strings = date_strings
self.len = len(self.date_strings)
def tickStrings(self, values, scale, spacing):
"""
strns = []
rng = max(values) - min(values)
# if rng < 120:
# return pg.AxisItem.tickStrings(self, values, scale, spacing)
if rng < 3600 * 24:
string = '%H:%M:%S'
label1 = '%b %d -'
label2 = ' %b %d, %Y'
elif rng >= 3600 * 24 and rng < 3600 * 24 * 30:
string = '%d'
label1 = '%b - '
label2 = '%b, %Y'
elif rng >= 3600 * 24 * 30 and rng < 3600 * 24 * 30 * 24:
string = '%b'
label1 = '%Y -'
label2 = ' %Y'
elif rng >= 3600 * 24 * 30 * 24:
string = '%Y'
label1 = ''
label2 = ''
for x in values:
try:
strns.append(time.strftime(string, time.localtime(x)))
except ValueError: ## Windows can't handle dates before 1970
strns.append('')
try:
label = time.strftime(label1, time.localtime(min(values))) + time.strftime(label2,
time.localtime(max(values)))
except ValueError:
label = ''
# self.setLabel(text=label)
return strns
"""
#print values
strns = []
for x in values:
x1 = int(x)
if 0 <= x1 < self.len:
strns.append(self.date_strings[x1])
else:
strns.append('')
return strns
## Create a subclass of GraphicsObject.
## The only required methods are paint() and boundingRect()
## (see QGraphicsItem documentation)
class CandlestickItem(pg.GraphicsObject):
def __init__(self, data):
pg.GraphicsObject.__init__(self)
self.data = data ## data must have fields: time, open, close, min, max
self.generatePicture()
def generatePicture(self):
## pre-computing a QPicture object allows paint() to run much more quickly,
## rather than re-drawing the shapes every time.
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(pg.mkPen('w'))
w = (self.data[1][0] - self.data[0][0]) / 3.
for (t, open, close, min, max) in self.data:
p.drawLine(QtCore.QPointF(t, min), QtCore.QPointF(t, max))
if open > close:
p.setBrush(pg.mkBrush('r'))
else:
p.setBrush(pg.mkBrush('g'))
p.drawRect(QtCore.QRectF(t - w, open, w * 2, close - open))
p.end()
def paint(self, p, *args):
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
## boundingRect _must_ indicate the entire area that will be drawn on
## or else we will get artifacts and possibly crashing.
        ## (in this case, QPicture does all the work of computing the bounding rect for us)
return QtCore.QRectF(self.picture.boundingRect())
if __name__=='__main__':
MainWindow()
|
{"/ChildGraph.py": ["/Indexer/__init__.py"], "/complexExample.py": ["/complex2.py"], "/decouple_window.py": ["/nullWindow.py"], "/kviewer_app.py": ["/kviewer2.py", "/indexer.py", "/parameter2.py"], "/Indexer/IndexerWidget.py": ["/Indexer/__init__.py"], "/MainFrame.py": ["/KViewer_new.py"], "/KViewer_new.py": ["/Indexer/__init__.py", "/ChildGraph.py", "/DataInterface/DataInterface.py"]}
|
3,176
|
smartgang/KViewer
|
refs/heads/master
|
/decouple_window.py
|
# -*- coding: utf-8 -*-
import nullWindow
from PyQt5 import QtCore, QtWidgets, QtGui
if __name__=='__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = nullWindow.Ui_MainWindow()
ui.setupUi(MainWindow)
ui.tableWidget.setItem(0,0,QtWidgets.QTableWidgetItem(u'数据1'))
ui.tableWidget.setItem(1, 1, QtWidgets.QTableWidgetItem(u'数据2'))
ui.tableWidget.setItem(2, 2, QtWidgets.QTableWidgetItem(u'数据3'))
MainWindow.show()
sys.exit(app.exec_())
|
{"/ChildGraph.py": ["/Indexer/__init__.py"], "/complexExample.py": ["/complex2.py"], "/decouple_window.py": ["/nullWindow.py"], "/kviewer_app.py": ["/kviewer2.py", "/indexer.py", "/parameter2.py"], "/Indexer/IndexerWidget.py": ["/Indexer/__init__.py"], "/MainFrame.py": ["/KViewer_new.py"], "/KViewer_new.py": ["/Indexer/__init__.py", "/ChildGraph.py", "/DataInterface/DataInterface.py"]}
|
3,177
|
smartgang/KViewer
|
refs/heads/master
|
/Indexer/HullRsi.py
|
# -*- coding: utf-8 -*-
from IndexerBase import IndexerBase
import numpy as np
import talib
class HULL_RSI(IndexerBase):
indexer_name = 'HULL_RSI'
indexer_name_list = ['RSI']
default_para_dic = {
'N1': 5,
'M1': 5,
'M2': 9,
'N': 8
}
def __init__(self, raw_data, plt):
super(HULL_RSI, self).__init__(raw_data, plt)
        self.indexer_name_list = ['RSI']  # for MA both the indicator names and the parameter names depend on the parameters, so they are set per instance
self.indexer_color_dic = {
'RSI': 'blue'
}
def calculate_indexer_value(self):
n1 = self.para_dic['N1']
m1 = self.para_dic['M1']
m2 = self.para_dic['M2']
n = self.para_dic['N']
close_array = np.array(self.raw_data['close'].values, dtype='float')
n = float(n)
rsi_data = talib.RSI(close_array, n1)
rsi_ema1 = talib.EMA(rsi_data, m1)
rsi_ema2 = talib.EMA(rsi_ema1, m2)
rsi_new = rsi_ema1 - rsi_ema2
n_2 = round(n / 2, 0)
n_squr = round(np.sqrt(n), 0)
wma1 = talib.MA(rsi_new, n, matype=2)
wma2 = talib.MA(rsi_new, n_2, matype=2)
x = wma2 * 2 - wma1
hull_ma = talib.MA(x, n_squr, matype=2)
self.indexer_value_dic['RSI'] = hull_ma
    def draw_indexer(self):
        for indexer_name, values in self.indexer_value_dic.items():
            # the first letter of the colour name doubles as pyqtgraph's
            # single-character pen code ('blue' -> 'b', 'green' -> 'g', ...)
            c = self.indexer_color_dic[indexer_name][0]
            self.plt_dic[indexer_name] = self.plt.plot(name=indexer_name, pen=c)
            self.plt_dic[indexer_name].setData(values)
def re_draw_indexer(self):
for pname, values in self.indexer_value_dic.items():
self.plt_dic[pname].setData(values)
def get_polar_value(self,start_pos, end_pos):
max_v = max(self.indexer_value_dic['RSI'][start_pos:end_pos])
min_v = min(self.indexer_value_dic['RSI'][start_pos:end_pos])
return max_v, min_v
|
{"/ChildGraph.py": ["/Indexer/__init__.py"], "/complexExample.py": ["/complex2.py"], "/decouple_window.py": ["/nullWindow.py"], "/kviewer_app.py": ["/kviewer2.py", "/indexer.py", "/parameter2.py"], "/Indexer/IndexerWidget.py": ["/Indexer/__init__.py"], "/MainFrame.py": ["/KViewer_new.py"], "/KViewer_new.py": ["/Indexer/__init__.py", "/ChildGraph.py", "/DataInterface/DataInterface.py"]}
|
3,178
|
smartgang/KViewer
|
refs/heads/master
|
/Indexer/DMI.py
|
# -*- coding: utf-8 -*-
from IndexerBase import IndexerBase
import numpy as np
import pandas as pd
class DMI(IndexerBase):
indexer_name = 'DMI'
indexer_name_list = ['PDI', 'MDI', 'ADX', 'ADXR']
default_para_dic = {
'N': 14,
'M': 6,
}
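    # N: rolling window for the TR / +DM / -DM sums; M: smoothing window for ADX
    # and the lag used when computing ADXR (see calculate_indexer_value)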
def __init__(self, raw_data, plt):
super(DMI, self).__init__(raw_data, plt)
        self.indexer_name_list = ['PDI', 'MDI', 'ADX', 'ADXR']  # indicator names depend on the parameter settings, so they are set per instance
self.indexer_color_dic = {
'PDI': 'blue',
'MDI': 'magenta',
'ADX': 'cyan',
'ADXR': 'green'
}
def calculate_indexer_value(self):
n = self.para_dic['N']
m = self.para_dic['M']
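        # Outline of the calculation below, as implemented (prev_* means shift(1)):
        #   TR   = n-bar rolling sum of max(high - low, high - prev_close, |low - prev_close|)
        #   DMP  = n-bar rolling sum of HD where HD > LD and HD > 0   (HD = high - prev_high)
        #   DMM  = n-bar rolling sum of LD where LD > HD and LD > 0   (LD = prev_low - low)
        #   PDI  = 100 * DMP / TR,   MDI = 100 * DMM / TR
        #   ADX  = m-bar rolling mean of 100 * |MDI - PDI| / (MDI + PDI)
        #   ADXR = (ADX + ADX shifted back m bars) / 2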
high = self.raw_data.high
low = self.raw_data.low
close = self.raw_data.close
closeshift1 = close.shift(1).fillna(0)
c = high - low
d = high - closeshift1
df1 = pd.DataFrame({'c': c, 'd': d})
df1['A'] = df1.max(axis=1)
df1.drop('c', axis=1, inplace=True)
df1.drop('d', axis=1, inplace=True)
df1['B'] = np.abs(low - closeshift1)
df1['C'] = df1.max(axis=1)
df1['TR'] = df1['C'].rolling(n).sum()
HD = high - high.shift(1).fillna(0)
LD = low.shift(1).fillna(0) - low
df1['HD'] = HD
df1['LD'] = LD
df2 = pd.DataFrame({'HD': HD, 'LD': LD})
df2['DMP_1'] = df2[(df2['HD'] > df2['LD']) & (df2['HD'] > 0)]['HD']
df2['DMM_1'] = df2[(df2['LD'] > df2['HD']) & (df2['LD'] > 0)]['LD']
df2 = df2.fillna(0)
df1['DMP'] = df2['DMP_1'].rolling(n).sum()
df1['DMM'] = df2['DMM_1'].rolling(n).sum()
del df2
df1['PDI'] = df1['DMP'] * 100 / df1['TR']
df1['MDI'] = df1['DMM'] * 100 / df1['TR']
adx = np.abs(df1['MDI'] - df1['PDI']) / (df1['MDI'] + df1['PDI']) * 100
df1['ADX'] = adx.rolling(m).mean()
df1['ADXR'] = (df1['ADX'] + df1['ADX'].shift(m).fillna(0)) / 2
self.indexer_value_dic['PDI'] = df1['PDI'].tolist()
self.indexer_value_dic['MDI'] = df1['MDI'].tolist()
self.indexer_value_dic['ADX'] = df1['ADX'].tolist()
self.indexer_value_dic['ADXR'] = df1['ADXR'].tolist()
    def draw_indexer(self):
        for indexer_name, values in self.indexer_value_dic.items():
            # the first letter of the colour name doubles as pyqtgraph's
            # single-character pen code ('b', 'm', 'c', 'g')
            c = self.indexer_color_dic[indexer_name][0]
            self.plt_dic[indexer_name] = self.plt.plot(name=indexer_name, pen=c)
            self.plt_dic[indexer_name].setData(values)
def re_draw_indexer(self):
for pname, values in self.indexer_value_dic.items():
self.plt_dic[pname].setData(values)
def get_polar_value(self,start_pos, end_pos):
max_v = max(max(self.indexer_value_dic['PDI'][start_pos:end_pos]),
max(self.indexer_value_dic['MDI'][start_pos:end_pos]),
max(self.indexer_value_dic['ADX'][start_pos:end_pos]),
max(self.indexer_value_dic['ADXR'][start_pos:end_pos]))
min_v = min(min(self.indexer_value_dic['PDI'][start_pos:end_pos]),
min(self.indexer_value_dic['MDI'][start_pos:end_pos]),
min(self.indexer_value_dic['ADX'][start_pos:end_pos]),
min(self.indexer_value_dic['ADXR'][start_pos:end_pos]))
return max_v, min_v
|
{"/ChildGraph.py": ["/Indexer/__init__.py"], "/complexExample.py": ["/complex2.py"], "/decouple_window.py": ["/nullWindow.py"], "/kviewer_app.py": ["/kviewer2.py", "/indexer.py", "/parameter2.py"], "/Indexer/IndexerWidget.py": ["/Indexer/__init__.py"], "/MainFrame.py": ["/KViewer_new.py"], "/KViewer_new.py": ["/Indexer/__init__.py", "/ChildGraph.py", "/DataInterface/DataInterface.py"]}
|