seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
70077253225 | from __future__ import print_function
import requests, lxml.html

# Poll a fixed list of Packt product pages and report which titles are
# available (no longer in pre-order), then print the current free book.
headers = {'user-agent': 'taco'}  # minimal UA so requests are not rejected
urls_to_check = [
    'http://www.packtpub.com/application-development/python-data-structures-and-algorithm',
    'https://www.packtpub.com/big-data-and-business-intelligence/learning-data-mining-python-second-edition',
    'https://www.packtpub.com/big-data-and-business-intelligence/neural-network-programming-python',
    'https://www.packtpub.com/application-development/python-programming-blueprints'
]
print()
for url in urls_to_check:
    # The last URL segment doubles as a human-readable book title.
    title = url.split('/')[-1].replace('-', ' ').title()
    print('Checking for title: %s'%title)
    page = requests.get(url, headers=headers).content
    tree = lxml.html.fromstring(page)
    # No "pre-order" / "alpha" markers on the page => the book is released.
    if not tree.cssselect('.title-preorder') and not tree.cssselect('.alpha-text'):
        print('\t\n%s [READY FOR DOWNLOAD]\n'%title)
    else:
        print('\t\t\t\t\t\t\t(negative)')
url = 'https://www.packtpub.com/packt/offers/free-learning'
print('Checking the [FREE] title...')
page = requests.get(url, headers=headers).content
tree = lxml.html.fromstring(page)
print('\n\tFree Book: %s\n'%tree.cssselect('.dotd-title h2')[0].text_content().strip())
| chris-hamberg/scrapers | packt.py | packt.py | py | 1,212 | python | en | code | 0 | github-code | 36 |
9423482446 | #programa que lea un periodo de tiempo en segundos
# y muestre el numero de dias, horas, minutos y segundos
dias = 0
horas = 0
minuto = 0
segundos = 0
total = 0
dato_leido = input('Introduzca el número de segundos: ')
if dato_leido.isdigit():
segundos = int(dato_leido)
meses = segundos // 2592000
segundos = segundos % 2592000
dias = segundos // 86400
segundos = segundos % 86400
horas = segundos // 3600
segundos = segundos % 3600
minutos = segundos // 60
segundos = segundos % 60
print(f'{meses} meses, {dias} dias, {horas} horas, {minutos} minutos, {segundos} segundos')
else:
print('Este programa solo acepta números')
| staryellow7/Ejercicios-Python | Unidad 1 - Guia de Ejercicios 1/ejercicio14.py | ejercicio14.py | py | 673 | python | es | code | 0 | github-code | 36 |
246221889 | import datetime
import pandas as pd
from helper.fetch import Fetch
from helper.dynamic_scrape import DynamicScrape
from helper.property_helper import PropertyHelper
class Dhalia(object):
    """Scraper for dhalia.com property listings (sale and rental).

    Drives a selenium browser through the project's Fetch/DynamicScrape
    helpers and collects one row per listing into a pandas DataFrame.
    """

    # Value written to the output's "Source" column.
    source = 'Dhalia'
    # Column order of each scraped listing record.
    columns = [
        'Reference', 'Town', 'Type', 'Stage',
        'Bedrooms', 'Bathrooms',
        'TotalSqm', 'IntArea', 'ExtArea', 'Price'
    ]

    @staticmethod
    def fetch_data(is_sale: bool) -> pd.DataFrame:
        """Scrape every result page of the buy (or rent) search.

        Rental prices are normalized to a monthly amount (daily * 30,
        yearly / 12).  Returns a DataFrame of ``Dhalia.columns`` with
        Is_Sale and Source columns prepended.
        """
        data = pd.DataFrame()
        proxies = Fetch.load_proxies()
        page_type = 'buy' if is_sale else 'rent'
        page_element = f'//div[@class="searchForm searchForm--quick-search page-{page_type}"]'
        driver = Fetch.get_dynamic(f'https://www.dhalia.com/{page_type}/?pageIndex=1', proxies, page_element, True)
        # The total page count is encoded in the "last page" pager link
        # (its href ends in "...pageIndex=<N>").
        x_pages = '//li[@class="pager__last"]/a'
        DynamicScrape.await_element(driver, x_pages)
        pages = int(DynamicScrape.get_link(driver, x_pages).split('=')[1])
        for page in range(1, pages+1):
            x_links = '//a[@class="propertybox"]'
            links = DynamicScrape.get_links(driver, x_links)
            listing = []
            # XPaths used inside an individual property page.
            x_features = './/div[@class="property-top__col__part property-top__col__part--others"]/span'
            x_type_town = './/div[@class="property-top__col"]/h1'
            x_description = './/div[@class="description write-up"]'
            x_price = './/div[@class="property-top__col__part property-top__col__part--price"]'
            for i, link in enumerate(links):
                page_element = '//section[@class="property-detail-wrapper"]'
                successful = DynamicScrape.open_tab_link(driver, link, page_element)
                if successful:
                    # "features" are free-form spans such as "Ref: X",
                    # "3 Bedrooms", "120 m²" -- picked apart by substring.
                    features = DynamicScrape.get_texts(driver, x_features)
                    reference = [feature for feature in features if 'Ref: ' in feature]
                    reference = reference[0].replace('Ref: ', '').strip() if len(reference) else None
                    # The heading has the shape "<type> in <town>".
                    type_town = DynamicScrape.get_text(driver, x_type_town)
                    town = type_town.split(' in ')[1].strip()
                    type = type_town.split(' in ')[0].strip()
                    stage = PropertyHelper.determine_stage(driver, x_description, is_sale)
                    bedrooms = [side_info for side_info in features if 'Bedrooms' in side_info]
                    bedrooms = bedrooms[0].replace('Bedrooms', '') if len(bedrooms) else None
                    bathrooms = [side_info for side_info in features if 'Bathrooms' in side_info]
                    bathrooms = bathrooms[0].replace('Bathrooms', '') if len(bathrooms) else None
                    # Area span presumably reads "total/internal/external m²"
                    # -- TODO confirm; a 2-part span would raise IndexError.
                    area = [side_info for side_info in features if 'm²' in side_info]
                    area = area[0].replace('m²', '').split('/') if len(area) else None
                    total_sqm = area[0] if area else None
                    int_area = area[1] if area else None
                    ext_area = area[2] if area else None
                    price = DynamicScrape.get_text(driver, x_price)
                    price = price.replace('€', '').replace(',', '')
                    # Normalize rental prices to a monthly figure; a
                    # non-numeric price (e.g. "on request") becomes None.
                    try:
                        if ' daily' in price:
                            price = int(price.replace(' daily', '')) * 30
                        elif ' monthly' in price:
                            price = int(price.replace(' monthly', ''))
                        elif ' yearly' in price:
                            price = round(int(price.replace(' yearly', '')) / 12)
                    except ValueError:
                        price = None
                    listing.append([
                        reference, town, type, stage,
                        bedrooms, bathrooms,
                        total_sqm, int_area, ext_area, price
                    ])
                    DynamicScrape.close_tab_link(driver)
                # Progress line for every visited listing.
                print(
                    '%s\t %s\t Page %03d of %03d\t Entry %03d of %03d' %
                    (datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S"), Dhalia.source + ' ' + page_type.title(), page, pages, i+1, len(links))
                )
            # Concatenate previous data frame with data of current page
            page_data = pd.DataFrame(listing, columns=Dhalia.columns)
            data = pd.concat([data, page_data])
            # Click Next Page
            x_next_page = f'//ul[@class="pager"]/li/a/span[text()="{page+1}"]'
            x_await_page = f'//ul[@class="pager"]/li[@class="pager__current"]/a/span[text()="{page+1}"]'
            DynamicScrape.click_element(driver, x_next_page, x_await_page)
        # Add source and rename columns
        data.insert(0, 'Is_Sale', is_sale)
        data.insert(1, 'Source', Dhalia.source)
        # Close Driver
        Fetch.dynamic_close_browser(driver)
        # Return the data
        return data

    @staticmethod
    def fetch_res_sale():
        """Residential sale listings."""
        return Dhalia.fetch_data(True)

    @staticmethod
    def fetch_res_rent():
        """Residential rental listings."""
        return Dhalia.fetch_data(False)

    @staticmethod
    def fetch_all(file_path: str) -> None:
        """Scrape sales and rentals and write the combined CSV to ``file_path``."""
        # Fetching data
        res_sale = Dhalia.fetch_res_sale()
        res_rent = Dhalia.fetch_res_rent()
        # Concatenate Data
        data = pd.concat([res_sale, res_rent])
        # Save data frame to CSV file
        data.to_csv(file_path, index=False)
| brandonabela/Malta-Property-Analysis | src/export/dhalia.py | dhalia.py | py | 5,369 | python | en | code | 2 | github-code | 36 |
17116817824 | import sys
import argparse
import os
import math
from ROOT import TCanvas, TColor, TGaxis, TH1F, TPad, TString, TFile, TH1, THStack, gROOT, TStyle, TAttFill, TLegend, TGraphAsymmErrors, TLine
from ROOT import kBlack, kBlue, kRed, kCyan, kViolet, kGreen, kOrange, kGray, kPink, kTRUE
from ROOT import Double
from ROOT import gROOT, gStyle
from functools import reduce
# Global ROOT configuration: batch mode (no GUI) and plot styling.
gROOT.SetBatch(1)                 # render off-screen, no X windows
gROOT.Reset()
gStyle.SetCanvasColor(0)          # white canvas background
gStyle.SetFrameBorderMode(0)
gStyle.SetOptStat(0)              # hide the statistics box
gStyle.SetTitleX(0.5)  # title X location
gStyle.SetTitleY(0.96)  # title Y location
gStyle.SetPaintTextFormat(".2f")  # two decimals for in-bin text labels
# options
usage = 'usage: %prog [options]'
parser = argparse.ArgumentParser(usage)
# NOTE(review): parser.parse_args() is never called in this chunk, so no
# command-line options are actually read here -- confirm against the full file.

# Relative log-normal (lnN) rate uncertainties per nuisance parameter:
# theory pdf/QCD-scale terms, Higgs branching ratios, luminosity and
# data-driven normalizations.
Nuisances_lnN={
"pdf_Higgs_ttH":0.036,
"QCDscale_ttH":0.093,
"pdf_tHq":0.010,
"QCDscale_tHq":0.067,
"pdf_tHW":0.027,
"QCDscale_tHW":0.061,
"pdf_TTW":0.04,"QCDscale_TTW":0.129,
"pdf_TTWW":0.03,"QCDscale_TTWW":0.109,
"pdf_TTZ":0.035, "QCDscale_TTZ":0.112,
"CMS_ttHl_WZ_theo":0.07,
"pdf_WH":0.019,"QCDscale_WH":0.07,
"pdf_ZH":0.016,"QCDscale_ZH":0.038,
"pdf_qqH":0.021,"QCDscale_qqH":0.04,
"pdf_ggH":0.031,"QCDscale_ggH":0.081,
"BR_htt":0.016,"BR_hww":0.015,"BR_hzz":0.015,"BR_hzg":0.010,"BR_hmm":0.010,
"lumi":0.03,"CMS_ttHl_QF":0.300,"CMS_ttHl_EWK_4j":0.300,"CMS_ttHl_Convs":0.500,"CMS_ttHl_Rares":0.500,"CMS_ttHl_EWK":0.500,
}
# Which lnN nuisances apply to each process; keys are the sample names
# used for the histograms in the input ROOT files.
lnN_per_sample={
"data_flips":["CMS_ttHl_QF"],
"TTZ":["pdf_TTZ","QCDscale_TTZ","lumi"],
"TTW":["pdf_TTW","QCDscale_TTW","lumi"],
"TTWW":["pdf_TTWW","QCDscale_TTWW","lumi"],
"WZ":["CMS_ttHl_EWK_4j","CMS_ttHl_EWK","lumi"],
"ZZ":["CMS_ttHl_EWK_4j","CMS_ttHl_EWK","lumi"],
"Convs":["CMS_ttHl_Convs","lumi"],
"Rares":["CMS_ttHl_Rares","lumi"],
"ttH_hww":["pdf_Higgs_ttH","QCDscale_ttH","BR_hww","lumi"],
"ttH_hzz":["pdf_Higgs_ttH","QCDscale_ttH","BR_hzz","lumi"],
"ttH_hmm":["pdf_Higgs_ttH","QCDscale_ttH","BR_hmm","lumi"],
"ttH_htt":["pdf_Higgs_ttH","QCDscale_ttH","BR_htt","lumi"],
"ttH_hzg":["pdf_Higgs_ttH","QCDscale_ttH","BR_hzg","lumi"],
"tHW_hww":["pdf_tHW","QCDscale_tHW","BR_hww","lumi"],
"tHW_hzz":["pdf_tHW","QCDscale_tHW","BR_hzz","lumi"],
"tHW_hmm":["pdf_tHW","QCDscale_tHW","BR_hmm","lumi"],
"tHW_htt":["pdf_tHW","QCDscale_tHW","BR_htt","lumi"],
"tHW_hzg":["pdf_tHW","QCDscale_tHW","BR_hzg","lumi"],
"tHq_hww":["pdf_tHq","QCDscale_tHq","BR_hww","lumi"],
"tHq_hzz":["pdf_tHq","QCDscale_tHq","BR_hzz","lumi"],
"tHq_hmm":["pdf_tHq","QCDscale_tHq","BR_hmm","lumi"],
"tHq_htt":["pdf_tHq","QCDscale_tHq","BR_htt","lumi"],
"tHq_hzg":["pdf_tHq","QCDscale_tHq","BR_hzg","lumi"],
"qqH_hww":["pdf_qqH","QCDscale_qqH","BR_hww","lumi"],
"qqH_hzz":["pdf_qqH","QCDscale_qqH","BR_hzz","lumi"],
"qqH_htt":["pdf_qqH","QCDscale_qqH","BR_htt","lumi"],
"ggH_hww":["pdf_ggH","QCDscale_ggH","BR_hww","lumi"],
"ggH_hzz":["pdf_ggH","QCDscale_ggH","BR_hzz","lumi"],
"ggH_htt":["pdf_ggH","QCDscale_ggH","BR_htt","lumi"],
"WH_hww":["pdf_WH","QCDscale_WH","BR_hww","lumi"],
"WH_hzz":["pdf_WH","QCDscale_WH","BR_hzz","lumi"],
"WH_htt":["pdf_WH","QCDscale_WH","BR_htt","lumi"],
"ZH_hww":["pdf_ZH","QCDscale_ZH","BR_hww","lumi"],
"ZH_hzz":["pdf_ZH","QCDscale_ZH","BR_hzz","lumi"],
"ZH_htt":["pdf_ZH","QCDscale_ZH","BR_htt","lumi"],
"TTWH_hww":["BR_hww","lumi"],
"TTWH_hzz":["BR_hzz","lumi"],
"TTWH_htt":["BR_htt","lumi"],
"TTZH_hww":["BR_hww","lumi"],
"TTZH_hzz":["BR_hzz","lumi"],
"TTZH_htt":["BR_htt","lumi"],
}
# Shape systematics applied to every MC sample in every era.
common_shape = ["CMS_ttHl_lepEff_muloose","CMS_ttHl_lepEff_elloose", "CMS_ttHl_lepEff_mutight","CMS_ttHl_lepEff_eltight", "CMS_ttHl_JER","CMS_ttHl_UnclusteredEn","CMS_scale_j_jesFlavorQCD", "CMS_scale_j_jesRelativeBal","CMS_scale_j_jesHF","CMS_scale_j_jesBBEC1","CMS_scale_j_jesEC2","CMS_scale_j_jesAbsolute"]
# Samples that additionally receive the ttH theory shape uncertainties.
thuShape_samples = ["ttH_htt","ttH_hzz","ttH_hww","ttH_hmm","ttH_hzg","tHq_htt","tHq_hww","tHq_hzz","tHW_htt","tHW_hww","tHW_hzz","TTW","TTZ"]
thuShape = ["CMS_ttHl_thu_shape_ttH_x1","CMS_ttHl_thu_shape_ttH_y1"]
# Fake-rate shape systematics (fake background only).
fakeShape = ["CMS_ttHl_Clos_e_shape","CMS_ttHl_Clos_m_shape","CMS_ttHl_FRm_norm","CMS_ttHl_FRm_pt","CMS_ttHl_FRm_be","CMS_ttHl_FRe_norm","CMS_ttHl_FRe_pt","CMS_ttHl_FRe_be"]
# Era-specific shapes: L1 prefiring, b-tag statistics, pileup and the
# year-split JES sources.
shape_2016=[
"CMS_ttHl16_L1PreFiring", "CMS_ttHl16_btag_HFStats1","CMS_ttHl16_btag_HFStats2","CMS_ttHl16_btag_LFStats1","CMS_ttHl16_btag_LFStats2","PU_16",
"CMS_scale_j_jesRelativeSample_2016","CMS_scale_j_jesBBEC1_2016","CMS_scale_j_jesEC2_2016","CMS_scale_j_jesAbsolute_2016","CMS_scale_j_jesHF_2016",
]
shape_2017=[
"CMS_ttHl17_L1PreFiring", "CMS_ttHl17_btag_HFStats1","CMS_ttHl17_btag_HFStats2","CMS_ttHl17_btag_LFStats1","CMS_ttHl17_btag_LFStats2","PU_17",
"CMS_scale_j_jesRelativeSample_2017","CMS_scale_j_jesBBEC1_2017","CMS_scale_j_jesEC2_2017","CMS_scale_j_jesAbsolute_2017","CMS_scale_j_jesHF_2017",
]
shape_2018=[
"CMS_ttHl18_btag_HFStats1","CMS_ttHl18_btag_HFStats2","CMS_ttHl18_btag_LFStats1","CMS_ttHl18_btag_LFStats2","PU_18",
"CMS_scale_j_jesRelativeSample_2018","CMS_scale_j_jesBBEC1_2018","CMS_scale_j_jesEC2_2018","CMS_scale_j_jesAbsolute_2018","CMS_scale_j_jesHF_2018",
]
def draw_underflow_overflow(h1):
    """Draw h1 with its underflow and overflow bins shown on the x axis."""
    # Bin 0 is the underflow bin and bin nbins+1 the overflow bin.
    axis = h1.GetXaxis()
    last_visible = h1.GetNbinsX() + 1
    axis.SetRange(0, last_visible)
    h1.Draw()
    return h1
def fill_underflow_overflow(h1):
    """Move the under/overflow contents into the first/last visible bins.

    Fill() with a weight adds the out-of-range content on top of the
    existing edge-bin content; the flow bins themselves are not cleared.
    """
    last = h1.GetNbinsX()
    h1.Fill(h1.GetBinCenter(1), h1.GetBinContent(0))
    h1.Fill(h1.GetBinCenter(last), h1.GetBinContent(last + 1))
    h1.Draw()
    return h1
def fill_lnN_error(hist_nom, lnNs):
    """Fold relative log-normal (lnN) uncertainties into hist_nom's errors.

    The relative uncertainties are combined in quadrature, scaled by each
    bin's content and added in quadrature to the bin's current error.
    hist_nom is modified in place and returned; an empty list leaves the
    histogram untouched.
    """
    if not lnNs:
        return hist_nom
    combined_rel = reduce(lambda acc, unc: math.sqrt(acc ** 2 + unc ** 2), lnNs)
    for ibin in range(1, hist_nom.GetNbinsX() + 1):
        content = hist_nom.GetBinContent(ibin)
        stat_err = hist_nom.GetBinError(ibin)
        lnn_err = content * combined_rel
        hist_nom.SetBinError(ibin, math.sqrt(stat_err ** 2 + lnn_err ** 2))
    return hist_nom
def set_lnN_error(hist_nom, lnNs):
    """Overwrite hist_nom's bin errors with the pure lnN systematic.

    Each bin error becomes content * (quadrature sum of the relative
    uncertainties); with no uncertainties every error is reset to zero.
    hist_nom is modified in place and returned.
    """
    nbins = hist_nom.GetNbinsX()
    if not lnNs:
        for ibin in range(1, nbins + 1):
            hist_nom.SetBinError(ibin, 0)
        return hist_nom
    combined_rel = reduce(lambda acc, unc: math.sqrt(acc ** 2 + unc ** 2), lnNs)
    for ibin in range(1, nbins + 1):
        hist_nom.SetBinError(ibin, hist_nom.GetBinContent(ibin) * combined_rel)
    return hist_nom
def fill_shape_error(hist_nom, hist_up, hist_down):
    """Add a shape systematic to hist_nom's bin errors in quadrature.

    For every bin the up/down variations are compared to the nominal
    content, the larger absolute deviation is taken as the systematic
    error, and it is combined in quadrature with the bin's current error.
    hist_nom is modified in place and returned.
    """
    nbin = hist_nom.GetNbinsX()
    for i in range(1, nbin + 1):
        central_val = hist_nom.GetBinContent(i)
        error_nom = hist_nom.GetBinError(i)
        error_up = abs(central_val - hist_up.GetBinContent(i))
        # Bug fix: the down deviation was previously computed from
        # hist_up as well, so the down variation was silently ignored.
        error_down = abs(central_val - hist_down.GetBinContent(i))
        error_syst = max(error_up, error_down)
        error = math.sqrt(error_nom**2 + error_syst**2)
        hist_nom.SetBinError(i, error)
    return hist_nom
def find_lnN(keyname):
    """Look up the relative lnN uncertainty fractions for one sample.

    Resolves ``keyname`` through lnN_per_sample to a list of nuisance
    names, then maps each to its fraction in Nuisances_lnN.  Unknown
    samples or nuisances are reported on stdout and skipped.
    """
    nuisance_names = lnN_per_sample.get(keyname)
    if nuisance_names is None:
        print("########## WARNING ######### {} is not found in lnN_per_sample, set it to empty list ".format(keyname))
        nuisance_names = []
    fractions = []
    for nuisance in nuisance_names:
        if nuisance in Nuisances_lnN:
            fractions.append(Nuisances_lnN[nuisance])
        else:
            print("########## WARNING ######### {} is not found in Nuisances_lnN, skip this nuisance ".format(nuisance))
    return fractions
def find_shapes(keyname, era):
    """Return the shape-systematic names applying to a sample in an era.

    Combines the common shapes with the era-specific list, then: fake
    samples get only the fake-rate shapes, data gets none, the samples in
    thuShape_samples additionally get the theory shapes.  Exits the
    program on an unknown era.
    """
    if era == "2016":
        mc_shapes = common_shape + shape_2016
    elif era == "2017":
        mc_shapes = common_shape + shape_2017
    elif era == "2018":
        mc_shapes = common_shape + shape_2018
    else:
        print("ERROR year must be 2016 2017 or 2018")
        sys.exit()
    if "fakes" in keyname or "Fakes" in keyname:
        return fakeShape
    if "data" in keyname:
        return []
    if keyname in thuShape_samples:
        return mc_shapes + thuShape
    return mc_shapes
def getvarhists(rfile, keyname, systname):
    """Fetch the Up/Down varied histograms of one systematic from rfile.

    Both histograms are detached from the file (SetDirectory(0)) so they
    survive the file being closed.  Returns the pair (up, down).
    """
    up_hist = rfile.Get("{}_{}Up".format(keyname, systname))
    up_hist.SetDirectory(0)
    down_hist = rfile.Get("{}_{}Down".format(keyname, systname))
    down_hist.SetDirectory(0)
    return up_hist, down_hist
# outtput
# Build per-year and run-II-summed histograms with full (stat+syst),
# stat-only and syst-only errors, and write them to one output file.
# NOTE(review): outputdir, region, cutname, inputDir, catflag, features and
# sampleName are not defined anywhere in this chunk -- presumably set from
# command-line options in a part of the file not shown here; confirm.
outfilename = "{}/ttH_{}_{}_full_uncertanty_runII.root".format(outputdir, region, cutname)
f_out = TFile(outfilename, "recreate")
print(" recreate file {}".format(outfilename))
for feature, values in features.items():
    for sample in sampleName:
        # Names of the histograms summed over the three data-taking years.
        outhist_sum = sample+"_"+feature+"_runII"
        outhist_sum_stat = sample+"_"+feature+"_runII_stat"
        outhist_sum_syst = sample+"_"+feature+"_runII_syst"
        ycount = 0
        for y in ["2016", "2017", "2018"]:
            # NOTE(review): file0 is never Closed; each iteration leaks a
            # file handle (histograms are detached via SetDirectory(0)).
            file0 = TFile("{}/{}/{}/ttH_{}_{}_{}.root".format(inputDir, catflag, feature, region, cutname, y), "read")
            errorlnNs = find_lnN(sample)
            errShapes = find_shapes(sample, y)
            file0.cd()
            h_nom = file0.Get(sample)
            h_nom.SetDirectory(0)
            # Three copies: total error, statistics only, systematics only.
            h_stat = h_nom.Clone(sample+"_stat")
            h_stat.SetDirectory(0)
            h_syst = h_nom.Clone(sample+"_syst")
            h_syst.SetDirectory(0)
            hist_all = fill_lnN_error(h_nom, errorlnNs)
            h_syst = set_lnN_error(h_syst, errorlnNs)
            # count = 0
            for shapeName in errShapes:
                #print( "sample {} syst {} ".format(sample, shapeName))
                hist_up, hist_down = getvarhists(file0, sample, shapeName)
                hist_all = fill_shape_error(hist_all, hist_up, hist_down)
                h_syst = fill_shape_error(h_syst, hist_up, hist_down)
            # Per-year copies written to the output file.
            outhist_name = sample+"_"+feature+"_"+y
            h_out = hist_all.Clone(outhist_name)
            h_out.SetTitle(outhist_name)
            h_out.SetName(outhist_name)
            outhist_name_stat = sample+"_"+feature+"_"+y + "_stat"
            h_out_stat = h_stat.Clone(outhist_name_stat)
            h_out_stat.SetTitle(outhist_name_stat)
            h_out_stat.SetName(outhist_name_stat)
            outhist_name_syst = sample+"_"+feature+"_"+y + "_syst"
            h_out_syst = h_syst.Clone(outhist_name_syst)
            h_out_syst.SetTitle(outhist_name_syst)
            h_out_syst.SetName(outhist_name_syst)
            f_out.cd()
            h_out.Write()
            h_out_stat.Write()
            h_out_syst.Write()
            # sum
            if ycount == 0:
                h_outsum = hist_all.Clone(outhist_sum)
                h_outsum.SetTitle(outhist_sum)
                h_outsum.SetName(outhist_sum)
                h_outsum_stat = h_out_stat.Clone(outhist_sum_stat)
                h_outsum_stat.SetTitle(outhist_sum_stat)
                h_outsum_stat.SetName(outhist_sum_stat)
                h_outsum_syst = h_out_syst.Clone(outhist_sum_syst)
                h_outsum_syst.SetTitle(outhist_sum_syst)
                h_outsum_syst.SetName(outhist_sum_syst)
            else:
                h_outsum.Add(hist_all)
                h_outsum_stat.Add(h_out_stat)
                # NOTE(review): h_syst_add is not defined anywhere in this
                # chunk -- as written this line would raise NameError.
                h_outsum_syst = h_syst_add(h_outsum_syst, h_out_syst)
            ycount += 1
        f_out.cd()
        h_outsum.Write()
        h_outsum_stat.Write()
        h_outsum_syst.Write()
f_out.Close()
| BinghuanLi/post_tWIHEP | plotters/make_systHists.py | make_systHists.py | py | 11,190 | python | en | code | 0 | github-code | 36 |
43851216200 | import Diverter as di
def ChoiceMaker(data, filereports):
    """Interactive menu: generate reports for all apps or a single app.

    Prompts on stdin and dispatches to di.diverter(); on invalid input it
    re-prompts by calling itself recursively.
    """
    print("Done with data reading, Ready To start Analysis")
    print()
    print()
    Choice = input("Which report you want to generate? (enter choice All/Single) ")
    # Bug fix: the original compared Choice.lower() to "all" twice.
    if Choice.lower() == "all":
        di.diverter(data, "all", filereports)
    elif Choice.lower() == "single":
        print("These are the apps in the data:")
        print()
        print(list(data["AppName"].unique()))
        AppChoice = input("Which App you want to analyse from the above list( Enter the app name as shown above): ")
        if AppChoice not in list(data["AppName"].unique()):
            print("App Choice is not in present in data,try again with valid input")
            ChoiceMaker(data, filereports)
            # Bug fix: without this return the diverter was also run with
            # the invalid AppChoice after the retry finished.
            return
        di.diverter(data, AppChoice, filereports)
    else:
        print("Not A valid Input, Try Again !")
        ChoiceMaker(data, filereports)
        # NOTE(review): in the (whitespace-mangled) original, exit() follows
        # the retry at an ambiguous indentation; kept here so the program
        # terminates once the retried run completes -- confirm intent.
        exit()
| AbhiDasariNYU/ApplicationStatusBot | Choice.py | Choice.py | py | 1,003 | python | en | code | 0 | github-code | 36 |
27876220800 | # Core Pkgs
import streamlit as st
#Other Pkgs
#EDA Pkgs
import pandas as pd
import codecs
from pandas_profiling import ProfileReport
#Component Pkgs
import streamlit.components.v1 as components #v1 is version1 : If new features are added, then it will not break your app
from streamlit_pandas_profiling import st_profile_report
#Custom Component Functions
import sweetviz as sv
def st_display_sweetviz(report_html, width=1000, height=500):
    """Embed a Sweetviz HTML report file in the Streamlit page.

    Fix: the report file is now read inside a ``with`` block so the
    handle is always closed (the original left it open).
    """
    # codecs help in reading html file
    with codecs.open(report_html, 'r') as report_file:
        page = report_file.read()
    components.html(page, width=width, height=height, scrolling=True)
def main():
    """A Simple EDA App with Streamlit Components (Using Pandas Profiling and Sweetviz in Streamlit)"""
    # Sidebar menu selects which automated-EDA backend to run.
    menu = ["Pandas Profile", "Sweetviz"]
    choice = st.sidebar.selectbox("Menu", menu)
    if choice == "Pandas Profile":
        st.subheader("Automated Exploratory Data Analsis (with Pandas Profile)")
        st.write("EDA is the task of analyzing data from statistics, simple plotting tools, linear algebra and other techniques to understand what the dataset is, before we go to actual machine learning.")
        st.write("Pandas Profile generates profile reports from a pandas DataFrame. Pandas Profiling extends the pandas DataFrame for quick data analysis.")
        # Silences the old file-uploader encoding deprecation warning.
        st.set_option('deprecation.showfileUploaderEncoding', False)
        data_file = st.file_uploader("Upload CSV", type = ['csv'])
        if data_file is not None:
            df = pd.read_csv(data_file)
            st.dataframe(df.head())
            # Render the pandas-profiling report inline.
            profile = ProfileReport(df)
            st_profile_report(profile)
    elif choice == "Sweetviz":
        st.subheader("Automated Exploratory Data Analysis (with Sweetviz)")
        st.write("Sweetviz is an open source Python library that generates beautiful, high-density visualizations to kickstart EDA (Exploratory Data Analysis). Output is a fully self-contained HTML application.The system is built around quickly visualizing target values and comparing datasets. Its goal is to help quick analysis of target characteristics, training vs testing data, and other such data characterization tasks.")
        data_file = st.file_uploader("Upload CSV", type = ['csv'])
        st.set_option('deprecation.showfileUploaderEncoding', False)
        if data_file is not None:
            df = pd.read_csv(data_file)
            st.dataframe(df.head())
            #Normal Workflow for sweetviz
            # Sweetviz writes SWEETVIZ_REPORT.html to the working directory,
            # which is then embedded via the custom component below.
            report = sv.analyze(df)
            report.show_html()
            st_display_sweetviz("SWEETVIZ_REPORT.html")
| yashpupneja/StreamAI | DS_pandas_profiling.py | DS_pandas_profiling.py | py | 2,412 | python | en | code | 0 | github-code | 36 |
2509822081 | # Iris Recognition
# 04. Module to match iris descriptions.
# Language: Python 3
import numpy
import cv2
# Candidate column shifts (in pixels of the normalized image) tried to
# compensate for eye/head rotation between the two iris codes.
ROTATIONS = [-10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def _rotate_norm_image(image, rotation):
output = numpy.zeros(image.shape, image.dtype)
if rotation == 0:
return image
else:
output[:, rotation:] = image[:, :-rotation]
output[:, :rotation] = image[:, -rotation:]
return output
def _compute_norm_hamming_distance(description_1, mask_1, description2, mask_2):
    """Normalized Hamming distance between two iris codes.

    Only pixels valid (nonzero) in BOTH masks are compared; the XOR of
    the two descriptions counts disagreeing bits, normalized by the
    number of jointly valid pixels.
    """
    # Pixels usable for comparison: nonzero in both masks.
    comb_mask = cv2.bitwise_and(mask_1, mask_2)
    bit_up_count = numpy.sum(comb_mask > 0)
    # Disagreeing code bits, restricted to the combined mask
    # (bitwise_and with mask= zeroes everything outside the mask).
    xor_output = cv2.bitwise_xor(description_1, description2)
    xor_output = cv2.bitwise_and(xor_output, xor_output, mask=comb_mask)
    dist = numpy.sum(xor_output > 0)
    # NOTE(review): a fully occluded overlap (bit_up_count == 0) would
    # divide by zero here -- confirm callers guarantee some valid pixels.
    return float(dist) / bit_up_count
def match(descriptions_1, mask_1, descriptions_2, mask_2):
    """Best (minimum) normalized Hamming distance over all trial rotations.

    For every candidate rotation the second iris code and its mask are
    circularly shifted, the per-description distances are averaged, and
    the smallest rotation-average is returned.
    """
    per_rotation = []
    for rotation in ROTATIONS:
        per_description = []
        for idx in range(len(descriptions_1)):  # could be "for idx in range(len(descriptions_2)):"
            shifted_desc = _rotate_norm_image(descriptions_2[idx], rotation)
            shifted_mask = _rotate_norm_image(mask_2, rotation)
            per_description.append(
                _compute_norm_hamming_distance(descriptions_1[idx], mask_1, shifted_desc, shifted_mask))
        per_rotation.append(numpy.mean(per_description))
    print('[INFO] Computed normalized Hamming distance.')
    return numpy.min(per_rotation)
| EmmanuelOlofintuyi/Biometrics | Iris Recognition/d_match_iris.py | d_match_iris.py | py | 1,516 | python | en | code | 0 | github-code | 36 |
1938073175 | from pathlib import Path
import json
from .util import filter_fields
def KMANGLED_encode(
    data, sort_keys=False, indent=None, ignore_private=False, ignore_none=False
):
    """Serialize ``data`` to a JSON string after field filtering.

    ignore_private / ignore_none are forwarded to filter_fields() to drop
    private and/or None-valued fields before dumping.
    """
    filtered = filter_fields(data, ignore_private, ignore_none)
    return json.dumps(filtered, sort_keys=sort_keys, indent=indent)
def KMANGLED_decode(value: str):
    """Parse a JSON document from a string and return the Python value."""
    return json.loads(value)
def KMANGLED_dump_to_file(
    data,
    filename: str,
    sort_keys=False,
    indent=None,
    ignore_private=False,
    ignore_none=False,
):
    """Encode ``data`` as JSON (see KMANGLED_encode) and write it to ``filename``."""
    Path(filename).write_text(
        KMANGLED_encode(data, sort_keys, indent, ignore_private, ignore_none)
    )
| kcl-lang/kcl-py | kclvm/compiler/extension/builtin/system_module/json.py | json.py | py | 652 | python | en | code | 8 | github-code | 36 |
70656671785 | """
Some of code was taken from https://pytorch.org/vision/stable/_modules/torchvision/models/resnet.html
"""
import torch
from torch import Tensor, nn
from typing import Optional, List
from torchvision.models import resnet18
def conv3x3(input_size: int, output_size: int, stride: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding 1 and no bias (BN follows it)."""
    return nn.Conv2d(
        input_size,
        output_size,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def conv1x1(input_size: int, output_size: int, stride: int = 1) -> nn.Conv2d:
    """1x1 (pointwise) convolution without bias, used for projections."""
    return nn.Conv2d(
        input_size,
        output_size,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (ResNet BasicBlock).

    Two 3x3 conv + BN stages with ReLU and a skip connection.  When the
    shape changes (stride != 1 or the channel count differs) the caller
    supplies a ``downsample`` module that adapts the identity path.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
    ):
        super().__init__()
        # First conv may stride; the second always keeps the resolution.
        self.conv1 = nn.Conv2d(input_size, output_size, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(output_size)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(output_size, output_size, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(output_size)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        residual = self.relu(self.bn1(self.conv1(x)))
        residual = self.bn2(self.conv2(residual))
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(residual + shortcut)
class ResNetCNN(nn.Module):
    """ResNet-style feature extractor for single-channel images.

    A 7x7 stem followed by three BasicBlock stages (128/256/512
    channels, the last two striding by 2) and a 1x1 projection down to
    ``output_size`` channels.  ``layers`` gives the number of blocks in
    each of the three stages (default [2, 2, 2]).
    """

    def __init__(
        self,
        layers: List[int] = None,
        output_size: int = 128,
    ):
        super().__init__()
        if layers is None:
            layers = [2, 2, 2]
        if len(layers) != 3:
            raise ValueError(
                f'List of layers should have 3 elements, got {len(layers)}')
        self.relu = nn.ReLU()
        self.output = output_size
        # Channel count fed into the next stage; updated by _make_layer.
        self.input_size = 128
        self.layer0 = nn.Sequential(
            nn.Conv2d(1, self.input_size, kernel_size=7, padding=3),
            nn.BatchNorm2d(self.input_size),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.layer1 = self._make_layer(128, layers[0])
        self.layer2 = self._make_layer(256, layers[1], stride=2)
        self.layer3 = self._make_layer(512, layers[2], stride=2)
        self.downsample = conv1x1(512, self.output)

    def _make_layer(self, output_size: int, blocks: int, stride: int = 1) -> nn.Sequential:
        """Stack ``blocks`` BasicBlocks; only the first may stride/rechannel."""
        shortcut = None
        if stride != 1 or self.input_size != output_size:
            # Project the identity path to the new shape.
            shortcut = nn.Sequential(
                conv1x1(self.input_size, output_size, stride),
                nn.BatchNorm2d(output_size),
            )
        stage = [BasicBlock(self.input_size, output_size, stride, shortcut)]
        self.input_size = output_size
        stage.extend(BasicBlock(self.input_size, output_size)
                     for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x: Tensor) -> Tensor:
        for stage in (self.layer0, self.layer1, self.layer2, self.layer3):
            x = stage(x)
        # (batch_size, output_channels, height, width)
        x = self.downsample(x)
        return x.squeeze(0)  # (output_channels, height, width)
class CNN(nn.Module):
def __init__(self, output_size: int = 128):
super().__init__()
self.input_size = 64
self.layer0 = nn.Sequential(
nn.Conv2d(1, 64, kernel_size=7, padding=3),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d((1, 2))
)
self.layer1 = self._make_layer(128)
self.layer2 = self._make_layer(256)
self.layer3 = self._make_layer(512)
self.downsample = nn.Sequential(
conv3x3(self.input_size, output_size),
nn.BatchNorm2d(output_size),
nn.ReLU()
)
def forward(self, x: Tensor) -> Tensor:
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.downsample(x)
return x.squeeze(0)
def _make_layer(self, output_size: int) -> nn.Sequential:
layer = nn.Sequential(
conv3x3(self.input_size, output_size),
nn.BatchNorm2d(output_size),
nn.ReLU(),
conv3x3(output_size, output_size),
nn.BatchNorm2d(output_size),
nn.ReLU(),
nn.MaxPool2d(2)
)
self.input_size = output_size
return layer
| timtibilov/AttentionOCR | src/model/cnn.py | cnn.py | py | 4,622 | python | en | code | 7 | github-code | 36 |
74678278503 | from django.contrib.auth.decorators import permission_required , login_required
from django.shortcuts import render , redirect , get_object_or_404,HttpResponseRedirect , HttpResponse
from form_1.forms import Form1Form
from form_1.models import Form1
from giris.views import login_view
from .models import Form3 as Form3Model , Form3 , Malzeme
from .forms import Form3Form , MalzemeForm
from django.db.models import Q
import mimetypes
from form2.models import Form2
from form4.models import Form4
@login_required(login_url=login_view)
# Create your views here.
def form3_view(request):
    """List Form3 entries: all of them for staff, own entries otherwise."""
    if request.user.is_staff or request.user.is_superuser:
        listem = Form3Model.objects.all().order_by('Form33__id')
    else:
        listem = Form3Model.objects.all().order_by('Form33__id').filter(Olusturan=request.user)
    """query = request.GET.get('q')
    if query:
        listem = listem.filter(
            Q(id=int(query)) |
            Q(Olusturan__username__icontains=query) |
            Q(isin_kategorisi__icontains=query) |
            Q(Aciklama__icontains=query)
        ).distinct()"""
    # 'islemde': the Form44 targets of every existing Form4 record.
    # NOTE(review): 'malzeme' passed here is the *view function* defined
    # below in this module, not a queryset -- confirm the template really
    # expects a callable.
    return render(request, 'Form3/Form3.html', {'listem': listem, 'islemde':
        [islemde.Form44 for islemde in Form4.objects.all() if islemde.Form44]
        , 'malzeme': malzeme})
@login_required(login_url=login_view)
def create(request, form1):
    """Create a Form3 attached to the Form2 whose Form22 id is ``form1``.

    On a valid POST the record is stamped with the current user; if the
    chosen category is material procurement the user continues to the
    material-entry form, otherwise goes back to the list view.
    """
    form3 = Form3Form(request.POST or None, request.FILES or None)
    context = {'form3': form3}
    if request.method == "POST":
        if form3.is_valid():
            a = form3.save(commit=False)
            a.Form33 = Form2.objects.get(Form22=form1)
            a.Olusturan = request.user
            a.save()
            if a.isin_kategorisi == 'Malz.Tedariği':
                return redirect('create_malzeme', form3=a.id)
            return redirect(form3_view)
    return render(request, 'Form3/create.html', context)
@login_required(login_url=login_view)
def detail(request, pk):
    """Render the detail page for one Form3 entry (404 when missing)."""
    entry = get_object_or_404(Form3Model, id=pk)
    return render(request, 'Form3/detail.html', {'listem': entry})
@login_required(login_url=login_view)
@permission_required('form3.delete_form3', login_url=form3_view)
def delete(request, pk):
    """Delete one Form3 entry and return to the list view.

    Uses get_object_or_404 (consistent with detail/update) so a missing
    id yields a 404 instead of an unhandled DoesNotExist; the unused
    ``context`` dict was removed.
    """
    listem = get_object_or_404(Form3Model, id=pk)
    listem.delete()
    return redirect('form3_view')
@login_required(login_url=login_view)
@permission_required('form3.change_form3', login_url=form3_view)
def update(request, pk):
    """Edit an existing Form3 entry; re-renders the form until it is valid."""
    listem = get_object_or_404(Form3Model, id=pk)
    # On GET (request.POST is empty) the form is just pre-filled from the
    # instance; on POST it validates and saves.
    form3 = Form3Form(request.POST or None, request.FILES or None, instance=listem)
    if form3.is_valid():
        form3.save()
        return redirect('form3_view')
    context = {'form3': form3}
    return render(request, 'Form3/create.html', context)
@login_required(login_url=login_view)
def download(request, pk):
    """Serve the file attached to a Form3 entry as an attachment download.

    Fix: the file is now read inside a ``with`` block so the handle is
    always closed (the original opened it and never closed it).
    """
    listem = get_object_or_404(Form3Model, id=pk)
    file_path = listem.dosya.path
    file_name = str(listem.dosya)
    mime_type, _ = mimetypes.guess_type(file_path)
    with open(file_path, 'rb') as fh:
        response = HttpResponse(fh.read(), content_type=mime_type)
    response['Content-Disposition'] = f"attachment; filename={file_name}"
    return response
@login_required(login_url=login_view)
def create_malzeme(request, form3):
    """Add a Malzeme (material) row to the Form3 with id ``form3``.

    NOTE(review): after a successful save the same form page is rendered
    again (no redirect), so a browser refresh would re-submit the POST --
    confirm this is intended.
    """
    malzeme_tedarik = MalzemeForm(request.POST or None)
    context = {'malzeme_tedarik': malzeme_tedarik}
    if request.method == 'POST' and malzeme_tedarik.is_valid():
        b = malzeme_tedarik.save(commit=False)
        b.Form333 = Form3.objects.get(id=form3)
        b.save()
    return render(request, 'Form3/MalzemeTedarik.html', context)
@login_required(login_url=login_view)
def malzeme(request, form3):
    """List the Malzeme rows attached to one Form3 entry."""
    rows = Malzeme.objects.filter(Form333=form3)
    return render(request, 'Form3/malzemeview.html', {'malzemeler': rows})
| orhunakar01/hekimbey01 | form3/views.py | views.py | py | 3,907 | python | en | code | 1 | github-code | 36 |
35259366868 | from os.path import abspath, dirname, realpath
# Gunicorn configuration for the live API service.
# Project root (three directories above this file), added to sys.path so
# the Django project is importable by the workers.
pythonpath = abspath(dirname(dirname(dirname(realpath(__file__)))))
# Django settings module used by the application.
django_settings = 'liveapi.settings'
bind = '0.0.0.0:8000'  # listen on all interfaces, port 8000
workers = 2            # number of worker processes
timeout = 30           # seconds before a silent worker is killed
# Recycle each worker after ~1500 requests, with 10% jitter so the
# workers do not all restart at the same moment.
max_requests = 1500
max_requests_jitter = int(max_requests * 0.1)
preload = True         # load the application before forking workers
21141404825 | # variable1
# Payroll slip script: prints salary slips for two employees.
# Cleanup over the original: the if/else pairs whose branches were
# byte-identical were removed, as were the dead initial assignments
# (values that were immediately overwritten).  Printed output is
# unchanged.  Rules preserved from the original:
#   * tunjangan jabatan  = 20% of base salary
#   * tunjangan keluarga = 10% (employee 1) / 20% (employee 2) of base
#   * the printed "tunjangan ..." lines show base salary + allowance
#   * zakat profesi is 2.5% of gross for employee 1 and 0 for employee 2;
#     the "zakat profesi" line for employee 1 shows gross minus zakat,
#     exactly as the original printed it.
slipGaji = "PT. XYZ"
garis = '------------------'

# ---- employee 1 ----
namaPegawai1 = "Ahmad"
agama = "Islam"
jumlahAnak = 2
gaji_pokok = 4000000
tunjangan_jabatan = gaji_pokok * (20 / 100)     # 800000
tunjangan_keluarga = gaji_pokok * (10 / 100)    # 400000
gaji_kotor = gaji_pokok + tunjangan_jabatan + tunjangan_keluarga
zakat_profesi = gaji_kotor * (2.5 / 100)
take_home_pay = gaji_kotor - zakat_profesi

print("SLIP GAJI", slipGaji,
      "\n", garis,
      "\nNama Pegawai\t\t\t:", namaPegawai1,
      "\nAgama\t\t\t\t:", agama,
      "\nJumblah Anak\t\t\t:", jumlahAnak,
      "\nGaji Pokok\t\t\t:", gaji_pokok)
print('tunjangan jabatan : {}'.format(int(gaji_pokok + tunjangan_jabatan)))
print('tunjangan keluarga : {}'.format(int(gaji_pokok + tunjangan_keluarga)))
print('gaji kotor : {}'.format(int(gaji_kotor)))
print('zakat profesi : {}'.format(int(gaji_kotor - zakat_profesi)))
print('take_home_pay : {}'.format(int(take_home_pay)))

# ---- employee 2 ----
namaPegawai2 = "Alex"
agama2 = "Kristen Protestan"
jumlahAnak2 = 5
gaji_Pokok2 = 6000000
tunjangan_jabatan2 = gaji_Pokok2 * (20 / 100)   # 1200000
tunjangan_keluarga2 = gaji_Pokok2 * (20 / 100)  # 1200000
gaji_kotor2 = gaji_Pokok2 + tunjangan_jabatan2 + tunjangan_keluarga2
zakat_profesi2 = 0
take_home_pay2 = gaji_kotor2 - zakat_profesi2

print("\nSLIP GAJI", slipGaji,
      "\n", garis,
      "\nNama Pegawai\t\t\t:", namaPegawai2,
      "\nAgama\t\t\t\t:", agama2,
      "\nJumlah Anak\t\t\t:", jumlahAnak2,
      "\nGaji Pokok\t\t\t:", gaji_Pokok2)
print('tunjangan jabatan : {}'.format(int(gaji_Pokok2 + tunjangan_jabatan2)))
print('tunjangan keluarga : {}'.format(int(gaji_Pokok2 + tunjangan_keluarga2)))
print('gaji kotor : {}'.format(int(gaji_kotor2)))
print('zakat profesi : {}'.format(int(zakat_profesi2)))
print('take_home_pay : {}'.format(int(take_home_pay2)))
| bahlurali/pw | python 1.py | python 1.py | py | 3,048 | python | id | code | 0 | github-code | 36 |
3112497590 | #!/usr/bin/env python3.7
import argparse
import json
import sys
def matches(parts, subject):
    """Yield every value in *subject* matched by the path components *parts*.

    Each component is a dict key, a list index, or the wildcard ``'*'``
    which fans out over all children.  A path that does not resolve
    yields nothing — it is a no-op, not an error.
    """
    if len(parts) == 0:
        yield subject
        return
    part, *rest = parts
    # If we're extracting something from `subject`, and `subject` is neither a
    # list nor a dict, then there's nothing to extract. Whether this is an
    # error or just a no-op was part of how my original solution was wrong.
    if type(subject) not in [list, dict]:
        return
    if type(subject) is list:
        if part == '*':
            for child in subject:
                yield from matches(rest, child)
            return
        try:
            index = int(part)
        except ValueError:
            return  # can't extract a property name from a list
        try:
            child = subject[index]
        except IndexError:
            # An out-of-range index matches nothing, mirroring how a
            # missing dict key below silently matches nothing.
            return
        yield from matches(rest, child)
    else:
        assert type(subject) is dict
        if part == '*':
            for child in subject.values():
                yield from matches(rest, child)
        elif part in subject:
            yield from matches(rest, subject[part])
def parse(pattern):
    """Split a dotted query *pattern* into its path components."""
    # "".split('.') would give [''] rather than [], so the empty pattern
    # needs special handling.
    return pattern.split('.') if pattern else []
def extract(pattern, subject):
    """Apply *pattern* to *subject* and return the matched value(s).

    A wildcard query always returns a list of matches; a wildcard-free
    query returns its single match, or None when nothing matched.
    """
    parts = parse(pattern)
    results = list(matches(parts, subject))
    if '*' in parts:
        # Wildcards can match any number of values, so keep the list.
        return results
    # Without wildcards at most one value can be matched.
    assert len(results) <= 1
    return results[0] if results else None
def parse_command_line(args):
    """Parse *args* (the command line minus the program name)."""
    parser = argparse.ArgumentParser(description='Extract values from JSON.')
    parser.add_argument('pattern',
                        help='JSON query (path) to extract from input')
    # Parse the caller-supplied arguments; the previous version ignored
    # `args` and re-read sys.argv itself.
    return parser.parse_args(args)
if __name__ == '__main__':
    # Read JSON from stdin, apply the query from argv, and pretty-print
    # any result (None, i.e. no match on a wildcard-free query, prints nothing).
    options = parse_command_line(sys.argv[1:])
    result = extract(options.pattern, json.load(sys.stdin))
    if result is not None:
        json.dump(result, sys.stdout, indent=4, sort_keys=True)
        print() # for the newline
| dgoffredo/jex | src/jex.py | jex.py | py | 2,192 | python | en | code | 0 | github-code | 36 |
70925956263 | class newNode:
    def __init__(self, data):
        """Create a leaf node holding `data`; children are set by the caller."""
        self.data = data
        self.left = self.right = None
# Function to find height of a tree
def height(root, ans):
    """Return the height of `root` in nodes, updating ans[0] as a side effect.

    `ans` is a one-element list used as a mutable accumulator: it ends up
    holding the largest node count seen on any path through a subtree root,
    i.e. the tree's diameter.
    """
    if root is None:
        return 0
    lh = height(root.left, ans)
    rh = height(root.right, ans)
    # A path passing through this node contains lh + rh + 1 nodes.
    ans[0] = max(ans[0], lh + rh + 1)
    return max(lh, rh) + 1
def diameter(root):
    """Return the diameter (longest path, counted in nodes) of the tree."""
    if root is None:
        return 0
    ans = [-999999999999]  # sentinel lower than any real node count
    # height() is called purely for its side effect of updating ans[0];
    # the previous version bound its return to an unused variable.
    height(root, ans)
    return ans[0]
# Driver code
if __name__ == '__main__':
    # Sample tree:
    #         1
    #        / \
    #       2   3
    #      / \
    #     4   5
    # Longest path is 4-2-1-3 (4 nodes), so this prints "Diameter is 4".
    root = newNode(1)
    root.left = newNode(2)
    root.right = newNode(3)
    root.left.left = newNode(4)
    root.left.right = newNode(5)
    print("Diameter is", diameter(root))
71636759143 | class Nodo:
    def __init__(self, tipo, nombre=None, isArray=None, direccion=None, siguiente=None, etiquetaTrue=None, etiquetaFalse=None):
        """Visitor result node: carries the type, the name holding the value,
        generated code lines, and (presumably) jump labels for boolean flow —
        confirm label usage against the visitor."""
        self.tipo = tipo # data type
        self.nombre = nombre # name of the node where we return the value
        self.isArray = isArray
        self.direccion = direccion
        self.codigo = [] # accumulated generated-code lines
        self.siguiente = siguiente
        self.etiquetaTrue = etiquetaTrue
        self.etiquetaFalse = etiquetaFalse
    def __repr__(self):
        # Compact debug representation: "<Nodo><tipo> <nombre>".
        return f"<Nodo>{self.tipo} {self.nombre}"
| jpcifuentes16/antlr-python3 | Visitor/Nodo.py | Nodo.py | py | 541 | python | es | code | 0 | github-code | 36 |
74060632744 | import time
from server.decorators import with_logger
from server.rating import RatingType
from .game import Game
from .typedefs import GameType, InitMode, ValidityState
@with_logger
class CustomGame(Game):
    """A normal custom-lobby game, rated on the global ladder."""

    init_mode = InitMode.NORMAL_LOBBY
    game_type = GameType.CUSTOM

    def __init__(self, id_, *args, **kwargs):
        # Defaults for custom games; caller-supplied kwargs take precedence.
        merged_kwargs = {
            "rating_type": RatingType.GLOBAL,
            "setup_timeout": 30,
        }
        merged_kwargs.update(kwargs)
        super().__init__(id_, *args, **merged_kwargs)

    async def _run_pre_rate_validity_checks(self):
        # Unless ratings are enforced, a game shorter than one minute per
        # player is too short to rate.
        if self.enforce_rating:
            return
        minimum_duration = 60 * len(self.players)
        if time.time() - self.launched_at < minimum_duration:
            await self.mark_invalid(ValidityState.TOO_SHORT)
| FAForever/server | server/games/custom_game.py | custom_game.py | py | 755 | python | en | code | 64 | github-code | 36 |
22782479678 | #
# @lc app=leetcode id=118 lang=python3
#
# [118] Pascal's Triangle
#
# https://leetcode.com/problems/pascals-triangle/description/
#
# algorithms
# Easy (58.01%)
# Likes: 11256
# Dislikes: 361
# Total Accepted: 1.4M
# Total Submissions: 1.9M
# Testcase Example: '5'
#
# Given an integer numRows, return the first numRows of Pascal's triangle.
#
# In Pascal's triangle, each number is the sum of the two numbers directly
# above it as shown:
#
#
# Example 1:
# Input: numRows = 5
# Output: [[1],[1,1],[1,2,1],[1,3,3,1],[1,4,6,4,1]]
# Example 2:
# Input: numRows = 1
# Output: [[1]]
#
#
# Constraints:
#
#
# 1 <= numRows <= 30
#
#
#
# @lc code=start
class Solution:
    def generate(self, numRows: int) -> List[List[int]]:
        """Return the first numRows rows of Pascal's triangle."""
        rows: List[List[int]] = []
        for i in range(numRows):
            # Start with a row of ones, then fill each interior cell from
            # its two parents in the previous row.
            row = [1] * (i + 1)
            for j in range(1, i):
                row[j] = rows[i - 1][j - 1] + rows[i - 1][j]
            rows.append(row)
        return rows
# @lc code=end
| Zhenye-Na/leetcode | python/118.pascals-triangle.py | 118.pascals-triangle.py | py | 1,061 | python | en | code | 17 | github-code | 36 |
31056930437 | import os.path
from flask import Flask
from flaskext.sqlalchemy import SQLAlchemy
CONFIG_FILEPATH = os.path.join(os.path.dirname(__file__), "../config.cfg")
def auto_register_modules(app):
    """Import every module listed in subleekr.__modules__ and register it on *app*."""
    import subleekr
    for name in subleekr.__modules__:
        __import__("%s.%s" % (subleekr.__name__, name))
        module = getattr(subleekr, name)
        # Let the module's blueprint reach back to the owning application.
        module.app.super_app = app
        app.register_module(module.app)
def create_app(__name__=__name__):
    """Create the Flask application, load the optional config, and wire it up."""
    application = Flask(__name__)
    try:
        application.config.from_pyfile(CONFIG_FILEPATH)
    except IOError:
        # The on-disk config file is optional; a missing one is ignored.
        pass
    auto_register_modules(application)
    application.db = SQLAlchemy(application)
    return application
| sublee/subleekr | subleekr/app.py | app.py | py | 732 | python | en | code | 1 | github-code | 36 |
34222277716 | # -*- coding: utf-8 -*-
import logging
import xml.sax
import slpyser.xmlparser.handlers as handlers
from slpyser.model.abap_objects.AbapDictionary import AbapDictionary
from slpyser.model.abap_objects.AbapMessageClass import AbapMessageClass
from slpyser.model.abap_objects.AbapTextPool import AbapTextElement
class SAPLinkContentHandle(xml.sax.ContentHandler):
    """
    Implementation for SAX XML parser handle SAPLink file syntax.
    """
    def __init__(self):
        """
        Constructor
        """
        self.__logger = logging.getLogger(__name__)
        xml.sax.ContentHandler.__init__(self)
        # Dispatch table: element name (upper-cased) -> [start, characters, end]
        # handler triple; None means that phase is ignored for the element.
        self._matrix_element_case_handler = {
            # TextPool elements
            'TEXTPOOL': [
                self._startTextPool,
                self._charactersTextPool,
                self._endTextPool
            ],
            'TEXTELEMENT': [
                self._startTextPoolTextElement,
                self._charactersTextPoolTextElement,
                self._endTextPoolTextElement
            ],
            # Message Class elements
            'MSAG': [
                self._startMessageClass,
                None,
                self._endMessageClass
            ],
            'T100': [
                self._startMessageClassMessage,
                None,
                None,
            ],
            # General elements
            'SOURCE': [
                self._startSourceCode,
                self._charactersSourceCode,
                self._endSourceCode
            ],
            'LANGUAGE': [
                self._startTextLanguage,
                self._charactersTextLanguage,
                self._endTextLanguage
            ],
        }
        """
        Each element have three handlers, declared in that order:
        1st: handle start of an element (retrieve element attributes);
        2nd: handle contents of an element (retrieve data inside element);
        3rd: handle end of an element.
        """
        # Fallback triple used for any element not in the dispatch table.
        self.__unhandled_element = [
            self._startUnhandled,
            self._charactersUnhandled,
            self._endUnhandled
        ]
        # Attributes to be returned after parsing
        self._abap_message_classes = {}
        # Internal attributes, store references of current processed abap objects
        self.__current_source_code_reference = None
        self.__current_text_pool_reference = None
        self.__current_class_documentation_reference = None
        self.__current_text_language = None
        self.__current_message_class = None
        # Helper attributes
        self.__current_tag = None
        self.__current_tag_stack = []
        # Decoupled parsers
        self.__programs_parser = handlers.Program(owner=self)
        self._matrix_element_case_handler.update(self.__programs_parser.map_parse())
        self.__ddic_parser = handlers.DDIC(owner=self)
        self._matrix_element_case_handler.update(self.__ddic_parser.map_parse())
        self.__class_library_parser = handlers.ClassLibrary(owner=self)
        self._matrix_element_case_handler.update(self.__class_library_parser.map_parse())
        self.__function_group_parser = handlers.FunctionGroup(owner=self)
        self._matrix_element_case_handler.update(self.__function_group_parser.map_parse())
    @property
    def abapClasses(self):
        # Classes collected by the decoupled ClassLibrary handler.
        return self.__class_library_parser.parsed_classes
    @property
    def abapFunctionGroups(self):
        return self.__function_group_parser.parsed_function_groups
    @property
    def abapMessageClasses(self):
        return self._abap_message_classes
    @property
    def abapDictionary(self):
        return AbapDictionary.from_ddic_handler(self.__ddic_parser)
    @property
    def abapPrograms(self):
        return self.__programs_parser.parsed_programs
    def startElement(self, name, attrs):
        """Parses start element"""
        # Upper case on name because SAPLINK haven't used same case on all elements.
        self.__current_tag = name.upper()
        self.__current_tag_stack.append(self.__current_tag)
        start_element_handler = self._matrix_element_case_handler.get(self.__current_tag, self.__unhandled_element)[0]
        if start_element_handler is not None:
            start_element_handler(name.upper(), attrs)
    def characters(self, content):
        """
        Parses inner contents of current element.
        This method is called for each new line inside that element.
        """
        characters_handler = self._matrix_element_case_handler.get(self.__current_tag, self.__unhandled_element)[1]
        if characters_handler is not None:
            characters_handler(content)
    def endElement(self, name):
        """Parses end of element."""
        # NOTE(review): a mismatch is only logged; parsing continues with the
        # current tag's handler.
        if self.__current_tag != name.upper():
            self.__logger.error('ERROR parsing file, current element was %s but closing element was %s' , self.__current_tag, name.upper())
        end_element_handler = self._matrix_element_case_handler.get(self.__current_tag, self.__unhandled_element)[2]
        if end_element_handler is not None:
            end_element_handler(name.upper())
        self.__current_tag_stack.pop()
        # FIXME: Append None to currentTagStack to avoid little hack?
        self.__current_tag = self.__current_tag_stack[-1] if len(self.__current_tag_stack) > 0 else None
    # Below are declared method to properly handle elements and its contents
    def _startMessageClass(self, name, attrs):
        self.__logger.debug('Start message class')
        name = attrs.get('ARBGB')
        original_language = attrs.get('MASTERLANG')
        responsible = attrs.get('RESPUSER', '')
        short_text = attrs.get('STEXT', '')
        message_class = AbapMessageClass(Name=name,
                                         OriginalLanguage=original_language,
                                         Responsible=responsible,
                                         ShortText=short_text)
        self.__current_message_class = message_class
    def _endMessageClass(self, name):
        # Store the finished message class keyed by its name and reset state.
        msg_class = self.__current_message_class
        self._abap_message_classes[msg_class.name] = msg_class
        self.__current_message_class = None
    def _startMessageClassMessage(self, name, attrs):
        self.__logger.debug('Start Message Class Message')
        language = attrs.get('SPRSL')
        number = attrs.get('MSGNR')
        text = attrs.get('TEXT')
        message = AbapMessageClass.Message(Language=language,
                                           Number=number,
                                           Text=text)
        # First message seen for a language initialises that language's dict.
        # NOTE(review): prefer "is None" over "== None" here.
        if self.__current_message_class.language_mapping.get(language) == None:
            self.__current_message_class.language_mapping[language] = {}
        self.__current_message_class.language_mapping[language][number] = message
    def _startSourceCode(self, name, attrs):
        self.__logger.debug('Start Source Code')
    def _charactersSourceCode(self, content):
        # Source lines are accumulated; finalize_source_code() joins them.
        self.__current_source_code_reference.source_code.append(content)
    def charactersSourceCode(self, content):
        # Public alias used by the decoupled handlers.
        self._charactersSourceCode(content)
    def _endSourceCode(self, name):
        self.__logger.debug('End Source Code')
    def _startTextLanguage(self, name, attrs):
        self.__logger.debug('Start Text Language')
        self.__current_text_language = attrs.get('SPRAS')
        # Initializing language dict
        # NOTE(review): 'languageMappint' below looks like a typo of
        # 'language_mapping' — confirm against the documentation model class.
        if self.__current_text_pool_reference is not None:
            self.__current_text_pool_reference.language_mapping[self.__current_text_language] = {}
        elif self.__current_class_documentation_reference is not None:
            self.__current_class_documentation_reference.languageMappint[self.__current_text_language] = []
    def _charactersTextLanguage(self, content):
        pass
    def _endTextLanguage(self, name):
        self.__logger.debug('End Text Language')
        self.__current_text_language = None
    def _startTextPool(self, name, attrs):
        self.__logger.debug('Start Text Pool')
    def _charactersTextPool(self, content):
        pass
    def _endTextPool(self, name):
        self.__logger.debug('End Text Pool')
    def _startTextPoolTextElement(self, name, attrs):
        self.__logger.debug('Start Text Pool Text Element')
        text_id = attrs.get('ID')
        key = attrs.get('KEY')
        entry = attrs.get('ENTRY')
        length = attrs.get('LENGTH')
        text_element = AbapTextElement(TextId=text_id,
                                       TextKey=key,
                                       TextEntry=entry,
                                       Length=length)
        if self.__current_text_pool_reference is not None:
            self.__current_text_pool_reference.addTextElement(Language=self.__current_text_language,
                                                              TextElement=text_element)
        else:
            self.__logger.warning('[FIXME] A text pool''s entry "%s" was found but the current abap object wasn''t expecting a text pool.', entry)
    def _charactersTextPoolTextElement(self, content):
        pass
    def _endTextPoolTextElement(self, name):
        self.__logger.debug('End Text Pool Text Element')
    def _startUnhandled(self, name, attrs):
        self.__logger.warning('Start of an unhandled element: %s', name)
    def _charactersUnhandled(self, content):
        self.__logger.warning('Content of unhandled tag: %s', content)
    def _endUnhandled(self, name):
        self.__logger.warning('End of an unhandled element: %s', name)
    def set_current_source_code_reference(self, source_reference):
        """Point subsequent SOURCE content at *source_reference*."""
        self.__current_source_code_reference = source_reference
        source_reference.source_code = []
    def finalize_source_code(self):
        """
        Join the source code's array into a string, and clean it's reference from parser.
        """
        self.__current_source_code_reference.source_code = ''.join(self.__current_source_code_reference.source_code)
        self.__current_source_code_reference = None
    def set_current_textpool_reference(self, textpool_reference):
        self.__current_text_pool_reference = textpool_reference
    def finalize_textpool(self):
        self.__current_text_pool_reference = None
| thalesvb/slpyser | slpyser/xmlparser/SAPLinkContentHandle.py | SAPLinkContentHandle.py | py | 10,301 | python | en | code | 0 | github-code | 36 |
69954683303 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/21 0021
# @Author : justin.郑 3907721@qq.com
# @File : covid.py
# @Desc : 获取疫情数据
import json
import time
import demjson
import jsonpath
import requests
import pandas as pd
from io import BytesIO
from PIL import Image
from bs4 import BeautifulSoup
def covid_163(indicator="实时"):
    """
    网易-新冠状病毒
    https://news.163.com/special/epidemic/?spssid=93326430940df93a37229666dfbc4b96&spsw=4&spss=other&#map_block
    https://news.163.com/special/epidemic/?spssid=93326430940df93a37229666dfbc4b96&spsw=4&spss=other&
    :return: 返回指定 indicator 的数据
    :rtype: pandas.DataFrame
    """
    # NOTE(review): everything below scrapes live 163.com endpoints; the
    # response schemas may drift over time and every call performs several
    # network requests regardless of which single indicator is requested.
    url = "https://c.m.163.com/ug/api/wuhan/app/data/list-total"
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
    }
    payload = {
        "t": int(time.time() * 1000),
    }
    r = requests.get(url, params=payload, headers=headers)
    data_json = r.json()
    # data info
    url = "https://news.163.com/special/epidemic/"
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.text, "lxml")
    data_info_df = pd.DataFrame(
        [
            item.text.strip().split(".")[1]
            for item in soup.find("div", attrs={"class": "data_tip_pop_text"}).find_all(
                "p"
            )
        ]
    )
    data_info_df.columns = ["info"]
    # 中国历史时点数据
    hist_today_df = pd.DataFrame(
        [item["today"] for item in data_json["data"]["chinaDayList"]],
        index=[item["date"] for item in data_json["data"]["chinaDayList"]],
    )
    # 中国历史累计数据
    hist_total_df = pd.DataFrame(
        [item["total"] for item in data_json["data"]["chinaDayList"]],
        index=[item["date"] for item in data_json["data"]["chinaDayList"]],
    )
    # 中国实时数据
    current_df = pd.DataFrame.from_dict(data_json["data"]["chinaTotal"])
    # 世界历史时点数据
    outside_today_df = pd.DataFrame(
        [item["today"] for item in data_json["data"]["areaTree"]],
        index=[item["name"] for item in data_json["data"]["areaTree"]],
    )
    # 世界历史累计数据
    outside_total_df = pd.DataFrame(
        [item["total"] for item in data_json["data"]["areaTree"]],
        index=[item["name"] for item in data_json["data"]["areaTree"]],
    )
    # 全球所有国家及地区时点数据
    all_world_today_df = pd.DataFrame(
        jsonpath.jsonpath(data_json["data"]["areaTree"], "$..today"),
        index=jsonpath.jsonpath(data_json["data"]["areaTree"], "$..name"),
    )
    # 全球所有国家及地区累计数据
    all_world_total_df = pd.DataFrame(
        jsonpath.jsonpath(data_json["data"]["areaTree"], "$..total"),
        index=jsonpath.jsonpath(data_json["data"]["areaTree"], "$..name"),
    )
    # 中国各地区时点数据
    area_total_df = pd.DataFrame(
        [item["total"] for item in data_json["data"]["areaTree"][0]["children"]],
        index=[item["name"] for item in data_json["data"]["areaTree"][0]["children"]],
    )
    # 中国各地区累计数据
    area_today_df = pd.DataFrame(
        [item["today"] for item in data_json["data"]["areaTree"][0]["children"]],
        index=[item["name"] for item in data_json["data"]["areaTree"][0]["children"]],
    )
    # 疫情学术进展
    url_article = "https://vip.open.163.com/api/cms/topic/list"
    payload_article = {
        "topicid": "00019NGQ",
        "listnum": "1000",
        "liststart": "0",
        "pointstart": "0",
        "pointend": "255",
        "useproperty": "true",
    }
    r_article = requests.get(url_article, params=payload_article)
    article_df = pd.DataFrame(r_article.json()["data"]).iloc[:, 1:]
    # 咨询
    url_info = "https://ent.163.com/special/00035080/virus_report_data.js"
    payload_info = {
        "_": int(time.time() * 1000),
        "callback": "callback",
    }
    r_info = requests.get(url_info, params=payload_info, headers=headers)
    data_info_text = r_info.text
    # The endpoint returns JSONP; strip the "callback(...)" wrapper first.
    data_info_json = demjson.decode(data_info_text.strip(" callback(")[:-1])
    if indicator == "数据说明":
        print(f"数据更新时间: {data_json['data']['lastUpdateTime']}")
        return data_info_df
    if indicator == "中国实时数据":
        print(f"数据更新时间: {data_json['data']['lastUpdateTime']}")
        return current_df
    if indicator == "中国历史时点数据":
        print(f"数据更新时间: {data_json['data']['lastUpdateTime']}")
        return hist_today_df
    if indicator == "中国历史累计数据":
        print(f"数据更新时间: {data_json['data']['lastUpdateTime']}")
        return hist_total_df
    if indicator == "世界历史时点数据":
        print(f"数据更新时间: {data_json['data']['lastUpdateTime']}")
        return outside_today_df
    if indicator == "世界历史累计数据":
        print(f"数据更新时间: {data_json['data']['lastUpdateTime']}")
        return outside_total_df
    if indicator == "全球所有国家及地区时点数据":
        print(f"数据更新时间: {data_json['data']['lastUpdateTime']}")
        return all_world_today_df
    elif indicator == "全球所有国家及地区累计数据":
        print(f"数据更新时间: {data_json['data']['lastUpdateTime']}")
        return all_world_total_df
    elif indicator == "中国各地区时点数据":
        print(f"数据更新时间: {data_json['data']['lastUpdateTime']}")
        return area_today_df
    elif indicator == "中国各地区累计数据":
        print(f"数据更新时间: {data_json['data']['lastUpdateTime']}")
        return area_total_df
    elif indicator == "疫情学术进展":
        return article_df
    elif indicator == "实时资讯新闻播报":
        return pd.DataFrame(data_info_json["list"])
    elif indicator == "实时医院新闻播报":
        return pd.DataFrame(data_info_json["hospital"])
    elif indicator == "前沿知识":
        return pd.DataFrame(data_info_json["papers"])
    elif indicator == "权威发布":
        return pd.DataFrame(data_info_json["power"])
    elif indicator == "滚动新闻":
        return pd.DataFrame(data_info_json["scrollNews"])
def covid_dxy(indicator="湖北"):
    """
    20200315-丁香园接口更新分为国内和国外
    丁香园-全国统计-info
    丁香园-分地区统计-data
    丁香园-全国发热门诊一览表-hospital
    丁香园-全国新闻-news
    :param indicator: ["info", "data", "hospital", "news"]
    :type indicator: str
    :return: 返回指定 indicator 的数据
    :rtype: pandas.DataFrame
    """
    # NOTE(review): all data is scraped out of inline <script> tags on the
    # dxy.cn page; any markup change upstream breaks the string slicing below.
    url = "https://3g.dxy.cn/newh5/view/pneumonia"
    r = requests.get(url)
    r.encoding = "utf-8"
    soup = BeautifulSoup(r.text, "lxml")
    # news-china
    text_data_news = str(
        soup.find_all("script", attrs={"id": "getTimelineServiceundefined"})
    )
    temp_json = text_data_news[
        text_data_news.find("= [{") + 2 : text_data_news.rfind("}catch")
    ]
    # NOTE(review): if temp_json is empty, chinese_news is never assigned and
    # the "实时播报" branch below would raise NameError — confirm and guard.
    if temp_json:
        json_data = pd.DataFrame(json.loads(temp_json))
        chinese_news = json_data[
            ["title", "summary", "infoSource", "provinceName", "sourceUrl"]
        ]
    # news-foreign
    text_data_news = str(soup.find_all("script", attrs={"id": "getTimelineService2"}))
    temp_json = text_data_news[
        text_data_news.find("= [{") + 2 : text_data_news.rfind("}catch")
    ]
    json_data = pd.DataFrame(json.loads(temp_json))
    foreign_news = json_data
    # data-domestic
    data_text = str(soup.find("script", attrs={"id": "getAreaStat"}))
    data_text_json = json.loads(
        data_text[data_text.find("= [{") + 2 : data_text.rfind("catch") - 1]
    )
    # NOTE(review): DataFrame.append is deprecated in modern pandas; these
    # loops build one row-block per province.
    big_df = pd.DataFrame()
    for i, p in enumerate(jsonpath.jsonpath(data_text_json, "$..provinceName")):
        temp_df = pd.DataFrame(jsonpath.jsonpath(data_text_json, "$..cities")[i])
        temp_df["province"] = p
        big_df = big_df.append(temp_df, ignore_index=True)
    domestic_city_df = big_df
    data_df = pd.DataFrame(data_text_json).iloc[:, :7]
    data_df.columns = ["地区", "地区简称", "现存确诊", "累计确诊", "-", "治愈", "死亡"]
    domestic_province_df = data_df[["地区", "地区简称", "现存确诊", "累计确诊", "治愈", "死亡"]]
    # data-global
    data_text = str(
        soup.find("script", attrs={"id": "getListByCountryTypeService2true"})
    )
    data_text_json = json.loads(
        data_text[data_text.find("= [{") + 2: data_text.rfind("catch") - 1]
    )
    global_df = pd.DataFrame(data_text_json)
    # info
    dxy_static = soup.find(attrs={"id": "getStatisticsService"}).get_text()
    data_json = json.loads(
        dxy_static[dxy_static.find("= {") + 2 : dxy_static.rfind("}c")]
    )
    china_statistics = pd.DataFrame(
        [
            time.strftime(
                "%Y-%m-%d %H:%M:%S", time.localtime(data_json["modifyTime"] / 1000)
            ),
            data_json["currentConfirmedCount"],
            data_json["confirmedCount"],
            data_json["suspectedCount"],
            data_json["curedCount"],
            data_json["deadCount"],
            data_json["seriousCount"],
            data_json["suspectedIncr"],
            data_json["currentConfirmedIncr"],
            data_json["confirmedIncr"],
            data_json["curedIncr"],
            data_json["deadIncr"],
            data_json["seriousIncr"],
        ],
        index=[
            "数据发布时间",
            "现存确诊",
            "累计确诊",
            "境外输入",
            "累计治愈",
            "累计死亡",
            "现存重症",
            "境外输入较昨日",
            "现存确诊较昨日",
            "累计确诊较昨日",
            "累计治愈较昨日",
            "累计死亡较昨日",
            "现存重症较昨日",
        ],
        columns=["info"],
    )
    foreign_statistics = pd.DataFrame.from_dict(
        data_json["foreignStatistics"], orient="index"
    )
    global_statistics = pd.DataFrame.from_dict(
        data_json["globalStatistics"], orient="index"
    )
    # hospital
    url = (
        "https://assets.dxycdn.com/gitrepo/tod-assets/output/default/pneumonia/index.js"
    )
    payload = {"t": str(int(time.time()))}
    r = requests.get(url, params=payload)
    hospital_df = pd.read_html(r.text)[0].iloc[:, :-1]
    if indicator == "中国疫情分省统计详情":
        return domestic_province_df
    if indicator == "中国疫情分市统计详情":
        return domestic_city_df
    elif indicator == "全球疫情分国家统计详情":
        return global_df
    elif indicator == "中国疫情实时统计":
        return china_statistics
    elif indicator == "国外疫情实时统计":
        return foreign_statistics
    elif indicator == "全球疫情实时统计":
        return global_statistics
    elif indicator == "中国疫情防控医院":
        return hospital_df
    elif indicator == "实时播报":
        return chinese_news
    # The "趋势图" indicators below download a pre-rendered chart image and
    # open it in an image viewer instead of returning a DataFrame.
    elif indicator == "中国-新增疑似-新增确诊-趋势图":
        img_file = Image.open(
            BytesIO(requests.get(data_json["quanguoTrendChart"][0]["imgUrl"]).content)
        )
        img_file.show()
    elif indicator == "中国-现存确诊-趋势图":
        img_file = Image.open(
            BytesIO(requests.get(data_json["quanguoTrendChart"][1]["imgUrl"]).content)
        )
        img_file.show()
    elif indicator == "中国-现存疑似-趋势图":
        img_file = Image.open(
            BytesIO(requests.get(data_json["quanguoTrendChart"][2]["imgUrl"]).content)
        )
        img_file.show()
    elif indicator == "中国-治愈-趋势图":
        img_file = Image.open(
            BytesIO(requests.get(data_json["quanguoTrendChart"][3]["imgUrl"]).content)
        )
        img_file.show()
    elif indicator == "中国-死亡-趋势图":
        img_file = Image.open(
            BytesIO(requests.get(data_json["quanguoTrendChart"][4]["imgUrl"]).content)
        )
        img_file.show()
    elif indicator == "中国-非湖北新增确诊-趋势图":
        img_file = Image.open(
            BytesIO(requests.get(data_json["hbFeiHbTrendChart"][0]["imgUrl"]).content)
        )
        img_file.show()
    elif indicator == "中国-湖北新增确诊-趋势图":
        img_file = Image.open(
            BytesIO(requests.get(data_json["hbFeiHbTrendChart"][1]["imgUrl"]).content)
        )
        img_file.show()
    elif indicator == "中国-湖北现存确诊-趋势图":
        img_file = Image.open(
            BytesIO(requests.get(data_json["hbFeiHbTrendChart"][2]["imgUrl"]).content)
        )
        img_file.show()
    elif indicator == "中国-非湖北现存确诊-趋势图":
        img_file = Image.open(
            BytesIO(requests.get(data_json["hbFeiHbTrendChart"][3]["imgUrl"]).content)
        )
        img_file.show()
    elif indicator == "中国-治愈-死亡-趋势图":
        img_file = Image.open(
            BytesIO(requests.get(data_json["hbFeiHbTrendChart"][4]["imgUrl"]).content)
        )
        img_file.show()
    elif indicator == "国外-国外新增确诊-趋势图":
        img_file = Image.open(
            BytesIO(requests.get(data_json["foreignTrendChart"][0]["imgUrl"]).content)
        )
        img_file.show()
    elif indicator == "国外-国外累计确诊-趋势图":
        img_file = Image.open(
            BytesIO(requests.get(data_json["foreignTrendChart"][1]["imgUrl"]).content)
        )
        img_file.show()
    elif indicator == "国外-国外死亡-趋势图":
        img_file = Image.open(
            BytesIO(requests.get(data_json["foreignTrendChart"][2]["imgUrl"]).content)
        )
        img_file.show()
    elif indicator == "国外-重点国家新增确诊-趋势图":
        img_file = Image.open(
            BytesIO(
                requests.get(
                    data_json["importantForeignTrendChart"][0]["imgUrl"]
                ).content
            )
        )
        img_file.show()
    elif indicator == "国外-日本新增确诊-趋势图":
        img_file = Image.open(
            BytesIO(
                requests.get(
                    data_json["importantForeignTrendChart"][1]["imgUrl"]
                ).content
            )
        )
        img_file.show()
    elif indicator == "国外-意大利新增确诊-趋势图":
        img_file = Image.open(
            BytesIO(
                requests.get(
                    data_json["importantForeignTrendChart"][2]["imgUrl"]
                ).content
            )
        )
        img_file.show()
    elif indicator == "国外-伊朗新增确诊-趋势图":
        img_file = Image.open(
            BytesIO(
                requests.get(
                    data_json["importantForeignTrendChart"][3]["imgUrl"]
                ).content
            )
        )
        img_file.show()
    elif indicator == "国外-美国新增确诊-趋势图":
        img_file = Image.open(
            BytesIO(
                requests.get(
                    data_json["importantForeignTrendChart"][4]["imgUrl"]
                ).content
            )
        )
        img_file.show()
    elif indicator == "国外-法国新增确诊-趋势图":
        img_file = Image.open(
            BytesIO(
                requests.get(
                    data_json["importantForeignTrendChart"][5]["imgUrl"]
                ).content
            )
        )
        img_file.show()
    elif indicator == "国外-德国新增确诊-趋势图":
        img_file = Image.open(
            BytesIO(
                requests.get(
                    data_json["importantForeignTrendChart"][6]["imgUrl"]
                ).content
            )
        )
        img_file.show()
    elif indicator == "国外-西班牙新增确诊-趋势图":
        img_file = Image.open(
            BytesIO(
                requests.get(
                    data_json["importantForeignTrendChart"][7]["imgUrl"]
                ).content
            )
        )
        img_file.show()
    elif indicator == "国外-韩国新增确诊-趋势图":
        img_file = Image.open(
            BytesIO(
                requests.get(
                    data_json["importantForeignTrendChart"][8]["imgUrl"]
                ).content
            )
        )
        img_file.show()
    else:
        # Any other indicator is treated as a province name and looked up in
        # the per-province city breakdown.
        try:
            data_text = str(soup.find("script", attrs={"id": "getAreaStat"}))
            data_text_json = json.loads(
                data_text[data_text.find("= [{") + 2 : data_text.rfind("catch") - 1]
            )
            data_df = pd.DataFrame(data_text_json)
            sub_area = pd.DataFrame(
                data_df[data_df["provinceName"] == indicator]["cities"].values[0]
            )
            if sub_area.empty:
                return print("暂无分区域数据")
            sub_area.columns = ["区域", "现在确诊人数", "确诊人数", "疑似人数", "治愈人数", "死亡人数", "id"]
            sub_area = sub_area[["区域", "现在确诊人数", "确诊人数", "疑似人数", "治愈人数", "死亡人数"]]
            return sub_area
        except IndexError as e:
            print("请输入省/市的全称, 如: 浙江省/上海市 等")
def covid_baidu(indicator="湖北"):
    """
    百度-新型冠状病毒肺炎-疫情实时大数据报告
    https://voice.baidu.com/act/newpneumonia/newpneumonia/?from=osari_pc_1
    :param indicator: 看说明文档
    :type indicator: str
    :return: 指定 indicator 的数据
    :rtype: pandas.DataFrame
    """
    # NOTE(review): several live Baidu endpoints are queried unconditionally;
    # schemas (and the hard-coded "ak" key) may stop working at any time.
    url = "https://huiyan.baidu.com/openapi/v1/migration/rank"
    payload = {
        "type": "move",
        "ak": "kgD2HiDnLdUhwzd3CLuG5AWNfX3fhLYe",
        "adminType": "country",
        "name": "全国",
    }
    r = requests.get(url, params=payload)
    move_in_df = pd.DataFrame(r.json()["result"]["moveInList"])
    move_out_df = pd.DataFrame(r.json()["result"]["moveOutList"])
    url = "https://opendata.baidu.com/api.php"
    payload = {
        "query": "全国",
        "resource_id": "39258",
        "tn": "wisetpl",
        "format": "json",
        "cb": "jsonp_1580470773343_11183",
    }
    r = requests.get(url, params=payload)
    text_data = r.text
    # Responses are JSONP; strip the callback wrapper before parsing.
    json_data_news = json.loads(
        text_data.strip("/**/jsonp_1580470773343_11183(").rstrip(");")
    )
    url = "https://opendata.baidu.com/data/inner"
    payload = {
        "tn": "reserved_all_res_tn",
        "dspName": "iphone",
        "from_sf": "1",
        "dsp": "iphone",
        "resource_id": "28565",
        "alr": "1",
        "query": "肺炎",
        "cb": "jsonp_1606895491198_93137",
    }
    r = requests.get(url, params=payload)
    json_data = json.loads(r.text[r.text.find("({") + 1 : r.text.rfind(");")])
    spot_report = pd.DataFrame(json_data["Result"][0]["DisplayData"]["result"]["items"])
    # domestic-city
    url = "https://voice.baidu.com/act/newpneumonia/newpneumonia/?from=osari_pc_1"
    r = requests.get(url)
    soup = BeautifulSoup(r.text, "lxml")
    data_json = demjson.decode(soup.find(attrs={"id": "captain-config"}).text)
    big_df = pd.DataFrame()
    for i, p in enumerate(
        jsonpath.jsonpath(data_json["component"][0]["caseList"], "$..area")
    ):
        temp_df = pd.DataFrame(
            jsonpath.jsonpath(data_json["component"][0]["caseList"], "$..subList")[i]
        )
        temp_df["province"] = p
        big_df = big_df.append(temp_df, ignore_index=True)
    domestic_city_df = big_df
    domestic_province_df = pd.DataFrame(data_json["component"][0]["caseList"]).iloc[
        :, :-2
    ]
    big_df = pd.DataFrame()
    for i, p in enumerate(
        jsonpath.jsonpath(data_json["component"][0]["caseOutsideList"], "$..area")
    ):
        temp_df = pd.DataFrame(
            jsonpath.jsonpath(
                data_json["component"][0]["caseOutsideList"], "$..subList"
            )[i]
        )
        temp_df["province"] = p
        big_df = big_df.append(temp_df, ignore_index=True)
    outside_city_df = big_df
    outside_country_df = pd.DataFrame(
        data_json["component"][0]["caseOutsideList"]
    ).iloc[:, :-1]
    big_df = pd.DataFrame()
    for i, p in enumerate(
        jsonpath.jsonpath(data_json["component"][0]["globalList"], "$..area")
    ):
        temp_df = pd.DataFrame(
            jsonpath.jsonpath(data_json["component"][0]["globalList"], "$..subList")[i]
        )
        temp_df["province"] = p
        big_df = big_df.append(temp_df, ignore_index=True)
    global_country_df = big_df
    global_continent_df = pd.DataFrame(data_json["component"][0]["globalList"])[
        ["area", "died", "crued", "confirmed", "confirmedRelative"]
    ]
    if indicator == "热门迁入地":
        return move_in_df
    elif indicator == "热门迁出地":
        return move_out_df
    elif indicator == "今日疫情热搜":
        return pd.DataFrame(json_data_news["data"][0]["list"][0]["item"])
    elif indicator == "防疫知识热搜":
        return pd.DataFrame(json_data_news["data"][0]["list"][1]["item"])
    elif indicator == "热搜谣言粉碎":
        return pd.DataFrame(json_data_news["data"][0]["list"][2]["item"])
    elif indicator == "复工复课热搜":
        return pd.DataFrame(json_data_news["data"][0]["list"][3]["item"])
    elif indicator == "热门人物榜":
        return pd.DataFrame(json_data_news["data"][0]["list"][4]["item"])
    elif indicator == "历史疫情热搜":
        return pd.DataFrame(json_data_news["data"][0]["list"][5]["item"])
    elif indicator == "搜索正能量榜":
        return pd.DataFrame(json_data_news["data"][0]["list"][6]["item"])
    elif indicator == "游戏榜":
        return pd.DataFrame(json_data_news["data"][0]["list"][7]["item"])
    elif indicator == "影视榜":
        return pd.DataFrame(json_data_news["data"][0]["list"][8]["item"])
    elif indicator == "小说榜":
        return pd.DataFrame(json_data_news["data"][0]["list"][9]["item"])
    elif indicator == "疫期飙升榜":
        return pd.DataFrame(json_data_news["data"][0]["list"][10]["item"])
    elif indicator == "实时播报":
        return spot_report
    elif indicator == "中国分省份详情":
        return domestic_province_df
    elif indicator == "中国分城市详情":
        return domestic_city_df
    elif indicator == "国外分国详情":
        return outside_country_df
    elif indicator == "国外分城市详情":
        return outside_city_df
    elif indicator == "全球分洲详情":
        return global_continent_df
    elif indicator == "全球分洲国家详情":
        return global_country_df
def covid_hist_city(city="武汉市"):
    """
    Historical epidemic records for one city (daily, from 2019-12-01).
    Source: https://github.com/canghailan/Wuhan-2019-nCoV
    :return: the rows of the dataset matching the requested city
    :rtype: pandas.DataFrame
    """
    url = "https://raw.githubusercontent.com/canghailan/Wuhan-2019-nCoV/master/Wuhan-2019-nCoV.json"
    frame = pd.DataFrame(requests.get(url).json())
    return frame[frame["city"] == city]
def covid_hist_province(province="湖北省"):
    """
    Historical epidemic records for one province (daily, from 2019-12-01).
    Source: https://github.com/canghailan/Wuhan-2019-nCoV
    :return: the rows of the dataset matching the requested province
    :rtype: pandas.DataFrame
    """
    url = "https://raw.githubusercontent.com/canghailan/Wuhan-2019-nCoV/master/Wuhan-2019-nCoV.json"
    frame = pd.DataFrame(requests.get(url).json())
    return frame[frame["province"] == province]
if __name__ == "__main__":
    # 历史数据
    # epidemic_hist_city_df = covid_hist_province()
    # print(epidemic_hist_city_df)
    # epidemic_hist_province_df = covid_hist_province(province="湖北省")
    # print(epidemic_hist_province_df)
    # NOTE(review): the variable name says "dxy" but covid_163() is called —
    # confirm which source this smoke test is meant to exercise.
    covid_dxy_df = covid_163()
    print(covid_dxy_df)
18845748066 | import os,sys,shutil,multiprocessing
sys.path.append("..")
from base.get_config import MyConfig as myconfig
pid=multiprocessing.current_process().pid  # current process id, used to name per-run folders
# Root of the runtime-data directory, assembled from the project config file.
folderpath=myconfig("project","project_path").value+myconfig("project","data_path").value
def folder_create(path=None):
    """Create a working folder and return its path.

    :param path: target directory; when falsy, a per-process default
        ``<folderpath>/testsuite-pid-<pid>`` is used (module globals).
    :return: the directory path, guaranteed to exist on return.
    """
    if not path:
        path = folderpath + "/" + "testsuite-pid-" + str(pid)
    # exist_ok avoids the check-then-create race of the original version
    # (os.path.exists(...)==False followed by os.mkdir).
    os.makedirs(path, exist_ok=True)
    return path
def folder_clear(path=folderpath):
    """Delete generated sub-folders of the runtime-data directory.

    Only operates when *path* points at a folder literally named
    ``runningdata``; the ``data_debug`` and ``data_running`` sub-folders
    are always preserved.  Any other path is refused with a warning.

    :param path: directory to clean (defaults to the configured data path)
    """
    path = os.path.abspath(path)
    # os.path.basename is portable; the original split on "\\" and
    # therefore only recognised Windows-style paths.
    if os.path.basename(path) == "runningdata":
        for child in os.listdir(path):
            full = os.path.join(path, child)
            # Only top-level sub-folders are removed (same as the original,
            # which required root == path inside its os.walk loop).
            if os.path.isdir(full) and child not in ("data_debug", "data_running"):
                shutil.rmtree(full)
    else:
        print("清空目录:"+str(path)+"下文件夹,谨慎操作!")
if __name__=="__main__":
    # Smoke test: create the default per-process folder and report it.
    print("创建了文件:",folder_create())
print(folder_clear()) | cainiaosun/study | 测试/自动化合并/autotest/base/web_ui/running_folder.py | running_folder.py | py | 1,130 | python | en | code | 0 | github-code | 36 |
73000266663 | def create_model(opt):
    """Factory: build, initialize and return the model named by ``opt.model``.

    Each branch also asserts the dataset mode that model requires.
    """
    model = None
    if opt.model == 'pix2pix':
        assert(opt.dataset_mode == 'aligned')
        from .pix2pix_model import Pix2PixModel
        model = Pix2PixModel()
    elif opt.model == 'pix2pix_three':
        assert(opt.dataset_mode == 'aligned_three')
        from .pix2pix_model_three import Pix2PixModel
        model = Pix2PixModel()
    elif opt.model == 'pix2pix_attn':
        assert (opt.dataset_mode == 'aligned_three')
        from .pix2pix_model_attn import Pix2PixModel
        model = Pix2PixModel()
    elif opt.model == 'e2e':
        assert (opt.dataset_mode == 'paralleled')
        from .e2e_model import E2EModel
        model = E2EModel()
    else:
        raise NotImplementedError('model [%s] not implemented.' % opt.model)
    # Every model class exposes initialize()/name(); see the sibling modules.
    model.initialize(opt)
    print("model [%s] was created" % (model.name()))
    return model
| cyduoot/facefusion | models/__init__.py | __init__.py | py | 876 | python | en | code | 0 | github-code | 36 |
30898403361 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: dutrasr
# @Date: 2015-08-27 01:17:31
# @Last Modified by: dutrasr
# @Last Modified time: 2015-09-20 22:13:08
#import modules
import os
###################################################################################
# Recebe um diretorio com as pastas contendo as coletas e retorna um dicionario com
# a chave representando a pasta e a lista representando os arquivos contidos nela
###################################################################################
def getFiles(CurrentDir):
    """Map every entry of *CurrentDir* to the list of files it contains.

    :param CurrentDir: directory whose sub-folders hold the capture files
    :return: dict ``{"<CurrentDir>/<entry>": [filename, ...]}``, or None
        when *CurrentDir* itself cannot be listed.
    """
    try:
        dList = [os.path.join(CurrentDir, folders) for folders in os.listdir(CurrentDir)]
    except OSError:  # narrowed from the original bare except, which hid real bugs
        print("Something went wrong!!")
        return
    # Key each folder by its full path; the value lists its contents.
    return {folder: os.listdir(folder) for folder in dList}
###################################################################################
################################################################################### | dutrasrInf/Biometrics-of-Human-Gait-Using-Kinect | GenerateInfo/travelToDir.py | travelToDir.py | py | 1,117 | python | de | code | 0 | github-code | 36 |
42130961103 | from beautifultable import BeautifulTable
from Contact_new import Contact
class InMemoryImpl:
    """Console contact book backed by an in-memory class-level list.

    Every method is interactive: it prompts via input() and reports via
    print().  Contacts are Contact instances (see Contact_new).
    """
    # Shared storage for all contacts (class attribute, not per-instance).
    contact_list = []
    @classmethod
    def addContact(cls):
        """Prompt for the four contact fields and append a new Contact."""
        name = input("enter name: ")
        email = input("enter email: ")
        mobile = input("enter mobile: ")
        address = input("enter address: ")
        cls.contact_list.append(Contact(name, email, mobile, address))
        print(f"Contact is added succesfully!!! with name: {name} ")
    @classmethod
    def deleteContact(cls):
        """Prompt for a name and delete the first exact (case-insensitive) match."""
        name = input("enetr name to delete: ")
        contact = cls.get_contact_by_name(name)
        if contact:
            cls.contact_list.remove(contact)
            print(f"contact: {name} deleted successfully!!!!")
        else:
            print(f"contact with name : {name} not found")
    @classmethod
    def viewContact(cls):
        """Render the whole contact list as a table."""
        InMemoryImpl._paint(cls.contact_list)
    @classmethod
    def search(cls):
        """Prompt for a name fragment and show all substring matches."""
        if len(cls.contact_list) > 0 :
            name = input("enetr name to search: ")
            s_list = list(filter(lambda x:name.lower() in x.get_name().lower(),cls.contact_list))
            if len(s_list) > 0:
                InMemoryImpl._paint(s_list)
            else:
                # NOTE(review): missing f-prefix — "{name}" is printed literally.
                print("there is no data found with searched name: {name}")
        else:
            print("Contact book is empty!!..... You cant search!!!")
    @classmethod
    def get_contact_by_name(cls, name):
        """Return the first contact whose name equals *name* (case-insensitive), else None.

        Implicitly returns None as well when the contact list is empty.
        """
        if len(cls.contact_list) > 0:
            contact = list(filter(lambda x:x.get_name().lower() == name.lower(), cls.contact_list))
            return contact[0] if contact else None
    @classmethod
    def updateContact(cls):
        """Prompt for a name, then interactively update one chosen field.

        Blank input for the new value leaves the field unchanged.
        """
        name = input("enetr name to update: ")
        contact = cls.get_contact_by_name(name)
        if contact:
            print("1.Name 2.Email 3.Mobile 4.Address")
            ch = int(input("enter your choice: "))
            if ch == 1:
                print(f"Old name: {contact.get_name()}")
                name= input("entyer the new name: ")
                if name:
                    contact.set_name(name)
            elif ch == 2:
                print(f"Old email: {contact.get_email()}")
                email= input("entyer the new email: ")
                if email:
                    contact.set_email(email)
            elif ch == 3:
                print(f"Old mobile: {contact.get_mobile()}")
                mobile= input("entyer the new mobile: ")
                if mobile:
                    contact.set_mobile(mobile)
            elif ch == 4:
                print(f"Old address: {contact.get_address()}")
                address= input("entyer the new address: ")
                if address:
                    contact.set_address(address)
        else:
            print(f"contact not found with name: {name}")
    @staticmethod
    def _paint(lst):
        """Print *lst* of contacts as a BeautifulTable, or an empty-book notice."""
        if len(lst) != 0:
            table=BeautifulTable()
            table.column_headers = ["Name", "Email", "Mobile", "Address"]
            for c in lst:
                table.append_row([c.get_name(),c.get_email(), c.get_mobile(), c.get_address()])
            print(table)
        else:
print(f"Contact Book is empty!.....") | adityaKoteCoder/codex | Contactbook/inmemory.py | inmemory.py | py | 3,251 | python | en | code | 0 | github-code | 36 |
1947165541 | def solution(s):
    """Return True when *s* holds equally many p/P and y/Y characters."""
    p_ = 0
    y_ = 0
    for i in s:
        if i in ['p', 'P']:
            p_ += 1
        if i in ['y', 'Y']:
            y_ += 1
    # Equal counts — including zero of each — count as balanced.
    if p_ == y_: return True
    else: return False
| hellokena/Programmers | 코딩테스트 연습/Level 1/연습문제_문자열 내 p와 y의 개수.py | 연습문제_문자열 내 p와 y의 개수.py | py | 203 | python | en | code | 0 | github-code | 36 |
25756148314 | computer_price = {
"HP": 600,
"DELL": 650,
"MACBOOK": 12000,
"ASUS": 400,
"ACER": 350,
"TOSHIBA": 600,
"FUJITSU": 900,
"ALIENWARE": 1000,
}
user_key = input("Type of computer you want to check: ")
# Bug fix: the original indexed with the bound method object ``user_key.upper``
# (missing call parentheses), which is never a dict key and always raised KeyError.
print("Price of",user_key,":",computer_price[user_key.upper()])
| Supporter09/C4T-B05 | python/section11/Dict4/print_price_2.py | print_price_2.py | py | 291 | python | en | code | 1 | github-code | 36 |
28798067661 | def some_function(sentence):
    """Print *sentence* with every listed bad word masked by asterisks."""
    bad_words = ["lame"]
    lister = []
    for i in sentence:
        lister.append(i)
    # NOTE(review): the char-by-char copy merely rebuilds the input string;
    # new_sent == sentence at this point.
    new_sent = "".join(lister)
    for a in bad_words:
        if a in new_sent:
            # Replace every occurrence with a same-length run of '*'.
            new_sent = new_sent.replace(a, "*" * len(a))
    print(new_sent)
some_function("It's so lame that today is already sunday night. I mean... What happened to our weekend?") | kteel620/Python-Files | Word_Censor.py | Word_Censor.py | py | 387 | python | en | code | 0 | github-code | 36 |
154684037 | from django.urls import path
from home import views
urlpatterns = [
    # Authentication and landing pages.
    path('sign', views.sign, name='sign'),
    path('', views.loginp, name='loginp'),
    path('logoutp', views.logoutp, name='logoutp'),
    path('base', views.base, name='base'),
    path('mainhome', views.mainhome, name='home'),
    # path('accounts/login/', views.predict_demand_supply_dtree, name='predict'),
    # Prediction endpoints; each *_results route reuses its form view.
    path('predict/', views.predict_demand_supply_dtree, name='predict'),
    path('prediction_results', views.predict_demand_supply_dtree, name='predictResult'),
    path('pcw', views.pcw, name='pcw'),
    path('prediction_results2', views.pcw, name='predictResult2'),
    path('pcacw', views.pcacw, name='pcacw'),
    path('prediction_results3', views.pcacw, name='predictResult3')
]
| Atharv4507/SP | home/urls.py | urls.py | py | 768 | python | en | code | 0 | github-code | 36 |
37290685399 | from django.shortcuts import render,redirect
# Standard library
import hashlib

# Django
from django.template.context_processors import csrf
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.http import HttpResponse
from django.http import Http404  # raised by the plan views below; was missing
from django.contrib.auth.decorators import login_required
from django.template import loader
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives

# Project
from utilities import utility_functions
from utilities.utility_functions import generate_md5_hash
from coupons.models import Coupon
from user_profile.models import UserProfile
from bitasync_site.models import Data_Transfer_Plan
from models import Purchase,PendingPurchase
from payline_dotir.payment_gateway import send_url, get_result
from payline_dotir.settings import SEND_URL_FINAL, PAYLINE_DOTIR_API_FINAL
@login_required
def pay_for_a_plan(request,plan_name):
    """Render the checkout page for the selected data-transfer plan.

    :param plan_name: one of the known plan codes
    :raises Http404: when *plan_name* is not a valid plan code
    """
    context = {}
    # Validate the requested plan code first.
    valid_plans = ["L1","L2","L5","U1","U3","U6"]
    if plan_name not in valid_plans:
        raise Http404("Data transfer selected is not valid.")
    # Resolve the concrete plan object.
    all_plans = Data_Transfer_Plan.objects.all()
    plan = utility_functions.get_plan_by_name(all_plans, plan_name)
    # The user's coupons determine the (possibly discounted) price shown.
    user_profile = UserProfile.objects.get(user=request.user)
    user_existing_coupons = Coupon.objects.filter(user_profile=user_profile)
    context['selected_plan'] = utility_functions.create_temp_plan(plan, user_existing_coupons)
    context['coupon_available'] = bool(user_existing_coupons)
    if user_existing_coupons:
        context['existing_coupons'] = user_existing_coupons
    # (The original also called utility_functions.get_best_coupon() here but
    # never used the result; that dead call has been removed.)
    return render(request,'payment/pay_for_a_plan.html',context)
@login_required
def initialise_payment_payline(request,plan_name):
    """Validate the plan, record a PendingPurchase and redirect to payline.ir.

    The pending purchase's hashcode is embedded in the gateway's return URL
    so result_payline() can find it again after payment.

    :param plan_name: one of the known plan codes
    :raises Http404: when *plan_name* is not a valid plan code
    """
    #check if the plan is valid.
    valid_plans = ["L1","L2","L5","U1","U3","U6"]
    if plan_name not in valid_plans :
        raise Http404("Data transfer selected is not valid.")
    # get the plan the user has selected
    all_plans = Data_Transfer_Plan.objects.all()
    plan = utility_functions.get_plan_by_name(all_plans,plan_name)
    # get the user's coupons
    user_profile = UserProfile.objects.get( user = request.user )
    user_existing_coupons = Coupon.objects.filter( user_profile = user_profile )
    # create the temp plan for the plan selected by user
    selected_plan = utility_functions.create_temp_plan(plan, user_existing_coupons)
    # create a pending purchase
    pending_purchase = PendingPurchase()
    pending_purchase.data_transfer_plan = plan
    pending_purchase.user = request.user
    pending_purchase.save()
    # prepare amount: discounted when the user has any coupon
    if user_existing_coupons:
        amount = selected_plan.discounted_price
    else:
        amount = selected_plan.original_price
    # get gateway_url
    # integrate pending purchase hashcode in redirect url
    redirect_url = 'http://gooshibegooshi.com/payment/result_payline/'+pending_purchase.hashcode+'/'
    gateway_url = send_url(amount, redirect_url,SEND_URL_FINAL, PAYLINE_DOTIR_API_FINAL)
    # redirect to payline.ir
    return redirect(gateway_url)
@csrf_exempt
def result_payline(request,pending_purchase_hashcode):
    """Gateway callback: verify the payline transaction and finish the purchase.

    On gateway result 1 the purchase is recorded (success page); any other
    result — or a missing one — shows the failure page.  The pending
    purchase is deleted in every case.
    """
    trans_id = request.POST['trans_id']
    id_get = request.POST['id_get']
    final_result = get_result(PAYLINE_DOTIR_API_FINAL, trans_id, id_get)
    context = {}
    # retrieve the pending purchase created by initialise_payment_payline
    pending_purchase = PendingPurchase.objects.get(hashcode = pending_purchase_hashcode)
    # get the user's coupons
    user_profile = UserProfile.objects.get( user = pending_purchase.user )
    user_existing_coupons = Coupon.objects.filter( user_profile = user_profile )
    # create the temp plan for the plan selected by user
    selected_plan = utility_functions.create_temp_plan(pending_purchase.data_transfer_plan, user_existing_coupons)
    context['selected_plan'] = selected_plan
    response = None
    if final_result is None:
        response = pay_for_a_plan_failure(request,context)
    else:
        # the gateway reports 1 for a confirmed payment
        if int(final_result) == 1:
            response = pay_for_a_plan_success(request,pending_purchase,context,user_existing_coupons,selected_plan)
        else:
            response = pay_for_a_plan_failure(request,context)
    # remove pending purchase
    pending_purchase.delete()
    return response
def pay_for_a_plan_success(request,pending_purchase,context,user_existing_coupons,selected_plan):
    """Record the confirmed Purchase, consume the best coupon, email a receipt.

    Called only from result_payline() after the gateway confirmed payment.
    Returns the rendered success page.
    """
    # add the purchase to the database
    new_purchase = Purchase()
    new_purchase.user = pending_purchase.user
    new_purchase.data_transfer_plan = pending_purchase.data_transfer_plan
    if user_existing_coupons:
        new_purchase.amount_paid = selected_plan.discounted_price
    else:
        new_purchase.amount_paid = selected_plan.original_price
    new_purchase.remaining_allowance_frequency = pending_purchase.data_transfer_plan.freq
    new_purchase.save()
    # save follow_up number using hash (needs the id, so save() ran first)
    follow_up_number = generate_md5_hash(str(new_purchase.id))
    new_purchase.follow_up_number = follow_up_number
    new_purchase.save()
    context['follow_up_number'] = follow_up_number
    # if necessary, remove user's best coupon
    if user_existing_coupons:
        best_coupon = utility_functions.get_best_coupon(user_existing_coupons)
        best_coupon.delete()
    # send an email (plaintext + HTML alternative)
    plaintext = loader.get_template('payment/pay_for_a_plan_complete_email.txt')
    htmly = loader.get_template('payment/pay_for_a_plan_complete_email.html')
    subject = loader.get_template('payment/pay_for_a_plan_complete_email_subject.html')
    subject_content = subject.render(context).replace('\n',' ')
    text_content = plaintext.render(context)
    html_content = htmly.render(context)
    from_email = 'sales@gooshibegooshi.com'
    recipient_list = [new_purchase.user.email]
    msg = EmailMultiAlternatives(subject_content, text_content, from_email, recipient_list)
    msg.attach_alternative(html_content, "text/html")
    msg.send()
    # return response to the user.
    return render(request,'payment/successful_payment.html',context)
def pay_for_a_plan_failure(request,context):
    """Render the failed-payment page (used by result_payline)."""
    return render(request,'payment/failed_payment.html',context)
| bitapardaz/bitasync | payment/views.py | views.py | py | 6,561 | python | en | code | 0 | github-code | 36 |
41644824235 | from pathlib import Path
import string
import unicodedata
import time
import torch
import torch.nn as nn
import numpy as np
from torch.optim import Adam
def find_files(path, pattern):
    """Yield the entries under *path* whose names match the glob *pattern*."""
    root = Path(path)
    return root.glob(pattern)
# Location of the per-language name files (one <language>.txt per category).
names_dir = './datasets/data/names'
pat = '*.txt'
print(list(find_files(names_dir, pat)))
# Character vocabulary: ASCII letters plus the punctuation kept after folding.
letters = string.ascii_letters + " .,;'"
n_letters = len(letters)
def unicode_to_ascii(s):
    """Strip combining accents and drop any character outside ``letters``."""
    decomposed = unicodedata.normalize('NFD', s)
    kept = [c for c in decomposed
            if unicodedata.category(c) != 'Mn' and c in letters]
    return ''.join(kept)
print(unicode_to_ascii('Ślusàrski'))
def read_lines(path):
    """Read *path* as UTF-8 and return each line folded to the ASCII vocabulary."""
    with open(path, encoding='utf-8') as handle:
        return [unicode_to_ascii(raw) for raw in handle]
categories = []
category_lines = {}
# Build {language: [name, ...]} from every names/*.txt file; the file stem
# (e.g. "Italian") is the category label.
for f in find_files(names_dir, pat):
    category = f.name.split('.')[0]
    categories.append(category)
    lines = read_lines(f)
    category_lines[category] = lines
n_categories = len(categories)
print(category_lines['Italian'][:5])
def letter_to_tensor(letter):
    """One-hot encode a single character as a (1, n_letters) tensor."""
    out = torch.zeros(1, n_letters)
    out[0][letters.index(letter)] = 1
    return out
def line_to_tensor(line):
    """Encode a string as a (seq_len, 1, n_letters) one-hot tensor."""
    stacked = torch.cat([letter_to_tensor(ch) for ch in line])
    return stacked.view(len(line), 1, -1)
class RNN(nn.Module):
    """Single-layer GRU classifier.

    The GRU's final hidden state is projected by a linear layer and
    normalised with log-softmax over the output categories.
    """
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # Layer construction order matters for reproducible seeded init.
        self.gru = nn.GRU(input_size, hidden_size)
        self.h2o = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def init_hidden(self, batch_size):
        """Fresh all-zero hidden state of shape (1, batch, hidden)."""
        return torch.zeros(1, batch_size, self.hidden_size)

    def forward(self, input):
        batch_size = input.size()[1]
        out_seq, final_hidden = self.gru(input, self.init_hidden(batch_size))
        logits = self.h2o(final_hidden).view(batch_size, -1)
        return self.softmax(logits)
def random_choice(l):
    # Thin wrapper over numpy's sampler so the strategy is swappable in one place.
    return np.random.choice(l)
def random_training_example():
    """Draw a random (category, name) pair and its tensor encodings.

    Returns (category_name, line, category_index_tensor, one_hot_line_tensor);
    the index tensor has shape (1,) for NLLLoss.
    """
    i = np.random.randint(n_categories)
    category = categories[i]
    line = random_choice(category_lines[category])
    category_tensor = torch.tensor([i], dtype=torch.long)
    line_tensor = line_to_tensor(line)
    return category, line, category_tensor, line_tensor
def category_from_output(output):
    """Map a row of log-probabilities to (category name, category index)."""
    best = int(output.argmax().item())
    return categories[best], best
def time_since(since):
    """Format the elapsed wall-clock time since *since* as 'Xm Ys'.

    :param since: start timestamp as returned by time.time()
    :return: e.g. '2m 5s' (seconds truncated toward zero by %d)
    """
    elapsed = time.time() - since
    # divmod replaces the original np.floor round-trip on a plain scalar.
    minutes, seconds = divmod(elapsed, 60)
    return '%dm %ds' % (minutes, seconds)
# --- Training setup: model, NLL loss (matches the LogSoftmax output), Adam ---
hidden_size = 128
rnn = RNN(n_letters, hidden_size, n_categories)
criterion = nn.NLLLoss()
lr = 0.005
optimizer = Adam(rnn.parameters(), lr)
n_iters = 100000
print_every = 5000
plot_every = 1000
current_loss = 0
all_losses = []
start = time.time()
# --- Training loop: one randomly-sampled name per iteration ---
for it in range(1, n_iters + 1):
    category, line, category_tensor, line_tensor = random_training_example()
    optimizer.zero_grad()
    output = rnn(line_tensor)
    loss = criterion(output, category_tensor)
    loss.backward()
    optimizer.step()
    current_loss += loss.item()
    # Print iter number, loss, name and guess
    if it % print_every == 0:
        guess, guess_i = category_from_output(output)
        correct = '√' if guess == category else '× (%s)' % category
        print('%d %d%% (%s) %.4f %s / %s %s' % (it, it / n_iters * 100, time_since(start), loss, line, guess, correct))
    # Add current loss avg to list of losses
    if it % plot_every == 0:
        all_losses.append(current_loss / plot_every)
        current_loss = 0
# Evaluation / visualisation.  The original referenced ``plt``, ``ticker``
# and ``all_categories`` without ever defining them (NameError at runtime);
# the two matplotlib imports are added here and the list is ``categories``.
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

plt.plot(all_losses)

# Build a row-normalised confusion matrix from random samples.
confusion = torch.zeros(n_categories, n_categories)
n_confusion = 10000
for i in range(n_confusion):
    category, line, category_tensor, line_tensor = random_training_example()
    output = rnn(line_tensor)
    guess, guess_i = category_from_output(output)
    category_i = categories.index(category)
    confusion[category_i][guess_i] += 1
for i in range(n_categories):
    confusion[i] = confusion[i] / confusion[i].sum()

# Heat-map of the confusion matrix with one tick per category.
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(confusion.numpy())
fig.colorbar(cax)
ax.set_xticklabels([''] + categories, rotation=90)
ax.set_yticklabels([''] + categories)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
| sbl1996/pytorch-snippets | char_lstm.py | char_lstm.py | py | 4,373 | python | en | code | 0 | github-code | 36 |
20158271860 | #import sqlTesting as sqlT
from matplotlib.pyplot import connect
import connect as sqlT
import os
#Runs through connect.py until the user is done
# Re-run the connect.py user flow until the user explicitly quits.
again = 'y'
while(again == 'y'):
    os.system('CLS')  # NOTE(review): 'CLS' is Windows-only; POSIX needs 'clear'
    sqlT.user(sqlT.my_cursor, sqlT.my_db)
    again = input("Press y to continue and n to quit: ")
    # Re-prompt until the answer is exactly 'y' or 'n'.
    while(again != 'y' and again != 'n'):
        print("Not a correct response")
        again = input("Press y to continue and n to quit: ")
    if(again == 'n'):
print("Thank you, have a nice day!") | ZachGr/project0 | main.py | main.py | py | 522 | python | en | code | 0 | github-code | 36 |
13653886738 | import matplotlib.pyplot as plt
def main():
    """Plot result accuracy against polynomial degree (hard-coded data)."""
    # NOTE(review): 'filename' is prompted for but never used afterwards —
    # confirm whether the prompt can be dropped.
    filename = input('Enter a file name: ')
    X = [0,1,2,3,4,5]
    Y=[0.78,0.92,0.91,0.88,0.88,0.89]
    #plt.ylabel('Generation with best result')
    plt.ylabel('Accuracy of result')
    plt.plot(X,Y)
    plt.xlabel('Degree of polynomial')
    plt.show()
# NOTE(review): stray demo at module scope — runs on import, before main().
x = [[9, 5, 9], [7, 8, 9]]
print(x)
if __name__ == '__main__':
main() | agatachamula/genetic-algorthm | graphs.py | graphs.py | py | 422 | python | en | code | 0 | github-code | 36 |
74258267945 | # https://takeuforward.org/interviews/tcs-nqt-coding-sheet-tcs-coding-questions/
# Eample 1:.
# Input: N = 1100110
# Output: 146
# Explaxnation: 1100110 when converted to octal number is “146”.
# Convert a binary string to its octal representation: left-pad with zeros
# to a multiple of three bits, then weight each 3-bit group as 4/2/1.
s = "1100110"
pad = (3 - len(s) % 3) % 3
s = "0" * pad + s
ans = ""
for start in range(0, len(s), 3):
    digit = int(s[start]) * 4 + int(s[start + 1]) * 2 + int(s[start + 2])
    print(digit)
    ans += str(digit)
print(ans)
| Danuragtiwari/tcs-nqt | Number System/2.py | 2.py | py | 636 | python | en | code | 0 | github-code | 36 |
34921941272 | import numpy as np
import pandas as pd
import torch
import pickle
import random
import re
from tqdm import tqdm
from transformers.modeling_utils import PoolerAnswerClass
# On-disk location of each supported dataset.
data_path={
    'mfc': '/data/news/mfc/',
    'gvfc': '/data/news/GVFC/GVFC/GVFC_headlines_and_annotations.xlsx',
    'twitter': '/data/tweet/twitter/',
    'immi': '/data/tweet/immi/',
    'fora': '/data/debate/issue_framing/data/dialogue/'
}
# Canonical issue ids; several aliases map onto the same id
# (immig->immigration, guns->guncontrol, lgbt->samesex).
all_issue_map={
    'climate':0,
    'deathpenalty':1,
    'guncontrol':2,
    'immigration':3,
    'samesex':4,
    'tobacco':5,
    'aca':6,
    'abort':7,
    'immig':3,
    'isis':8,
    'guns':2,
    'lgbt':4
}
def save_obj(obj, name):
    """Pickle *obj* to ./obj<name>.pkl using the highest pickle protocol."""
    target = 'obj' + name + '.pkl'
    with open(target, 'wb+') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(name, path=None):
    """Unpickle obj<name>.pkl from the CWD, or from *path* when given.

    *path* is used as a raw prefix, so it must end with a separator.
    """
    prefix = '' if path is None else path
    with open(prefix + 'obj' + name + '.pkl', 'rb') as handle:
        return pickle.load(handle)
def clean(text):
    """Mask URLs, @-mentions and twitter picture links; drop NBSP characters."""
    substitutions = (
        ('(https?|ftp|file)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]', ' [URL]'),
        ('@([^@ ]*)', ' [MENTION]'),
        ('pic.twitter.com/.*', ' [PIC]'),
        ('\xa0', ''),
    )
    # Apply in the original order: URLs first so their '@' is already gone.
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text
def split_data(data, issue_prepare=False):
    """Shuffle and split *data* into 70/20/10 train/dev/test partitions.

    :param data: dict with parallel 'text' and 'label' lists (and 'issue'
        when issue_prepare is True)
    :param issue_prepare: carry the per-example 'issue' field through
    :return: (train, dev, test) dicts with the same keys as *data*
    """
    total = len(data['text'])
    order = list(range(total))
    random.shuffle(order)
    keys = ['text', 'label'] + (['issue'] if issue_prepare else [])
    cut_train, cut_dev = int(0.7 * total), int(0.9 * total)
    chunks = (order[:cut_train], order[cut_train:cut_dev], order[cut_dev:])
    parts = []
    for chunk in chunks:
        part = {k: [] for k in keys}
        for idx in chunk:
            for k in keys:
                part[k].append(data[k][idx])
        parts.append(part)
    return tuple(parts)
def split_data_for_fewshot(data, num_class, shot=1):
    """Build k-shot train/dev splits from multi-label one-hot data.

    Per class, up to *shot* unused examples go to train (all of them when the
    class has <= shot examples), and — when the class has more than 2*shot
    examples — up to *shot* further unused ones go to dev.  'used' prevents
    the same example index being assigned twice across classes.

    :param data: dict with 'text' and 'label'; data['label'][i][0] is a
        one-hot row over num_class classes (grounded by the ==1 test below).
    :return: (train, dev) dicts; 'test' is built but never returned.
    """
    n=len(data['text'])
    idx=list(range(n))
    random.shuffle(idx)
    train,dev,test={"text": [], "label": []},{"text": [], "label": []},{"text": [], "label": []}
    label_set=list(range(num_class))
    used=[]
    label_dict={l:[] for l in label_set}
    # Invert the one-hot labels: class -> list of example indices.
    for i in idx:
        for j in range(num_class):
            if data['label'][i][0][j]==1:
                label_dict[j].append(i)
    print({k:len(label_dict[k]) for k in label_dict})
    for l in label_dict:
        # --- train portion for class l ---
        if len(label_dict[l])>shot:
            k,j=0,0
            while k<shot:
                if j<len(label_dict[l]) and label_dict[l][j] not in used:
                    train['text'].append(data['text'][label_dict[l][j]])
                    train['label'].append(data['label'][label_dict[l][j]])
                    used.append(label_dict[l][j])
                    k+=1
                    j+=1
                else:
                    j+=1
                    if j>len(label_dict[l]):break
        else:
            # Too few examples: take every unused one for train.
            for j in range(len(label_dict[l])):
                if label_dict[l][j] not in used:
                    train['text'].append(data['text'][label_dict[l][j]])
                    train['label'].append(data['label'][label_dict[l][j]])
                    used.append(label_dict[l][j])
        # --- dev portion for class l (only when plenty of examples exist) ---
        if len(label_dict[l])>2*shot:
            k,j=0,0
            while k<shot:
                if j<len(label_dict[l]) and label_dict[l][j] not in used:
                    dev['text'].append(data['text'][label_dict[l][j]])
                    dev['label'].append(data['label'][label_dict[l][j]])
                    used.append(label_dict[l][j])
                    k+=1
                    j+=1
                else:
                    j+=1
                    if j>len(label_dict[l]):break
            # NOTE(review): this is a while-else clause, so the fallback below
            # runs whenever the while exits without break — confirm intended.
            else:
                for j in range(len(label_dict[l])):
                    if label_dict[l][j] not in used:
                        dev['text'].append(data['text'][label_dict[l][j]])
                        dev['label'].append(data['label'][label_dict[l][j]])
                        used.append(label_dict[l][j])
    return train,dev
def read_mfc_issue(path='/remote-home/xymou/Frame/framework/data/news/mfc/', data_type='article', issue='climate'):
    """Load the MFC pickle at the given granularity and return one issue's data."""
    pickle_names = {'article': 'article_data_multi', 'sentence': 'sentence_data_multi'}
    if data_type not in pickle_names:
        raise Exception('Undefined data type! Choose from [article, sentence]')
    data = load_obj(pickle_names[data_type], path)
    return data[issue]
def read_mfc(path='/remote-home/xymou/Frame/framework/data/news/mfc/', data_type='article', issue='all',issue_prepare=False):
    """Concatenate one or all MFC issues into a single {'text','label'[,'issue']} dict.

    :param data_type: 'article' or 'sentence' granularity (see read_mfc_issue)
    :param issue: a single issue name, or 'all' for the six known issues
    :param issue_prepare: when True, also emit the canonical issue id per example
    """
    print('Reading data from MFC dataset!')
    if issue!='all':
        issues= [issue]
    else:
        issues= ['climate', 'deathpenalty', 'guncontrol', 'immigration', 'samesex', 'tobacco']
    if issue_prepare:
        data = {'text':[], 'label':[], 'issue':[]}
    else:
        data = {'text':[], 'label':[]}
    for i in issues:
        tmp = read_mfc_issue(path, data_type, issue=i)
        data['text'].extend(tmp['text'])
        data['label'].extend(tmp['label'])
        if issue_prepare:
            # One issue id per example, repeated for the whole issue block.
            data['issue'].extend([all_issue_map[i]]*len(tmp['label']))
    return data
def read_gvfc(path='/remote-home/xymou/Frame/framework/data/news/GVFC/GVFC/GVFC_headlines_and_annotations.xlsx'):
    """Load GVFC headlines with their (1- or 2-element) frame label lists.

    Keeps only rows marked relevant (Q1) whose primary theme is set; 99 is
    the sheet's sentinel for "no theme".  Theme ids are shifted to 0-based.
    """
    print('Reading data from GVFC dataset!')
    df=pd.read_excel(path)
    data,label=[],[]
    for i in tqdm(range(len(df))):
        text=df.loc[i,'news_title']
        if df.loc[i,'Q1 Relevant']==1 and df.loc[i,'Q3 Theme1']!=99:
            data.append(text.lower())
            tmp=[df.loc[i,'Q3 Theme1']-1]
            if df.loc[i,'Q3 Theme2']!=99:
                tmp.append(df.loc[i,'Q3 Theme2']-1)
            label.append(tmp)
    return {"text": data, "label": label}
def read_twitter_issue(path='/remote-home/xymou/Frame/sample_test/Weakly/', issue='aca'):
    """Load the processed tweets for one issue with remapped frame labels.

    Frames 15-17 are discarded; the remaining 1-based frame ids are shifted
    to 0-based and renumbered through label_map.  Tweets whose frames are all
    discarded are skipped entirely.
    """
    tweet_processed=load_obj('tweet_processed', path)
    # Renumbering of the 0-based frame ids (note 12<->5 swap at the end).
    label_map={
        0:0,1:1,2:2,3:3,4:4,5:6,6:7,7:8,8:9,9:10,10:11,11:12,12:5,13:13
    }
    text,label=[],[]
    for key in tweet_processed:
        if issue in tweet_processed[key]['issue']:
            tmp=tweet_processed[key]['text']
            # Lowercase, mask URLs/mentions/pics, then strip '#' characters.
            tmp=clean(tmp.lower())
            tmp=re.sub('\#','',tmp)
            res = [label_map[k-1] for k in tweet_processed[key]['frame'] if k not in [15,16,17]]
            if len(res):
                label.append(res)
                text.append(tmp)
    return {'text':text,'label':label}
def read_twitter(path='/remote-home/xymou/Frame/sample_test/Weakly/', issue='all', issue_prepare = True):
    """Concatenate one or all Twitter-framing issues into a single dict.

    :param issue: a single issue alias, or 'all' for the six known aliases
    :param issue_prepare: when True (default here), also emit the canonical
        issue id per example via all_issue_map
    """
    print('Reading data from Twitter-framing dataset!')
    if issue == 'all':
        issues=['aca','abort','immig','isis','guns','lgbt']
    else:
        issues = [issue]
    if issue_prepare:
        data = {'text':[], 'label':[], 'issue':[]}
    else:
        data = {'text':[], 'label':[]}
    for i in issues:
        tmp = read_twitter_issue(path, i)
        for j in range(len(tmp['label'])):
            data['text'].append(tmp['text'][j])
            data['label'].append(tmp['label'][j])
            if issue_prepare:
                data['issue'].append(all_issue_map[i])
    return data
def read_immi(path='/remote-home/xymou/Frame/framework/data/tweet/immi/' , issue='issue_specific', issue_prepare=False): # here 'issue' actually selects the frame type (ftype)
    """Load the immigration-tweet dataset with generic or issue-specific frames.

    :param issue: pickle name AND frame-type switch — 'issue_generic' uses the
        14 generic frame names, anything else the 11 issue-specific ones
    :param issue_prepare: with 'issue_generic', also emit the immigration
        issue id per example
    """
    print('Reading data from immigration twitter dataset!')
    text, label = [],[]
    data = load_obj(issue, path)
    if issue=='issue_generic':
        labels = ['Cultural Identity','Capacity and Resources','Security and Defense','Quality of Life',
        'Crime and Punishment','Policy Prescription and Evaluation','Morality and Ethics','External Regulation and Reputation','Health and Safety',
        'Political Factors and Implications','Public Sentiment','Economic','Fairness and Equality','Legality, Constitutionality, Jurisdiction' ]
    else:
        labels = ['Victim: Global Economy','Threat: Fiscal', 'Hero: Cultural Diversity', 'Threat: Public Order', 'Threat: Jobs',
        'Victim: Humanitarian', 'Threat: National Cohesion','Hero: Integration','Victim: Discrimination','Victim: War','Hero: Worker']
    # Frame name -> position in the list above.
    label_map={l:labels.index(l) for l in labels}
    for i in range(len(data['text'])):
        text.append(clean(data['text'][i].lower()))
        label.append([label_map[k] for k in data['label'][i]])
    if issue == 'issue_generic' and issue_prepare:
        return {'text':text, 'label':label, 'issue':[all_issue_map['immigration']]*len(label)}
    return {'text':text,'label':label}
def read_fora(path='/remote-home/xymou/Frame/framework/data/debate/issue_framing/data/dialogue/'):
    """Load the Fora dialogue dataset as {'text': [...], 'label': [...]}."""
    print('Reading data from Fora dataset!')
    data = load_obj('fora_data', path)
    count = len(data['label'])
    texts = [data['text'][i] for i in range(count)]
    labels = [data['label'][i] for i in range(count)]
    return {'text': texts, 'label': labels}
# Dispatch table: dataset name -> loader function (consumed by read_data).
data_func_map={
    'mfc':read_mfc,
    'gvfc':read_gvfc,
    'twitter':read_twitter,
    'immi':read_immi,
    'fora':read_fora
}
def read_data(config):
    """Dispatch to the loader registered for config['dataset'].

    The remaining keys of *config* become keyword arguments of the loader;
    keys the loader does not take are dropped and 'path' is filled in from
    data_path.

    :param config: dict with at least a 'dataset' key
    :raises KeyError: when the dataset has no registered loader
    """
    # Work on a copy so the caller's dict is not mutated (the original
    # popped keys out of the caller's object).
    config = dict(config)
    dataset = config.pop('dataset')
    if dataset not in data_func_map:
        raise KeyError('Current dataset is not mapped to data_read function! Please define the read data function for this dataset!')
    func = data_func_map[dataset]
    if dataset != 'mfc':
        config.pop('data_type')
    if dataset in ['gvfc','fora']:
        config.pop('issue')
    config['path'] = data_path[dataset]
    return func(**config)
def convert_to_one_hot(label, label_num):
    """Turn lists of class indices into 1 x label_num float32 one-hot tensors."""
    print('# of labels:', label_num)
    encoded = []
    for indices in label:
        row = [0] * label_num
        for idx in indices:
            row[idx] = 1
        encoded.append(torch.tensor(row, dtype=torch.float32).view(1, -1))
    return encoded
from torch.utils.data import Dataset
class mydata(Dataset):
    """Torch Dataset that token-encodes data['text'] in place at construction.

    NOTE(review): __getitem__ reads data['issue'] (see the return below), so
    datasets without an 'issue' key will raise KeyError — confirm usage.
    """
    def __init__(self, data, tokenizer, padding_idx=0, max_len=None):
        # Lengths are measured on the raw strings, before tokenization.
        self.text_lengths=[len(seq) for seq in data['text']]
        self.max_len=max_len
        if self.max_len is None:
            self.max_len=max(self.text_lengths)
        self.num_sequences = len(data["text"])
        self.data=data
        # Mutates the caller's dict: each text is replaced by its token ids.
        for i in range(len(data['text'])):
            data['text'][i] = tokenizer.encode(data['text'][i], max_length=self.max_len)
    def __len__(self):
        return self.num_sequences
    def __getitem__(self, index):
        return {"text":self.data['text'][index],
                "label":self.data['label'][index],
| xymou/Frame_Detection | data/load_data_multi.py | load_data_multi.py | py | 11,117 | python | en | code | 1 | github-code | 36 |
73421338023 |
import sys
import json
import buildCNNModel as cnn
from loadutils import retrieve_model, loadProcessedData, saveDevPredictionsData
from evaluation_helper import convert_raw_y_pred, get_f1, get_precision, get_recall
import numpy as np
def printUsage():
    """Print the command-line usage banner for this training script."""
    usage_lines = (
        "USAGE:\n\ntrain a CNN model",
        "All training data must have already been saved with loadutils.saveProcessedData()",
        "<model name> <hyper parameters file (JSON)> ",
    )
    for line in usage_lines:
        print(line)
def main():
    """Train the CNN NER model, evaluate on dev and save all artifacts.

    command line arguments:
        <model name> <hyper parameters file (JSON)>

    Returns -1 (after printing usage) when arguments are missing.
    """
    if len(sys.argv) < 3:
        printUsage()
        return -1
    modelName = sys.argv[1]
    with open(sys.argv[2]) as fp:
        hypers = json.load( fp)
    # Pre-processed tensors must already exist (loadutils.saveProcessedData).
    trainX, trainX_capitals_cat, trainX_pos_cat, devX, devX_capitals_cat, \
    devX_pos_cat, trainY_cat, devY_cat, embedding_matrix, train_decoderY, dev_decoderY = loadProcessedData()
    # contruct training dicts
    trainX_dict = {'x':trainX}
    devX_list_arrayS = [devX]
    trainY_dict = {'out_pred':trainY_cat}
    devY_list_arrayS = [devY_cat]
    # for final prediction
    devX_dict = {'x':devX} #for model_eval only
    # Optional auxiliary inputs, switched by the hyper-parameter file.
    if hypers["use_pos_tags"]:
        trainX_dict["x_pos"] = trainX_pos_cat
        devX_list_arrayS += [devX_pos_cat]
        devX_dict["x_pos"] = devX_pos_cat #for model_eval only
    if hypers['use_capitalization_info']:
        trainX_dict["x_capital"] = trainX_capitals_cat
        devX_list_arrayS += [devX_capitals_cat]
        devX_dict["x_capital"] = devX_capitals_cat #for model_eval only
    # Build, compile and fit the model.
    model = cnn.draw_cnn_model( hyper_param=hypers, embedding_matrix=embedding_matrix, verbose=True)
    model = cnn.compile_cnn_model( hypers, model)
    print( "Training Model:", modelName)
    cnn.fit_model( hypers, model, modelName, trainX_dict, devX_list_arrayS, trainY_dict, devY_list_arrayS)
    # save the last model in each epoch and its weights
    with open('./result/'+ modelName + '_model_architecture.json', 'w') as f:
        f.write(model.to_json())
    model.save_weights('./result/' + modelName + '_weights_model.h5')
    # Dev-set prediction and scoring.
    raw_y_pred = model.predict(devX_dict, verbose=1)
    y_true = convert_raw_y_pred(devY_cat)
    print ("prediction on dev set finished. raw 1-hot prediction has shape {}".format(raw_y_pred.shape))
    y_pred = convert_raw_y_pred(raw_y_pred)
    print ("prediction converted to class idx has shape {}".format(y_pred.shape))
    precision = get_precision(y_true, y_pred)
    recall = get_recall(y_true, y_pred)
    f1_score = get_f1(y_true, y_pred)
    print ("precision on dev = {}".format(precision))
    print ("recall on dev = {}".format(recall))
    print ("f1 score on dev = {}".format(f1_score))
    # write out dev predictions
    modelsDir = 'dev_Predictions'
    print ("saving prediction data under directory: {}".format(modelsDir))
    saveDevPredictionsData(modelName=modelName, raw_y_pred=raw_y_pred, raw_y_pred_decoder_embeddings=np.empty(0), y_pred=y_pred, modelsDir=modelsDir)
    print ("please use loadutils.loadDevPredictionsData(modelName, modelsDir='dev_Predictions') to load :\n raw_y_pred, raw_y_pred_decoder_embeddings(empty array for CNN), y_pred")
if __name__ == '__main__':
main()
| Chucooleg/CapsNet_for_NER | code/trainCNNModel.py | trainCNNModel.py | py | 3,296 | python | en | code | 10 | github-code | 36 |
28798067661 | # By: Jared Donnelly
# CS 110 - Prof. Kevin Ryan
# I pledge my Honor that I have abided by the Stevens Honor System
def main():
    """Read whitespace-separated integers from the user and print their sum."""
    print("The following program accepts numerical inputs and sums them")
    print("Please list all the numbers you would like to enter in a list separated by spaces")
    sumables = input("Please being inputting your numbers below, then hit enter when you're finished: \n")
    # Sum the tokens directly instead of indexing with range(len(...)).
    # Note: a non-numeric token raises ValueError, same as the original.
    finalSum = sum(int(token) for token in sumables.split())
    print("The total sum of the list is:", finalSum)
main()
| Eric-Wonbin-Sang/CS110Manager | 2020F_hw5_submissions/donnellyjared/DonnellyJaredP2-1.py | DonnellyJaredP2-1.py | py | 605 | python | en | code | 0 | github-code | 36 |
34535894055 | #!/usr/bin/python
# open a microphone in pyAudio and get its FFT spectrum
import pyaudio
import numpy as np
FORMAT = pyaudio.paInt16  # 16-bit signed samples
CHANNELS = 2  # stereo capture
RATE = 44100  # sample rate in Hz
INPUT_BLOCK_TIME = 0.08  # seconds of audio covered by one FFT window
GLIDING_DIVIDER = 4  # the window advances in 1/4-window steps (gliding FFT)
INPUT_FRAMES_PER_BLOCK = int(RATE*INPUT_BLOCK_TIME/GLIDING_DIVIDER)  # frames read per step
soundtype = np.dtype([('l',np.int16),('r',np.int16)])  # one interleaved stereo frame
class Listener(object):
    """Capture stereo microphone audio and expose a gliding-window FFT.

    A mono buffer covering INPUT_BLOCK_TIME seconds is kept.  Each call to
    get_spectrum() shifts one new chunk (1/GLIDING_DIVIDER of the window)
    into the buffer and returns the magnitude spectrum of the whole window.
    """

    def __init__(self):
        self.pa = pyaudio.PyAudio()
        self.stream = self.open_mic_stream()
        # Prime the buffer with GLIDING_DIVIDER chunks (one full window).
        raw = self.listen()
        for _ in range(1, GLIDING_DIVIDER):
            raw += self.listen()
        # Bug fix: np.frombuffer replaces the deprecated np.fromstring;
        # identical behaviour for a bytes buffer with an explicit dtype.
        stereodata = np.frombuffer(raw, soundtype)
        # Downmix to mono.
        # NOTE(review): the int16 sum can wrap before the /2 on loud input --
        # consider .astype(np.int32) first; kept as-is to preserve behaviour.
        self.buf = (stereodata['l'] + stereodata['r'])/2

    def stop(self):
        """Close the PyAudio input stream."""
        self.stream.close()

    def open_mic_stream(self):
        """Open the default input device with the module-level settings."""
        stream = self.pa.open(format=FORMAT,
                              channels=CHANNELS,
                              rate=RATE,
                              input=True,
                              input_device_index=None,
                              frames_per_buffer=INPUT_FRAMES_PER_BLOCK)
        return stream

    def listen(self):
        """Read one chunk from the stream; returns None on an audio glitch (IOError)."""
        try:
            block = self.stream.read(INPUT_FRAMES_PER_BLOCK)
        except IOError:
            return
        return block

    # Returns the FFT of a sound sample recorded over INPUT_BLOCK_TIME.
    # This is a numpy array of RATE*INPUT_BLOCK_TIME/2 values.
    # The i-th element represents the frequency i/INPUT_BLOCK_TIME
    def get_spectrum(self):
        raw = self.listen()
        stereodata = np.frombuffer(raw, soundtype)
        monodata = (stereodata['l'] + stereodata['r'])/2
        # Slide the window: drop the oldest chunk, append the newest.
        self.buf[:-len(monodata)] = self.buf[len(monodata):]
        self.buf[-len(monodata):] = monodata
        return abs(np.fft.rfft(self.buf))
| maralorn/pythonlights | sound.py | sound.py | py | 1,790 | python | en | code | 0 | github-code | 36 |
74784438504 | import torch
import torch.nn as nn
class MLP(nn.Module):
    """Fully-connected network: input -> (num_layers - 2) hidden layers -> output.

    ``hidden_dim`` must supply at least ``num_layers - 1`` widths:
    ``hidden_dim[0]`` is the width after the input layer and consecutive
    entries size the hidden layers; ``hidden_dim[-1]`` feeds the output layer.
    ReLU follows every layer except the last; dropout is applied on hidden layers.
    """

    def __init__(self, input_dim : int, output_dim : int, hidden_dim : list,
                 num_layers:int, dropout_rate:float=0.):
        super(MLP, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_dim = hidden_dim
        self.dropout_rate = dropout_rate
        self.num_layers = num_layers
        # Input layer maps input_dim -> hidden_dim[0].
        self.input_layer = nn.Linear(input_dim, hidden_dim[0])
        # Bug fixes vs. the original:
        #  * consecutive widths hidden_dim[n] -> hidden_dim[n+1] (the old
        #    n+1 / n+2 indexing skipped hidden_dim[0] and mismatched shapes);
        #  * nn.ModuleList so the hidden layers' parameters are registered
        #    with the module (a plain Python list is invisible to .parameters()).
        self.hidden_layers = nn.ModuleList(
            nn.Linear(hidden_dim[n], hidden_dim[n + 1]) for n in range(num_layers - 2)
        )
        self.output_layer = nn.Linear(hidden_dim[-1], output_dim)
        self.relu = nn.ReLU()
        # Bug fix: nn.Dropout / self.dropout (the original's nn.DropOut and
        # the self.droput/self.dropout typo pair both raised AttributeError).
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        """Forward pass; returns raw (un-activated) output-layer values."""
        outputs = self.relu(self.input_layer(x))
        for h_layer in self.hidden_layers:
            outputs = self.relu(self.dropout(h_layer(outputs)))
        outputs = self.output_layer(outputs)
        return outputs
| GarfieldCK/AI-module | ai_modules/models/module.py | module.py | py | 1,034 | python | en | code | 0 | github-code | 36 |
27559465226 | import tkinter as tk
class Crossword:
    """A small Tkinter crossword: a black 16x16 grid of Labels with white
    Entry cells for the answer words, each limited to a single letter."""

    def __init__(self, window):
        self.window = window
        self.window.geometry('800x800')
        self.window.title('Mazal Tov!')
        self.grid_size = 16
        self.cell_size = 2
        self.last_et_id = 0    # running id used as key into entry_texts
        self.entry_texts = {}  # id -> tk.StringVar of each answer cell
        title_frame = tk.Frame(self.window)
        title_frame.grid(row=0, column=0, sticky='nsew')
        cw_frame = tk.Frame(self.window)
        cw_frame.grid(sticky='nsew')
        # Let every row/column stretch with the window.
        for i in range(self.grid_size):
            self.window.grid_rowconfigure(i, weight=1)
            self.window.grid_columnconfigure(i, weight=1)
            cw_frame.grid_rowconfigure(i, weight=1)
            cw_frame.grid_columnconfigure(i, weight=1)
        # Hebrew title ("Birthday Crossword"), reversed so it renders right-to-left.
        label = tk.Label(title_frame, text='תשבץ יום הולדת'[-1::-1])
        label.grid(row=0, column=0, columnspan=10)
        self._fill_grid(cw_frame, self.grid_size)
        self._create_word(cw_frame, 0, 0, 15, 'v')
        self._create_word(cw_frame, 1, 1, 5, 'v')
        self._create_word(cw_frame, 3, 0, 5, 'h')
        self._create_word(cw_frame, 8, 3, 8, 'h')

    def _fill_grid(self, frame, height, width=None, color='#000000'):
        """Cover `frame` with colored Labels; width defaults to height (square)."""
        if not width:
            width = height
        for i in range(height):
            for j in range(width):
                label = tk.Label(frame, text=' ' * self.cell_size,
                                 background=color)
                label.grid(row=i, column=j)

    def _create_word(self, frame, start_row, start_col, length, dir='h'):
        """Place `length` one-letter Entry cells horizontally ('h') or vertically ('v')."""
        for i in range(length):
            self.last_et_id += 1
            sv = tk.StringVar()
            self.entry_texts[self.last_et_id] = sv
            entry = tk.Entry(frame, width=self.cell_size, textvariable=sv)
            # Bug fix: bind sv as a default argument.  The original closure
            # was late-bound, so every entry's trace validated only the
            # StringVar created last by this loop instead of its own.
            sv.trace("w", lambda *args, sv=sv: self._validate(sv))
            if dir == 'h':
                entry.grid(row=start_row, column=start_col+i)
            elif dir == 'v':
                entry.grid(row=start_row+i, column=start_col)

    def _validate(self, entry_text):
        """Run all per-cell input rules on one StringVar."""
        self._character_limit(entry_text)
        self._letter_check(entry_text)

    @staticmethod
    def _character_limit(entry_text):
        """Keep only the most recently typed character."""
        text = entry_text.get()  # renamed from `input` to avoid shadowing the builtin
        if len(text) > 0:
            entry_text.set(text[-1])

    @staticmethod
    def _letter_check(entry_text):
        """Clear the cell when the remaining character is not a letter."""
        text = entry_text.get()
        if not text.isalpha():
            entry_text.set('')
if __name__ == '__main__':
window = tk.Tk()
gui = Crossword(window)
window.mainloop()
| Yuvashkenazi/crossword | plan_bet/mainb.py | mainb.py | py | 2,623 | python | en | code | 0 | github-code | 36 |
9571225314 | from wq.db import rest
from .models import Site, AssessmentType, Assessment, Map
from .serializers import AssessmentTypeSerializer, AssessmentSerializer, MapSerializer
from django.conf import settings
# Site: full CRUD registration with automatic map layers in the
# list/detail/edit views; cache="none" disables client-side list caching.
rest.router.register_model(
    Site,
    fields="__all__",
    cache="none",
    map=[{
        'mode': 'list',
        'autoLayers': True,
    }, {
        'mode': 'detail',
        'autoLayers': True,
    }, {
        'mode': 'edit',
        'autoLayers': True,
    }],
    # partial=True,
)
# Assessment types use a custom serializer but default routing.
rest.router.register_model(
    AssessmentType,
    serializer=AssessmentTypeSerializer,
    fields="__all__",
)
# this could enable filtering of own assessments
def user_filter(qs, request):
    """Restrict *qs* to rows owned by the requesting user; empty for anonymous users."""
    if not request.user.is_authenticated():
        return qs.none()
    return qs.filter(user=request.user)
# Assessments: read views get automatic map layers; no edit map.
rest.router.register_model(
    Assessment,
    serializer=AssessmentSerializer,
    fields="__all__",
    cache="none",
    map=[{
        'mode': 'list',
        'autoLayers': True,
    }, {
        'mode': 'detail',
        'autoLayers': True,
    }],
)
rest.router.register_model(
    Map,
    serializer=MapSerializer,
    fields="__all__",
)
rest.router.add_page('index', {'url': ''})
# Stand-alone "locate" page: empty base map with geolocation enabled.
rest.router.add_page('locate', {
    'url': 'locate',
    'map': {'layers': []},
    'locate': True
})
# Expose the Mapbox token to the client-side configuration.
rest.router.set_extra_config(
    mapbox_token=settings.MAPBOX_TOKEN,
)
| erikriver/disasters | db/assessments/rest.py | rest.py | py | 1,386 | python | en | code | 0 | github-code | 36 |
28923184781 | #10655 : 마라톤 1
"""
USACO 2014 December Prob 1
O(N^2) is impossible. Use Partial Sum instead.
"""
import sys
input= sys.stdin.readline
#Define manhatten distance
def get_dist(prev, after):
    """Manhattan (taxicab) distance between two (x, y) checkpoints."""
    (x1, y1), (x2, y2) = prev, after
    return abs(x1 - x2) + abs(y1 - y2)
N = int(input().rstrip())
coordinates = [(-1,-1) for _ in range(N)]
# psum[i]  = total path length visiting checkpoints 0..i in order.
# dpsum[i] = cumulative length of "skip" hops (k -> k+2) up to hop i.
psum = [0]
dpsum = [0]
dist_sum = 0 ; dist2_sum = 0 ; minimum = int(1e10)
for case in range(N):
    curr_node = list(map(int,input().rstrip().split()))
    coordinates[case] = curr_node
for case in range(N-1):
    curr_node = coordinates[case] ; next_node = coordinates[case+1]
    dist_sum += get_dist(curr_node, next_node)
    psum.append(dist_sum)
    if case>N-3: continue
    next2_node = coordinates[case+2]
    dist2_sum += get_dist(curr_node, next2_node)
    dpsum.append(dist2_sum)
#Minimum check
# candidate = change in total length when checkpoint i is skipped:
# add the direct hop (i-1 -> i+1) and remove the two hops through i.
for i in range(1,N-1):
    candidate = (dpsum[i] - dpsum[i-1])-(psum[i+1]-psum[i-1])
    if candidate<= minimum : minimum = candidate
answer = dist_sum + minimum
print(answer)
16049471546 | import subprocess
import os
import logging
import platform
from tqdm import tqdm
from ffcuesplitter.exceptions import FFMpegError, FFCueSplitterError
from ffcuesplitter.utils import makeoutputdirs, Popen
if not platform.system() == 'Windows':
import shlex
class FFMpeg:
    """
    FFMpeg is the base class interface for FFCueSplitter.
    It represents FFmpeg command and arguments with their
    sub-processing. Note: Opus sample rate is always 48kHz for
    fullband audio.
    """
    # Map output format name -> ffmpeg audio encoder + sample-rate arguments.
    DATACODECS = {'wav': 'pcm_s16le -ar 44100',
                  'flac': 'flac -ar 44100',
                  'ogg': 'libvorbis -ar 44100',
                  'opus': 'libopus',
                  'mp3': 'libmp3lame -ar 44100',
                  }
    def __init__(self, **kwargs):
        """
        Constructor
        """
        self.kwargs = kwargs
        self.outsuffix = None
    # -------------------------------------------------------------#
    def codec_setup(self, sourcef):
        """
        Returns codec arg based on given format
        Raises:
            FFCueSplitterError from KeyError
            if an unsupported format is given.
        Returns:
            tuple(codec, outsuffix)
        """
        if self.kwargs['outputformat'] == 'copy':
            # 'copy' keeps the source stream as-is; reuse the source extension.
            self.outsuffix = os.path.splitext(sourcef)[1].replace('.', '')
            codec = '-c copy'
        else:
            try:
                self.outsuffix = self.kwargs['outputformat']
                codec = f'-c:a {FFMpeg.DATACODECS[self.outsuffix]}'
            except KeyError as error:
                msgerr = f"Unsupported format '{self.outsuffix}'"
                raise FFCueSplitterError(f'{msgerr}') from error
        return codec, self.outsuffix
    # -------------------------------------------------------------#
    def commandargs(self, audiotracks: (list, tuple)) -> dict:
        """
        Builds the FFmpeg command argument string and assign
        the corresponding duration and name to each audio track.
        It expects a list type object.
        Returns:
            dict(recipes)
        """
        data = []
        # Extra args only needed when progress is parsed by the tqdm runner.
        meters = {'tqdm': '-progress pipe:1 -nostats -nostdin', 'standard': ''}
        for track in audiotracks:
            codec, suffix = self.codec_setup(track["FILE"])
            # Per-track tags written to the output via -metadata KEY="VALUE".
            metadata = {'ARTIST': track.get('PERFORMER', ''),
                        'ALBUM': track.get('ALBUM', ''),
                        'TITLE': track.get('TITLE', ''),
                        'TRACK': (str(track['TRACK_NUM'])
                                  + '/' + str(len(audiotracks))),
                        'DISCNUMBER': track.get('DISCNUMBER', ''),
                        'GENRE': track.get('GENRE', ''),
                        'DATE': track.get('DATE', ''),
                        'COMMENT': track.get('COMMENT', ''),
                        'DISCID': track.get('DISCID', ''),
                        }
            cmd = f'"{self.kwargs["ffmpeg_cmd"]}" '
            cmd += f' -loglevel {self.kwargs["ffmpeg_loglevel"]}'
            cmd += f" {meters[self.kwargs['progress_meter']]}"
            fpath = os.path.join(self.kwargs["dirname"], track["FILE"])
            cmd += f' -i "{fpath}"'
            # NOTE(review): the /44100 implies START/END are stored in
            # 44.1 kHz sample units rather than CUE 1/75 s frames -- confirm.
            cmd += f" -ss {round(track['START'] / 44100, 6)}" # ff to secs
            if 'END' in track:
                cmd += f" -to {round(track['END'] / 44100, 6)}" # ff to secs
            for key, val in metadata.items():
                cmd += f' -metadata {key}="{val}"'
            cmd += f' {codec}'
            cmd += f" {self.kwargs['ffmpeg_add_params']}"
            cmd += ' -y'
            # Output name: "NN - Title.ext", zero-padded track number.
            num = str(track['TRACK_NUM']).rjust(2, '0')
            name = f'{num} - {track["TITLE"]}.{suffix}'
            cmd += f' "{os.path.join(self.kwargs["tempdir"], name)}"'
            args = (cmd, {'duration': track['DURATION'], 'titletrack': name})
            data.append(args)
        return {'recipes': data}
    # --------------------------------------------------------------#
    def command_runner(self, arg, secs):
        """
        Redirect to required runner. Note: tqdm command args
        is slightly different from standard command args because
        tqdm adds `-progress pipe:1 -nostats -nostdin` to arguments,
        see `meters` on `commandargs`.
        This method must return if the `dry` keyword arg is true.
        """
        # On Windows the command string is passed as-is; elsewhere it is
        # tokenized with shlex so Popen gets an argument list.
        if self.kwargs['progress_meter'] == 'tqdm':
            cmd = arg if platform.system() == 'Windows' else shlex.split(arg)
            if self.kwargs['dry'] is True:
                return cmd
            self.run_ffmpeg_command_with_progress(cmd, secs)
        elif self.kwargs['progress_meter'] == 'standard':
            cmd = arg if platform.system() == 'Windows' else shlex.split(arg)
            if self.kwargs['dry'] is True:
                return cmd
            self.run_ffmpeg_command(cmd)
        return None
    # --------------------------------------------------------------#
    def run_ffmpeg_command_with_progress(self, cmd, seconds):
        """
        Run FFmpeg sub-processing showing a tqdm progress meter
        for each loop. Also writes a log file to the output
        destination directory.
        Usage for get elapsed seconds:
            progbar = tqdm(total=round(seconds), unit="s", dynamic_ncols=True)
            progbar.clear()
            previous_s = 0
            s_processed = round(int(output.split('=')[1]) / 1_000_000)
            s_increase = s_processed - previous_s
            progbar.update(s_increase)
            previous_s = s_processed
        Raises:
            FFMpegError
        Returns:
            None
        """
        makeoutputdirs(self.kwargs['outputdir']) # Make dirs for files dest.
        progbar = tqdm(total=100,
                       unit="s",
                       dynamic_ncols=True
                       )
        progbar.clear()
        sep = (f'\nFFcuesplitter Command: {cmd}\n'
               f'=======================================================\n\n')
        try:
            # ffmpeg's stderr goes straight to the log file; stdout carries
            # '-progress' key=value lines that drive the progress bar.
            with open(self.kwargs['logtofile'], "a", encoding='utf-8') as log:
                log.write(sep)
                with Popen(cmd,
                           stdout=subprocess.PIPE,
                           stderr=log,
                           bufsize=1,
                           encoding='utf8',
                           universal_newlines=True) as proc:
                    # out_time_ms is reported in microseconds.
                    for output in proc.stdout:
                        if "out_time_ms" in output.strip():
                            s_processed = int(output.split('=')[1]) / 1_000_000
                            percent = s_processed / seconds * 100
                            progbar.update(round(percent) - progbar.n)
                    if proc.wait(): # error
                        logging.error("Popen proc.wait() Exit status %s",
                                      proc.wait())
                        progbar.close()
                        raise FFMpegError(f"ffmpeg FAILED, See log details: "
                                          f"'{self.kwargs['logtofile']}'")
        except (OSError, FileNotFoundError) as excepterr:
            progbar.close()
            raise FFMpegError(excepterr) from excepterr
        except KeyboardInterrupt as err:
            # proc.kill()
            progbar.close()
            proc.terminate()
            msg = "[KeyboardInterrupt] FFmpeg process failed."
            raise FFMpegError(msg) from err
        progbar.close()
    # --------------------------------------------------------------#
    def run_ffmpeg_command(self, cmd):
        """
        Run FFmpeg sub-processing with stderr output to console.
        The output depending on the ffmpeg loglevel option.
        Raises:
            FFMpegError
        Returns:
            None
        """
        makeoutputdirs(self.kwargs['outputdir']) # Make dirs for output files
        sep = (f'\nFFcuesplitter Command: {cmd}\n'
               f'=======================================================\n\n')
        with open(self.kwargs['logtofile'], "a", encoding='utf-8') as log:
            log.write(sep)
        try:
            subprocess.run(cmd, check=True, shell=False, encoding='utf8',)
        except FileNotFoundError as err:
            raise FFMpegError(f"{err}") from err
        except subprocess.CalledProcessError as err:
            raise FFMpegError(f"ffmpeg FAILED: {err}") from err
        except KeyboardInterrupt as err:
            msg = "[KeyboardInterrupt] FFmpeg process failed."
            raise FFMpegError(msg) from err
| jeanslack/FFcuesplitter | ffcuesplitter/ffmpeg.py | ffmpeg.py | py | 8,582 | python | en | code | 21 | github-code | 36 |
7706234863 | from pathlib import Path
from ruamel.yaml import YAML
yaml = YAML()
def get_tasks_files():
    """Collect tasks/ and handlers/ YAML files anywhere under the current directory."""
    patterns = ("tasks/*.yaml", "tasks/*.yml", "handlers/*.yaml", "handlers/*.yml")
    matches = []
    for pattern in patterns:
        matches.extend(Path(".").rglob(pattern))
    return matches
# Take a list as input, for each item find a key that contains dots, split the key
# by dots and if the resulting list has 3 items, return the key
def get_module_from_list(data: list):
    """Return the first fully-qualified (`namespace.collection.module`) key of each task.

    Tasks with no such key are reported to stdout and contribute nothing.
    """
    modules: list[str] = []
    for task in data:
        # A key like "ansible.builtin.copy" splits into exactly 3 dot-parts.
        module = next((key for key in task if len(key.split(".")) == 3), None)
        if module is None:
            print(f"module not found for task {task.get('name')}")
        else:
            modules.append(module)
    return modules
# Take a Path object as input, read the content, parse it with ruamel.yaml
# and for each dict in the resulting list, return the key that contains dots
def get_modules_from_file(file: Path):
    """Parse one YAML tasks/handlers file and return the module names used in it."""
    modules: list[str] = []
    if not file.is_file():
        return modules
    with open(file, "r") as handle:
        parsed = yaml.load(handle)
    # Empty or null documents yield no modules.
    if not parsed:
        return modules
    return get_module_from_list(parsed)
# find all modules used in tasks and handlers
def get_modules():
    """Aggregate the modules used across every discovered tasks/handlers file."""
    found = []
    for tasks_file in get_tasks_files():
        found.extend(get_modules_from_file(tasks_file))
    return found
# Take a list as input, split each item by dots and return a set of the first 2 items
def get_collections(modules: list[str]):
    """Reduce fully-qualified module names to their unique `namespace.collection` prefixes."""
    return {".".join(name.split(".")[:2]) for name in modules}
print(get_collections(get_modules()))
| jonsible/iac | find_modules.py | find_modules.py | py | 1,795 | python | en | code | 0 | github-code | 36 |
29004603171 | from meeseeks.context import ChangedRoomMessageCtx, LoginCtx
from meeseeks.serializers import ContextFactory, ContextSerializer
from tests.base import BaseTestClass
class TestContextFactory(BaseTestClass):
    """Tests of ContextFactory class. """
    def test_resister(self):
        # NOTE(review): "resister" looks like a typo for "register".
        # register() must record the creator under the (msg, id) key pair.
        ctx_factory = ContextFactory()
        ctx_factory.register('changed', 'stream-room-messages', ChangedRoomMessageCtx)
        self.assertEqual(ctx_factory._creators,
                         {('changed', 'stream-room-messages'): ChangedRoomMessageCtx})
    def test_get_serializer(self):
        """Test of success get_serializer method. """
        serializable = {
            'msg': 'result',
            'id': 'login',
            'result': {
                'id': 'ucPgkuQptW4TTqYH2',
                'token': '5XGsD6i9N4c1qyzIH7a4zigOzj9tBzEnxa2Cu7PkQs3',
                'tokenExpires': {'$date': 1659901043466},
                'type': 'password'
            }
        }
        ctx_factory = ContextFactory()
        login_ctx = LoginCtx
        # NOTE(review): this assigns onto the LoginCtx *class*, mutating
        # shared state across tests -- confirm this is intentional.
        login_ctx._raw_context = serializable
        ctx_factory.register('result', 'login', LoginCtx)
        result = ctx_factory.get_serializer(serializable, 'result', 'login')
        self.assertIsInstance(result, LoginCtx)
        # get_serializer packs the payload into a 1-tuple on the instance.
        self.assertEqual(result._raw_context, (serializable, ))
    def test_fail_get_serializer(self):
        """get_serializer raises ValueError for an unregistered (msg, id) pair. """
        serializable = {}
        ctx_factory = ContextFactory()
        with self.assertRaises(ValueError):
            ctx_factory.get_serializer(serializable, 'test', 'test')
| tolstoyevsky/meeseeks | tests/test_serializers.py | test_serializers.py | py | 1,607 | python | en | code | 1 | github-code | 36 |
769348877 | from get_notes import get_notes
from model import create_network
import pandas as pd
import numpy
import json
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
def train_network():
    # Extract (pitch, duration) note pairs and persist them for later reuse.
    notes = get_notes()
    with open("data/notes.json", "w") as filename:
        json.dump(notes, filename)
    notes_df = pd.DataFrame(notes, columns=['pitch', 'duration'])
    pitches = notes_df['pitch']
    durations = notes_df['duration']
    # Sorted vocabularies give stable integer encodings across runs.
    pitch_vocab = sorted(set(item for item in pitches))
    duration_vocab = sorted(set(item for item in durations))
    with open("data/pitch_vocab.json", "w") as filename:
        json.dump(pitch_vocab, filename)
    with open("data/duration_vocab.json", "w") as filename:
        json.dump(duration_vocab, filename)
    # print("notes_df:")
    # print(notes_df)
    # Window size: predict the next note from the previous `look_back` notes.
    look_back = 4
    in_pitches, in_durations, out_pitches, out_durations = prepare_sequences(notes_df, look_back)
    model = create_network(timesteps=look_back,
                           pitch_vocab_size=len(pitch_vocab),
                           duration_vocab_size=len(duration_vocab))
    model.summary()
    train(model, in_pitches, in_durations, out_pitches, out_durations)
def prepare_sequences(notes, look_back):
    # Build supervised pairs: each sample is a window of `look_back`
    # consecutive notes (input) and the note that follows (target),
    # one-hot encoded separately for pitch and duration.
    pitches = notes['pitch']
    durations = notes['duration']
    pitch_vocab = sorted(set(item for item in pitches))
    duration_vocab = sorted(set(item for item in durations))
    print("pitch_vocab:")
    print(pitch_vocab)
    print("duration_vocab:")
    print(duration_vocab)
    # Map each distinct pitch/duration value to an integer index.
    pitch_to_int = dict((note, number) for number, note in enumerate(pitch_vocab))
    duration_to_int = dict((note, number) for number, note in enumerate(duration_vocab))
    pitches_in = []
    durations_in = []
    pitches_out = []
    durations_out = []
    # Slide the window over the whole note sequence.
    for i in range(notes.shape[0] - look_back):
        pitch_sequence_in = pitches[i:(i + look_back)]
        pitch_sequence_out = pitches[i + look_back]
        duration_sequence_in = durations[i:(i + look_back)]
        duration_sequence_out = durations[i + look_back]
        pitches_in.append([pitch_to_int[char] for char in pitch_sequence_in])
        pitches_out.append(pitch_to_int[pitch_sequence_out])
        durations_in.append([duration_to_int[char] for char in duration_sequence_in])
        durations_out.append(duration_to_int[duration_sequence_out])
    pitches_in = numpy.array(pitches_in)
    durations_in = numpy.array(durations_in)
    pitches_out = numpy.array(pitches_out)
    durations_out = numpy.array(durations_out)
    # One-hot encode the integer indices for the categorical output heads.
    pitches_in = np_utils.to_categorical(pitches_in)
    durations_in = np_utils.to_categorical(durations_in)
    pitches_out = np_utils.to_categorical(pitches_out)
    durations_out = np_utils.to_categorical(durations_out)
    # print('\npitches_in:')
    # print(pitches_in)
    #
    # print('\npitches_out:')
    # print(pitches_out)
    #
    # print('\ndurations_in:')
    # print(durations_in)
    #
    # print('\ndurations_out:')
    # print(durations_out)
    return (pitches_in, durations_in, pitches_out, durations_out)
def train(model, pitch_in, duration_in, pitch_out, duration_out):
    """Fit the two-headed model, checkpointing only the lowest-loss weights."""
    ckpt_path = "weights/weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
    best_only_ckpt = ModelCheckpoint(
        ckpt_path,
        monitor='loss',
        verbose=0,
        save_best_only=True,
        mode='min',
    )
    model.fit(
        [pitch_in, duration_in],
        [pitch_out, duration_out],
        epochs=20,
        batch_size=16,
        callbacks=[best_only_ckpt],
    )
if __name__ == '__main__':
train_network()
| tanelxen/riff-composer | train.py | train.py | py | 3,628 | python | en | code | 0 | github-code | 36 |
10227649807 | """The page module holds the Page class for the web page factory"""
from pathlib import Path
from typing import List
from factory.elements import Element
class Page:
    """A web page: a name, an output route, and the elements it renders."""

    def __init__(self, name: str, route: str, elements: List[Element]) -> None:
        """Create the Page instance.

        :param name: display name of the page
        :param route: relative route used to derive the output .html path
        :param elements: page elements; each must expose an ``html`` attribute
        """
        self.name = name
        self.route = Path(route)
        self.elements = elements

    @property
    def html(self) -> str:
        """Compile HTML from each of the page elements."""
        out = ["<!doctype html>"]
        for element in self.elements:
            # NOTE(review): `out += element.html` extends the list, which
            # assumes Element.html is a list of lines (a plain string would
            # be split into single characters) -- confirm against Element.
            out += element.html
        return "\n".join(out)

    @property
    def html_path(self) -> Path:
        """Return the output path: templates/<route>.html."""
        return Path("templates").joinpath(self.route).with_suffix(".html")

    def to_html(self) -> None:
        """Write the page's HTML to ``html_path``, creating missing directories.

        Bug fix: ``mkdir(parents=True, exist_ok=True)`` so nested routes
        (e.g. ``docs/intro``) work -- the original single-level ``mkdir()``
        raised FileNotFoundError for them -- and a concurrent creation
        between check and mkdir cannot raise.
        """
        self.html_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.html_path, mode="w", encoding="utf-8") as outfile:
            outfile.write(self.html)
| brianjstroh/bstroh | factory/page.py | page.py | py | 1,113 | python | en | code | 1 | github-code | 36 |
36619014009 | # pylint: disable=not-callable, no-member, invalid-name, line-too-long, wildcard-import, unused-wildcard-import, missing-docstring
import torch
import e3nn.point.data_helpers as dh
from e3nn import rs
import numpy as np
torch.set_default_dtype(torch.float64)
def test_data_helpers():
    # Smoke test: all four helpers run on random data without raising.
    N = 7
    lattice = torch.randn(3, 3)
    pos = torch.randn(N, 3)
    Rs_in = [(3, 0), (1, 1)]
    x = torch.randn(N, rs.dim(Rs_in))
    r_max = 1
    dh.neighbor_list_and_relative_vec_lattice(pos, lattice, r_max)
    dh.DataPeriodicNeighbors(x, Rs_in, pos, lattice, r_max)
    dh.neighbor_list_and_relative_vec(pos, r_max)
    dh.DataNeighbors(x, Rs_in, pos, r_max)
def test_silicon_neighbors():
    """Periodic neighbor search on the 2-atom silicon cell yields the known edge list."""
    lattice = torch.tensor([
        [3.34939851, 0., 1.93377613],
        [1.11646617, 3.1578432, 1.93377613],
        [0., 0., 3.86755226]
    ])
    coords = torch.tensor([
        [0., 0., 0.],
        [1.11646617, 0.7894608, 1.93377613]
    ])
    r_max = 2.5
    edge_index, edge_attr = dh.neighbor_list_and_relative_vec_lattice(coords, lattice, r_max=r_max)
    edge_index_true = torch.LongTensor([
        [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
        [0, 1, 1, 1, 1, 1, 0, 0, 0, 0]
    ])
    # Bug fix: the original computed torch.allclose(...) and discarded the
    # result, so this check could never fail.
    assert torch.allclose(edge_index, edge_index_true)
def test_get_edge_edges_and_index():
    # 3-node fully-connected edge set, self-edges included.
    edge_index = torch.LongTensor([
        [0, 0, 0, 1, 1, 1, 2, 2, 2],
        [0, 1, 2, 0, 1, 2, 0, 1, 2]
    ])
    edge_index_dict_asym, _, edge_edge_index_asym = dh.get_edge_edges_and_index(edge_index, symmetric_edges=False)
    edge_index_dict_symm, _, edge_edge_index_symm = dh.get_edge_edges_and_index(edge_index, symmetric_edges=True)
    # symmetric_edges=True collapses (j, i) onto (i, j), leaving 6 of 9 edges.
    check1 = {(0, 0): 0, (0, 1): 1, (0, 2): 2, (1, 0): 3, (1, 1): 4, (1, 2): 5, (2, 0): 6, (2, 1): 7, (2, 2): 8}
    check2 = {(0, 0): 0, (0, 1): 1, (0, 2): 2, (1, 1): 3, (1, 2): 4, (2, 2): 5}
    assert edge_index_dict_asym == check1
    assert edge_index_dict_symm == check2
    # The edge-edge index must reference exactly the edge ids in the dict.
    assert np.max(list(edge_index_dict_asym.values())) == np.max(edge_edge_index_asym)
    assert np.max(list(edge_index_dict_symm.values())) == np.max(edge_edge_index_symm)
def test_initialize_edges():
    edge_index = torch.LongTensor([[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]])
    edge_index_dict, _, _ = dh.get_edge_edges_and_index(edge_index, symmetric_edges=True)
    _, Rs = dh.initialize_edges(torch.ones(5, 1), [(1, 0, 1)], torch.randn(5, 3), edge_index_dict, 2, symmetric_edges=True)
    assert Rs == [(1, 0, 1), (1, 1, -1), (1, 2, 1)]
    _, Rs = dh.initialize_edges(torch.ones(5, 3), [(1, 1, -1)], torch.randn(5, 3), edge_index_dict, 0, symmetric_edges=True)
    assert Rs == [(1, 0, 1), (1, 2, 1)]
    edge_index_dict, _, _ = dh.get_edge_edges_and_index(edge_index, symmetric_edges=False)
    _, Rs = dh.initialize_edges(torch.ones(5, 3), [(1, 1, -1)], torch.randn(5, 3), edge_index_dict, 0, symmetric_edges=False)
    assert Rs == [(1, 0, 1), (1, 1, 1), (1, 2, 1)]
def test_DataEdgeNeighbors():
    # Unit square, re-centred at the origin.
    square = torch.tensor(
        [[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]]
    )
    square -= square.mean(-2)
    data = dh.DataEdgeNeighbors(torch.ones(4, 1), [(1, 0, 1)], square, 1.5, 2)
    assert list(data.edge_x.shape) == [16, 9]
    assert list(data.edge_edge_index.shape) == [2, 64]
    assert list(data.edge_edge_attr.shape) == [64, 3]
def test_DataEdgePeriodicNeighbors():
    # Smoke test: construction succeeds for one atom in a unit cubic cell.
    pos = torch.ones(1, 3) * 0.5
    lattice = torch.eye(3)
    dh.DataEdgePeriodicNeighbors(torch.ones(1, 1), [(1, 0, 1)], pos, lattice, 1.5, 2)
| clementbernardd/ares_fork | lib/ares/e3nn_ares/tests/point/data_helpers_test.py | data_helpers_test.py | py | 3,520 | python | en | code | 0 | github-code | 36 |
26608837899 | # 一万个小时学习Python
# 持续付出 持续输出
# 开发时间:2022/5/21 22:00
#函数内的变量叫局部变量,函数外的变量叫全局变量
#用globe可以把局部变量变成全部变量
def fun(a, b):
    """Add *a* and *b*, publish the sum as the module-level global ``c``, and return it."""
    global c  # demo: promote a function-local result to a global variable
    c = a + b
    return c
print(fun(20,30))
print(c)
| wuji8626/py_code | chap12/9variable.py | 9variable.py | py | 327 | python | zh | code | 0 | github-code | 36 |
34014676895 | from flask import Flask, request, abort, render_template, make_response
import json, requests
from StringIO import StringIO
from time import sleep
try:
from metatool import metatool
except ImportError:
import metatool
try:
from metatool import viz
except ImportError:
import viz
try:
from metatool import config
except ImportError:
import config
try:
from metatool import models
except ImportError:
import models
try:
from metatool import generate_test_data
except ImportError:
import generate_test_data
app = Flask(__name__)
@app.route("/")
def index():
    # Landing page.
    return render_template('index.html', baseurl=config.BASE_URL)
@app.route("/validate", methods=["POST", "GET"])
def validate():
    # Validate a model uploaded via POST or fetched from ?url= via GET,
    # then render the resulting fieldsets as HTML tables.
    mt = request.values.get("modeltype")
    f = None
    if request.method == "POST":
        f = request.files.get("model")
    elif request.method == "GET":
        url = request.values.get("url")
        resp = requests.get(url)
        f = StringIO(resp.text)
    fieldsets = metatool.validate_model(mt, f)
    html = metatool.fieldsets_to_html(fieldsets)
    return render_template("results.html", tables=html, baseurl=config.BASE_URL)
@app.route("/cerifeye", methods=["POST", "GET"])
def cerifeye():
    # Same upload/fetch handling as validate(); renders the CERIF node view.
    mt = request.values.get("modeltype")
    f = None
    if request.method == "POST":
        f = request.files.get("model")
    elif request.method == "GET":
        url = request.values.get("url")
        resp = requests.get(url)
        f = StringIO(resp.text)
    nodes = viz.get_nodes(mt, f)
    return render_template("cerifview.html", nodes=json.dumps(nodes), baseurl=config.BASE_URL)
@app.route("/visualise", methods=["POST", "GET"])
def visualise():
    # Same upload/fetch handling again; renders the generic visualisation.
    mt = request.values.get("modeltype")
    f = None
    if request.method == "POST":
        f = request.files.get("model")
    elif request.method == "GET":
        url = request.values.get("url")
        resp = requests.get(url)
        f = StringIO(resp.text)
    nodes = viz.get_nodes(mt, f)
    return render_template("viz.html", nodes=json.dumps(nodes), baseurl=config.BASE_URL)
@app.route("/acat", methods=["GET"])
def acat_facetview():
    # Faceted search UI over the 'acat' Elasticsearch index.
    return render_template("acat_search.html", es_host=config.ES_HOST, es_index='acat')
@app.route("/aggregate/publications", methods=["GET"])
def publications_facetview():
    # Faceted search UI over the 'ukriss' Elasticsearch index.
    return render_template("aggregate_publications.html", es_host=config.ES_HOST, es_index='ukriss')
@app.route("/aggregate/publications/generate", methods=["GET"])
@app.route("/aggregate/publications", methods=["POST"])
def generate_publications():
    # make sure index is created and has right mappings
    init_status_code = models.Publication.initialise_index()
    if init_status_code != 200:
        return '''Elasticsearch has a problem initialising the {0} index, it returned a {1} HTTP status code.
Check the elasticsearch log for exceptions.'''.format(models.Publication.es_index, init_status_code)
    # Generate and index a fixed batch of synthetic publication records.
    how_many = 1000
    generate_test_data.generate_and_index(how_many)
    models.Publication.refresh()
    sleep(1) # give ES a bit of time to do the refresh
    return "Generated {0} publication records".format(how_many)
if __name__ == "__main__":
    app.run(host='0.0.0.0', debug=True, port=5007)
| CottageLabs/metatool | metatool/web.py | web.py | py | 3,276 | python | en | code | 1 | github-code | 36 |
29935238101 | import numpy as np
import matplotlib.pyplot as plt
import json
import matplotlib as mpl
import matplotlib.cm as cm
import cmocean
from colormaputil import truncate_colormap
def getMaxBracket(minYear, maxYear, data):
    """Largest tax-bracket threshold over the half-open year range [minYear, maxYear).

    `data` maps str(year) -> {'ranges': [bracket thresholds, ...]}.
    Like the original accumulator loop, the result is floored at 0:
    an empty range (or all-negative thresholds) yields 0.
    """
    brackets = [num
                for year in range(minYear, maxYear)
                for num in data[str(year)]['ranges']]
    return max([0] + brackets)
def getColour(year, i, m, data):
    # Map the i-th bracket threshold of `year` (a str key into `data`)
    # through the shared ScalarMappable `m` to an RGBA colour.
    return m.to_rgba(data[year]['ranges'][i])
maxYear = 2018
minYear = 1985
json_data = open('canada.json').read()
data = json.loads(json_data)
# Colour scale spans 0 .. highest bracket threshold across all years.
norm = mpl.colors.Normalize(vmin=0, vmax=getMaxBracket(minYear, maxYear, data))
cmap = truncate_colormap(cmocean.cm.phase, 0.35, 1)
m = cm.ScalarMappable(norm=norm, cmap=cmap)
ind = np.arange(maxYear-minYear)
# One stacked bar per year: each segment is the marginal rate added by a
# bracket, coloured by that bracket's income threshold.
for year in range(minYear, maxYear):
    before = [0] * (year - minYear)
    after = [0] * (maxYear - year-1)
    rates = data[str(year)]['rates']
    previous = 0
    for i in range(len(rates)):
        height = [rates[i]-previous]
        plt.bar(ind, tuple(before + height + after), 1,
                color=getColour(str(year), i, m, data), bottom=previous, linewidth=0)
        previous = rates[i]
# Workaround: ScalarMappable needs a (dummy) array before colorbar() accepts it.
m._A = []
small = 9
medium = 11
large = 12
clb = plt.colorbar(m, format='>$%d', ticks=[a for a in range(0, getMaxBracket(minYear, maxYear, data), 10000)])
clb.set_label('Tax Bracket (CAD):', labelpad=-40, y=1.06, rotation=0, fontsize=large)
clb.ax.tick_params(labelsize=medium)
plt.xlim([0, maxYear-minYear])
plt.title('% Personal Income Federally Taxed in Canada, 1985-2017', fontsize=large)
plt.ylabel('% Tax\nApplied', fontsize=large, rotation=0, labelpad=25)
plt.xticks(ind, [a for a in range(minYear, maxYear)], rotation=60, fontsize=small, y=0.01)
plt.yticks(fontsize=medium)
plt.gca().yaxis.grid(which='major', linestyle='-', linewidth=0.8)
plt.gca().xaxis.grid(which='major', linestyle='-', linewidth=0.5)
plt.gca().yaxis.grid(which='minor', linestyle='-', linewidth=0)
plt.gca().xaxis.grid(False, which='minor')
plt.gca().tick_params(axis='x', which='both', length=0)
plt.xlabel("github.com/rosslh/historical-tax-rate-visualizor", fontsize=small, color='#777777')
plt.minorticks_on()
plt.savefig('figure.png', dpi=400)
| rosslh/Historical-Tax-Rate-Visualizor | plot.py | plot.py | py | 2,244 | python | en | code | 0 | github-code | 36 |
41709462249 | import unittest
import json
from django.test import TestCase
from datetime import datetime
from utente.models import Utente, Prodotto, ProdottoCarrello, Carrello, Pagamento, Ordine
from vetrine.models import Vetrina, VetrinaAmministratore, ResocontoVendite
# Test: creating a user must also create and link the associated cart
class UtenteTest(TestCase):
    def test_save_creates_carrello(self):
        utente = Utente.objects.create(username='testuser', email='test@example.com')
        carrello = Carrello.objects.get(possessore=utente)
        self.assertEqual(carrello.possessore, utente)
# Test: product creation, quantity bookkeeping, and total-revenue property
class ProdottoTest(TestCase):
    def setUp(self):
        self.prodotto = Prodotto.objects.create(
            nome='Prodotto di test',
            codice_seriale=12345,
            tipologia='Test',
            descrizione='Descrizione di test',
            prezzo=9.99,
            disponibilita=10
        )
    def test_creazione_prodotto(self):
        # All constructor fields must round-trip unchanged.
        self.assertEqual(self.prodotto.nome, 'Prodotto di test')
        self.assertEqual(self.prodotto.codice_seriale, 12345)
        self.assertEqual(self.prodotto.tipologia, 'Test')
        self.assertEqual(self.prodotto.descrizione, 'Descrizione di test')
        self.assertEqual(self.prodotto.prezzo, 9.99)
        self.assertEqual(self.prodotto.disponibilita, 10)
    def test_aggiunta_quantita_venduta(self):
        self.assertEqual(self.prodotto.pezzi_venduti, 0)
        self.prodotto.pezzi_venduti = 5
        self.assertEqual(self.prodotto.pezzi_venduti, 5)
    def test_riduzione_disponibilita(self):
        self.assertEqual(self.prodotto.disponibilita, 10)
        self.prodotto.disponibilita -= 3
        self.assertEqual(self.prodotto.disponibilita, 7)
    def test_guadagno_totale(self):
        # guadagno_totale is presumably pezzi_venduti * prezzo -- see the model.
        self.assertEqual(self.prodotto.guadagno_totale, 0)
        self.prodotto.pezzi_venduti = 5
        self.assertEqual(self.prodotto.guadagno_totale, 49.95) # 5 * 9.99
    def tearDown(self):
        self.prodotto.delete()
# Test: adding a product to the cart (test ok)
class ProdottoCarrelloTest(TestCase):
    """str() of a ProdottoCarrello must delegate to the wrapped Prodotto."""

    def test_str_method(self):
        owner = Utente.objects.create(username='testuser', email='test@example.com')
        showcase = Vetrina.objects.create(ID_vetrina='Test Vetrina')
        report = ResocontoVendite.objects.create(ID_resoconto='Test Resoconto')
        product = Prodotto.objects.create(
            nome='Test Prodotto',
            codice_seriale=1,
            vetrina=showcase,
            resoconto_vendite=report
        )
        cart_item = ProdottoCarrello.objects.create(utente=owner, prodotto=product)
        self.assertEqual(str(cart_item), str(product))
# Test: creating a cart
class CarrelloTest(TestCase):  # test ok
    """str() of a Carrello is its owner's username."""

    def test_str_method(self):
        owner = Utente.objects.create(username='testuser', email='test@example.com')
        cart, _ = Carrello.objects.get_or_create(possessore=owner)
        self.assertEqual(str(cart), 'testuser')
# Test: setting and verifying a payment
class PagamentoTest(TestCase):  # test ok
    """A Pagamento stores the card number it was created with."""

    def test_str_method(self):
        payment = Pagamento.objects.create(numero_carta=1234567890)
        self.assertEqual(payment.numero_carta, 1234567890)
# Test: creating an order (test ok)
class OrdineTest(TestCase):
    """Creating an Ordine preserves the order number it was given."""

    def test_str_method(self):
        order, _ = Ordine.objects.get_or_create(
            numero_ordine='1',
            carrello=json.dumps([]),  # empty cart, serialized as JSON
            data_ordine=datetime.now(),
            numero_carta='1234567890'  # a syntactically valid card number
        )
        self.assertEqual(order.numero_ordine, '1')
# Allow running this module directly; Django's test runner normally
# discovers these TestCases itself.
if __name__ == '__main__':
    unittest.main()
2265860444 | import os
import sys
import importlib
import pkgutil
from contextlib import contextmanager
from typing import TypeVar, Union, Generator
from pathlib import Path
PathType = Union[os.PathLike, str]
T = TypeVar("T")
ContextManagerFunctionReturnType = Generator[T, None, None]
class cached_property(object):
    """Descriptor that computes a value once per instance, then caches it.

    On first access the wrapped function runs and its result is stored in the
    instance ``__dict__`` under the same name; because this is a non-data
    descriptor, the plain attribute then shadows it on every later access.
    Deleting the attribute resets the property.

    Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
    """

    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        # Accessed on the class itself -> hand back the descriptor object.
        if obj is None:
            return self
        # Compute once and shadow the descriptor with an ordinary attribute.
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result
@contextmanager
def push_python_path(path: PathType) -> ContextManagerFunctionReturnType[None]:
    """Temporarily prepend *path* (resolved to an absolute path) to ``sys.path``.

    The entry is removed again when the ``with`` block exits, even on error.

    Source: https://github.com/allenai/allennlp/blob/main/allennlp/common/util.py
    """
    resolved = str(Path(path).resolve())
    sys.path.insert(0, resolved)
    try:
        yield
    finally:
        sys.path.remove(resolved)
def import_module_and_submodules(package_name: str) -> None:
    """Import *package_name* and, recursively, every submodule beneath it.

    Source: https://github.com/allenai/allennlp/blob/main/allennlp/common/util.py
    """
    importlib.invalidate_caches()
    # Keep the current directory importable while walking the package.
    with push_python_path("."):
        module = importlib.import_module(package_name)
        search_path = getattr(module, "__path__", [])
        root = search_path[0] if search_path else ""
        for module_finder, name, _ in pkgutil.walk_packages(search_path):
            # Skip hits coming from an unrelated copy of the package.
            if root and module_finder.path != root:
                continue
            import_module_and_submodules(f"{package_name}.{name}")
def print_dict(f, d, prefix="  ", incr_prefix="  "):
    """Pretty-print a nested structure of dicts/lists/tuples to stream *f*.

    Dict keys are printed sorted; nested dicts and lists are indented by one
    extra *incr_prefix* per level; scalars are written on their own line.

    Parameters
    ----------
    f : writable file-like object
        Destination stream.
    d : dict, tuple, list element or scalar
        Value to print.
    prefix : str
        Indentation accumulated so far for this level.
    incr_prefix : str
        Extra indentation added per nesting level.
    """
    if not isinstance(d, dict):
        f.write("%s%s\n" % (prefix, d))
        if isinstance(d, tuple):
            # A tuple may carry dicts; recurse into those one level deeper.
            for x in d:
                if isinstance(x, dict):
                    print_dict(f, x, prefix + incr_prefix, incr_prefix)
        # BUG FIX: previously only the tuple branch returned here, so any
        # non-dict, non-tuple value (e.g. a string inside a list) fell
        # through to `sorted(d.keys())` below and raised AttributeError.
        return
    sorted_keys = sorted(d.keys())
    for k in sorted_keys:
        v = d[k]
        if isinstance(v, dict):
            f.write("%s%s:\n" % (prefix, k))
            print_dict(f, v, prefix + incr_prefix, incr_prefix)
        elif isinstance(v, list):
            f.write("%s%s:\n" % (prefix, k))
            for x in v:
                print_dict(f, x, prefix + incr_prefix, incr_prefix)
        else:
            f.write("%s%s: %s\n" % (prefix, k, v))
| BorealisAI/DT-Fixup | spider/semparser/common/utils.py | utils.py | py | 2,616 | python | en | code | 15 | github-code | 36 |
'''
author: Ramayan Mardi
email: jaymisra.programmer@gmail.com
=======================================
:about: This is the main script file of the game.
:name: Flappy Game
:version: 1.0.1
:requirement:
    python -> 3.10.0 or newer
    pygame -> 2.5.2
This script controls all of the game-related behaviour: game state, rendering,
input handling, and saving/loading game data.
:class Game: controls all of the above.
:function main: entry point of the game.
'''
import pygame
from pygame.locals import *
import os
# import in-built module/component
from component.utils import *
from component.flappy import Flappy
from component.pipes import ObsticalControler, MovingImage
from component.ui import GameUI
all = ("Game", "main")  # NOTE(review): probably meant `__all__`; as written this shadows the builtin all()
# Locations of the game's asset/data files (relative to the working directory).
ASSERT_PATH = "assert"
DATA_FILENAME = "data.json"
# Window constants.
SCREEN_SIZE = (640, 480)  # (width, height) in pixels
TITLE = "Flappy Game"
class Game:
    ''':class: control the whole game: handles game state ("Menu", "Settings",
    "About", "Start New Game"), rendering, inputs, and save/load of game data.

    Drive it with :meth:`run`; all other methods are called from there.
    '''
    FPS = None # NOTE(review): unused — the effective FPS comes from self._gamedata["fps"]
    all = ("setup", "update", "new_game", "run")  # NOTE(review): probably meant `__all__`
    def __init__(self, window_size: tuple[int, int], window_title: str):
        '''Create the window, the frame clock and the initial game state.'''
        self.screen = pygame.display.set_mode(window_size)
        pygame.display.set_caption(window_title)
        # clock for the game controls the game time.
        self.clock = pygame.time.Clock()
        # store all the game data need for processing game
        self._gamedata = dict()
        # decide whether the game updates or not
        self._allow_update = True
        # decide whether the game-over or not
        self._gameover = False
        # store the game state
        self._game_state = "Menu"
    def setup(self) -> None:
        ''':method: load every asset the game needs (data file, images,
        sounds, UI widgets). Must be called once before the main loop.'''
        # fetching all the data from the file.
        self._gamedata = fetch(os.path.join(ASSERT_PATH, DATA_FILENAME))
        # fetching all content for the about file.
        with open(os.path.join(ASSERT_PATH, "about.txt"), "r") as f:
            about_data = f.read()
        # load entity for the game.
        flappy_images = []
        for i in range(3):
            flappy_images.append(load_image(self._gamedata["yellowbird"][i], convert=(False, True)))
        # position of the player is in middle of the screen.
        # NOTE(review): (320, 184) is not the exact centre of a 640x480 window; confirm intended.
        self.flappy = Flappy(flappy_images=flappy_images, weight=self._gamedata["entity"]["weight"],
                             fly_speed=self._gamedata["entity"]["fly_speed"],
                             pos=(320, 184))
        # load all the obstical for the game.
        green_pipe = load_image(self._gamedata["game_objects"]["pipe-green"], convert=(True, False))
        self.obstical = ObsticalControler(green_pipe)
        # load all the environment for the game.
        self.background_day_image = MovingImage(load_image(
            self._gamedata["game_objects"]["background-day"],
            SCREEN_SIZE, (True, False)), (0, 0))
        self.base_image = MovingImage(load_image(self._gamedata["game_objects"]["base"], (640, 112),
                                      convert=(True, False)), (0, 368))
        # load all sounds for the game.
        self.hit = pygame.mixer.Sound(self._gamedata["sfx"]["hit"])
        self.wing = pygame.mixer.Sound(self._gamedata["sfx"]["wing"])
        self.point = pygame.mixer.Sound(self._gamedata["sfx"]["point"])
        self.die = pygame.mixer.Sound(self._gamedata["sfx"]["die"])
        # load all the UI for the game.
        self.gameui = GameUI(self._gamedata)
        # text box for the game.
        self.textbox = TextBox(about_data, (0, 0), fontname=self._gamedata["font"]["gamefont"],
                               fontcolor="#F5EBEB", fontsize=22)
    def update(self, delta_time: float, **kw) -> None:
        ''':method: advance and draw one frame for the current game state.

        :param delta_time: seconds elapsed since the previous frame.
        kw: contains all the inputs data (booleans "K_SPACE", "K_p",
        "K_r", "K_x" set by :meth:`run` for this frame).
        '''
        # game code -------------------------------------
        if self._game_state == "Menu":
            self.screen.fill("#383838")
            self.gameui.show_gamemenu(self.screen)
            self.gameui.show_about_btn(self.screen)
            '''
            if continuation of the game is possible than the continue button gets
            highlighted.
            '''
            self.gameui.gamemenu_boxlayout.chlidren[1].active = self._gamedata["continue"]
            # trigger the functions according to the button pressed.
            # `New Game` button pressed.
            if self.gameui.gamemenu_boxlayout.chlidren[0].pressed:
                self._game_state = "Start New Game" # game state changed
                '''
                only the the assert would draw and nothing gets updated.
                '''
                self._allow_update = False
            # `Continue` button pressed and continuation is possible.
            elif self.gameui.gamemenu_boxlayout.chlidren[1].pressed and self._gamedata["continue"]:
                # placing entity to the previous position.
                self.flappy.rect.topleft = self._gamedata["entity"]["pos"]
                # placing all the obstical for the game to the previous position.
                self.obstical._custom_pipe_pos(self._gamedata["pipes_pos"]["toppipe_list"],
                                               self._gamedata["pipes_pos"]["bottompipe_list"])
                # placing all the environment for the game to the previous position.
                self.base_image.moving_images["img1"][1].topleft = self._gamedata["other_entity"]["base_pos"]["img1"]
                self.base_image.moving_images["img2"][1].topleft = self._gamedata["other_entity"]["base_pos"]["img2"]
                self.background_day_image.moving_images["img1"][1].topleft = self._gamedata["other_entity"]["background_pos"]["img1"]
                self.background_day_image.moving_images["img2"][1].topleft = self._gamedata["other_entity"]["background_pos"]["img2"]
                # set previous score.
                self.obstical.score = self._gamedata["score"]
                self.obstical.previous_score = self._gamedata["previous_score"]
                # start the game as previous
                self._game_state = "Start New Game"
                self._allow_update = False
            # `Setting` button is pressed.
            elif self.gameui.gamemenu_boxlayout.chlidren[2].pressed:
                '''
                active the fps button which is using by the game.
                '''
                if self._gamedata["fps"] == 30:
                    self.gameui.settingmenu_boxlayout_1.chlidren[1].active = True
                else:
                    self.gameui.settingmenu_boxlayout_1.chlidren[2].active = True
                # changed the game state
                '''
                setting game state is required because in those blocks of settings.
                content a code that are updated in each frame when the game state
                is set to be "Settings"
                '''
                self._game_state = "Settings"
            # `About` button is pressed
            elif self.gameui.about_btn.pressed:
                # same reason as like the `Setting` button.
                self._game_state = "About"
        # Gets updated in each frame when 'Setting' is activate.
        elif self._game_state == "Settings":
            self.screen.fill("#383838")
            self.gameui.show_settingmenu(self.screen)
            if kw["K_x"]:
                self._game_state = "Menu"
            # trigger functions when the button inside the setting menu get pressed.
            children = self.gameui.settingmenu_boxlayout_1.chlidren
            # TODO: make the below code more clean.
            # handles the state of fps buttons and game fps setting
            if children[2].pressed:
                children[2].active = True
                # updates the entire game fps
                self._gamedata["fps"] = 60
            elif children[1].pressed:
                children[2].active = False
                # updates the entire game fps
                self._gamedata["fps"] = 30
            children[1].active = not children[2].active
        # Gets updated in each frame when 'About' is activate.
        elif self._game_state == "About":
            # textbox content all the details for the game gets visible.
            self.textbox.blit(self.screen)
            if kw["K_x"]:
                self._game_state = "Menu"
        # Gets updated in each frame when 'Start New Game' is activate.
        elif self._game_state == "Start New Game":
            # called when the game is not over and ready to play.
            if not self._gameover:
                # drawing game background.
                self.background_day_image.blit(self.screen)
                self.flappy.blit(self.screen) # entity.
                # drawing game environment
                self.obstical.toppipe.draw(self.screen)
                self.obstical.bottompipe.draw(self.screen)
                self.base_image.blit(self.screen)
                self.gameui.show_number(self.screen, str(self.obstical.score))
                # if allow update is True
                if self._allow_update:
                    # update all the entities.
                    self.background_day_image.move_image(self.screen, 50, delta_time, (-1, 0))
                    self.flappy.update(delta_time, **kw)
                    self.obstical.update(delta_time)
                    self.base_image.move_image(self.screen, 100, delta_time, (-1, 0))
                    # check collision of entity with the pipes
                    if self.obstical.collision(self.flappy.collision_rect):
                        self.hit.play()
                        self._allow_update = False
                        self._gameover = True
                    # check collision of the entity with the base
                    if self.base_image.collision(self.flappy.collision_rect):
                        self.hit.play()
                        self._allow_update = False
                        self._gameover = True
                    '''
                    play the sound when the entity flap it's wings.
                    '''
                    if kw["K_SPACE"]:
                        self.wing.play()
                    '''
                    play sound when the point get incresed
                    '''
                    if self.obstical.score > self.obstical.previous_score:
                        self.point.play()
                        self.obstical.previous_score = self.obstical.score
                else:
                    # message show when update is False
                    '''
                    This message is show during the game start, continuation of the game,
                    and also during the game get paused.
                    '''
                    self.gameui.start_message(self.screen)
                '''
                below code handles the functionality of the pause mechanism by
                manipulating :attr self._allow_update:
                '''
                if kw["K_p"] and self._allow_update:
                    self._allow_update = False
                elif kw["K_p"] and not self._allow_update:
                    self._allow_update = True
                elif kw["K_SPACE"] and not self._allow_update:
                    self._allow_update = True
                '''
                during the game is running, and the back button is pressed then the
                continuation data gets save, and continuation gets updated.
                '''
                if kw["K_x"]:
                    # store the entity data.
                    self._gamedata["entity"] = {
                        "weight": self.flappy.weight,
                        "fly_speed": self.flappy.fly_speed,
                        "pos": self.flappy.rect.center}
                    # store the list of pipe position as a data of both top-pipes and bottom-pipes.
                    self._gamedata["pipes_pos"] = {
                        "toppipe_list": [pipe.rect.topleft for pipe in self.obstical.toppipe_list],
                        "bottompipe_list": [pipe.rect.topleft for pipe in self.obstical.bottompipe_list]}
                    # store the data of other entities i.e environment stuff.
                    self._gamedata["other_entity"] = {"base_pos": {
                        "img1": self.base_image.moving_images["img1"][1].topleft,
                        "img2": self.base_image.moving_images["img2"][1].topleft
                    },
                        "background_pos": {
                            "img1": self.background_day_image.moving_images["img1"][1].topleft,
                            "img2": self.background_day_image.moving_images["img2"][1].topleft}
                    }
                    # continuation get activate and score is preserved.
                    self._gamedata["continue"] = True
                    self._gamedata["score"] = self.obstical.score
                    self._gamedata["previous_score"] = self.obstical.previous_score
                    # back to the main menu
                    self._game_state = "Menu"
                    self.new_game()
            # called when the game gets over.
            elif self._gameover:
                # store high-score data if score is greater.
                if self.obstical.score > self._gamedata["highscore"]:
                    self._gamedata["highscore"] = self.obstical.score
                # if high-score is greater than '0' it gets displayed on screen.
                if self._gamedata["highscore"]:
                    self.gameui.show_highscore(self.screen, self._gamedata["highscore"])
                # finally game-over message would be shown.
                self.gameui.gameover_message(self.screen)
                if kw["K_x"]:
                    # back to the main menu
                    self._game_state = "Menu"
                    # reset the game over button
                    self.new_game()
                    self._gamedata["continue"] = False
                if kw["K_r"] and self._gameover:
                    # resume the game and its state
                    self.new_game()
    def new_game(self) -> None:
        ''':method: reset score, pipes and player position for a fresh round.'''
        # reset all the values.
        self._allow_update = True
        self.obstical.generate_pipe()
        self.obstical.score = 0
        self.obstical.previous_score = self.obstical.score
        # NOTE(review): this start position (240, 184) differs from setup()'s (320, 184); confirm intended.
        self.flappy.rect.topleft = (480 // 2, 368 // 2)
        self._gameover = False
    def run(self) -> None:
        ''':method: main-loop of the game — poll events, tick the clock,
        call :meth:`update`, and persist game data on exit.'''
        # load all the assert for the game.
        self.setup()
        # running main-loop of the game
        running = True
        while running:
            # shortcut keys responsible for controlling inputs in the whole game.
            self.ShortCuts = {"K_SPACE": False, "K_r": False, "K_x": False,
                              "K_p": False}
            # tracking the pygame events
            for event in pygame.event.get():
                if event.type == pygame.QUIT or \
                    (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
                    running = False
                # check key presses for the game
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_SPACE:
                        self.ShortCuts["K_SPACE"] = True
                    if event.key == pygame.K_p:
                        self.ShortCuts["K_p"] = True
                    if event.key == pygame.K_r:
                        self.ShortCuts["K_r"] = True
                    if event.key == pygame.K_x:
                        self.ShortCuts["K_x"] = True
            # delta time of the entire game.
            delta_time = self.clock.tick(self._gamedata["fps"]) / 1000.0
            # update the whole game.
            self.update(delta_time, **self.ShortCuts)
            pygame.display.update()
        # quit the game
        save(os.path.join(ASSERT_PATH, DATA_FILENAME), self._gamedata)
        pygame.quit()
        exit()
def main():
    ''':function: entry point of the game.

    Initializes all pygame modules, then runs a single Game instance
    until the player quits.
    '''
    pygame.init() # initializing pygame
    Game(SCREEN_SIZE, TITLE).run()
if __name__ == '__main__':
    main()
| ramayanbindas/Flappy | flappy.py | flappy.py | py | 16,627 | python | en | code | 1 | github-code | 36 |
# ABC146 C: binary search for the largest n in [0, 10^9] that is affordable,
# where the price of n is a*n + b*(number of decimal digits of n).
price_a, price_b, budget = map(int, input().split())

lo = 0              # invariant: lo is always affordable (n = 0 costs 0)
hi = 10 ** 9 + 1    # invariant: hi is always unaffordable / out of range
while hi - lo > 1:
    mid = (lo + hi) // 2
    digits = len(str(mid))
    if price_a * mid + price_b * digits <= budget:
        lo = mid
    else:
        hi = mid
print(lo)
| burioden/atcoder | submissions/abc146/c.py | c.py | py | 172 | python | en | code | 4 | github-code | 36 |
# Luogu P1055 (ISBN check): multiply the first nine digits by weights 1..9,
# take the sum mod 11 ("X" stands for 10) and compare with the check char.
code = input()
check_chars = "0123456789X"

weight = 1
total = 0
for ch in code[:12]:
    if ch == '-':
        continue
    total += int(ch) * weight
    weight += 1

expected = check_chars[total % 11]
if expected == code[12]:
    print("Right")
else:
    # Print the corrected ISBN with the proper check character.
    print(code[:12] + expected)
| wLUOw/LuoGu | Lists/ProbList101/P1055/src/main.py | main.py | py | 227 | python | en | code | 0 | github-code | 36 |
19337719477 | import pygame
import math
from global_timer import TimedObject
class BossDeathCutscene(TimedObject):
    """Scripted death sequence for the boss.

    Attaches itself to the boss's global timer; on every tick it first walks
    the boss to the map center, then flickers it red, shows the victory /
    credits text and finally hides the boss off-screen.
    """

    def __init__(self, map_center_pos, boss) -> None:
        # Effectively "infinite" until the boss reaches the center, where it
        # is reset to a short countdown (110 ticks).
        self.lifetime = 999990
        self.map_center_pos = map_center_pos
        self.boss = boss
        self.boss.global_timer.attach(self)
        self.boss.in_active_spell_action = True
        self.boss.defeated = True
        self.boss_in_map_center = False
        # One-shot flags so each sound/line plays only once.
        self.emotion_was_sayed = False
        self.emotion_was_sayed2 = False
        #---------
        self.wave_points = []
        self.wave_segment_size = 3

    def timer_tick(self):
        """Advance the cutscene by one timer tick."""
        self.boss.in_active_spell_action = True
        if self.boss_in_map_center == True:
            self.lifetime -= 1
            if self.lifetime <= 40:
                # Hide the boss off-screen and show the ending text.
                self.boss.center_pos = [9999,9999]
                font = pygame.font.SysFont('arial', 40)
                score = font.render("You win!", True, (255, 255, 255))
                self.boss.surface.blit(score, (300, 200))
                self.boss.snake.food.hide()
                if self.lifetime <= 15:
                    if self.emotion_was_sayed2 == False:
                        self.emotion_was_sayed2 = True
                        self.boss.aniki.play()
                    self.boss.center_pos = [9999,9999]
                    font = pygame.font.SysFont('arial', 20)
                    score = font.render("Thank you for your attention", True, (255, 255, 255))
                    self.boss.surface.blit(score, (300, 260))
                    #--------
                    self.boss.center_pos = [9999,9999]
                    font = pygame.font.SysFont('arial', 20)
                    score = font.render("created by Jordenskraften", True, (255, 255, 255))
                    self.boss.surface.blit(score, (300, 290))
            else:
                # Flicker between bright and dark red ("blow it up" phase).
                if self.lifetime %3 == 0:
                    self.boss.color = (255,0,0)
                else:
                    self.boss.color = (125,0,0)
        else:
            # Has the boss reached the map center (within 1 unit on each axis)?
            if (
                abs(self.boss.center_pos[0] - self.map_center_pos[0]) <= 1 and
                abs(self.boss.center_pos[1] - self.map_center_pos[1]) <= 1
            ):
                self.boss_in_map_center = True
                self.lifetime = 110
                if self.emotion_was_sayed == False:
                    self.emotion_was_sayed = True
                    self.boss.boss_death.play()
                    self.boss.create_floating_text("Okay, you got me!", True)
                self.boss.snake.food.hide()
            else:
                # Move the boss toward the map center at 1.5 units per tick.
                d_x = self.map_center_pos[0] - self.boss.center_pos[0]
                d_y = self.map_center_pos[1] - self.boss.center_pos[1]
                distance = math.sqrt(d_x**2 + d_y**2)
                step_x = d_x / distance * 1.5
                step_y = d_y / distance * 1.5
                direction = (step_x, step_y)
                self.boss.center_pos = [
                    self.boss.center_pos[0] + direction[0],
                    self.boss.center_pos[1] + direction[1]
                ]
                self.boss.snake.food.hide()
        # NOTE(review): wave_points is never filled in this class and the
        # three methods below are not defined here — presumably supplied by a
        # subclass; this loop is a no-op otherwise. Confirm.
        if len(self.wave_points) >= 1:
            for p in self.wave_points:
                self.move_wave_segment(p)
                self.draw_wave_segment(p)
                self.check_for_snakes_bodies_collision(p)

    def enter(self):
        # BUG FIX: was `def enter():` without `self`, which raised TypeError
        # whenever the method was invoked on an instance.
        pass

    def death(self):
        """Detach from the timer and permanently disable the boss."""
        self.boss.global_timer.detach(self)
        self.boss.in_active_spell_action = True
        self.boss.defeated = True
        self.boss.base_abilities_cd = 999999
        self.boss.active_abilities_cd = 999999
        self.boss.boss_ultimate_ability_cd = 999999
        self.boss.minions_cd = 999999
        # (removed a no-op `del(self)`: deleting the local name has no effect)
| jordenskraften/snake-python | boss_death_cutscene.py | boss_death_cutscene.py | py | 3,964 | python | en | code | 0 | github-code | 36 |
#!/opt/local/bin/python
# Build face-extrusion quad (FEQ) meshes from skeleton graphs and fit them
# to the corresponding reference triangle meshes using PyGEL3D.
from this import d  # NOTE(review): unused, and importing `this` prints the Zen of Python as a side effect — confirm intentional
from pygel3d import hmesh, graph, gl_display as gl
from os import getcwd
# Skeleton graphs and their reference meshes (the two lists are index-aligned).
graphs = [
    'hand.graph',
    'armadillo_symmetric.graph',
    'bunny.graph',
    'feline.graph',
    'fertility.graph',
    'warrior.graph']
objs = [
    'usai_hand_tri.obj',
    'armadillo.obj',
    'bunny.obj',
    'feline.obj',
    'fertility_tri.obj',
    'warrior.obj'
    ]
# Per-model fitting parameters: (local iterations, distance weight, Laplacian weight).
iters = [(150,0.5,0.5), (50,0.5,1.0), (50,0.5,1.0), (50,0.5,1.0), (50,0.5,1.0), (50,0.5,1.0)]
mesh_dir = '../../../data/ReferenceMeshes/'
skel_dir = '../../../data/Graphs/'
viewer = gl.Viewer()
for g_file, o_file, params in zip(graphs, objs, iters):
    iter, dist_wt, lap_wt = params  # NOTE(review): `iter` shadows the builtin
    print("Remeshing " + o_file)
    print('Building FEQ')
    s = graph.load(skel_dir + g_file)
    m_skel = hmesh.skeleton_to_feq(s)#, [5.0]*len(s.nodes()))
    # Subdivide + smooth the coarse FEQ before fitting.
    hmesh.cc_split(m_skel)
    hmesh.cc_smooth(m_skel)
    print('Fitting to reference mesh')
    ref_mesh = hmesh.load(mesh_dir + o_file)
    fit_mesh = hmesh.Manifold(m_skel)
    fit_mesh = hmesh.fit_mesh_to_ref(fit_mesh, ref_mesh, local_iter=iter, dist_wt=dist_wt, lap_wt=lap_wt)
    print("Displaying. HIT ESC IN GRAPHICS WINDOW TO PROCEED...")
    viewer.display(fit_mesh, reset_view=True)
    hmesh.save(o_file + "-out.obj", fit_mesh)
    # viewer.display(m_skel, reset_view=True)
| janba/GEL | src/demo/FEQ-Remeshing/feq-remeshing-example.py | feq-remeshing-example.py | py | 1,290 | python | en | code | 82 | github-code | 36 |
36339108749 | import random as r
class Player:
    """The human-controlled combatant.

    `points` is the stat budget the player spreads between `toughness`
    (hit points) and `damage`; a fresh 30-point budget is granted on
    creation and on every level up.
    """

    def __init__(self):
        self.points = 30
        self.toughness = 0
        self.damage = 0
        self.kills = 0
        self.update_stats()
        self.print_info()

    def update_stats(self):
        """Interactively spend the current budget on toughness/damage.

        Keeps prompting until the two amounts add up exactly to
        ``self.points``.
        """
        print(f"you have {self.points} points, you can spread them between toughness and damage")
        while True:
            delta_toughness = Player.get_stat("toughness")
            delta_damage = Player.get_stat("damage")
            if delta_toughness + delta_damage == self.points:
                self.toughness += delta_toughness
                self.damage += delta_damage
                break
            else:
                print("you had 30 points, you have to spend them all and not a one more!")
                # BUG FIX: the old code set `self.points = 0` here, which made
                # a correct allocation impossible on retry (only 0+0 passed).

    def print_info(self):
        """Print the player's current stats."""
        print("player stats:")
        print(f"toughness: {self.toughness}")
        print(f"damage: {self.damage}")
        print(f"kills: {self.kills}")

    def level_up(self):
        """Grant a fresh 30-point budget and let the player allocate it."""
        print("you have leveled up, congrats")
        self.print_info()
        self.points = 30
        self.update_stats()

    def deal_damage(self):
        """Return the damage the player inflicts per attack."""
        return self.damage

    def be_damaged(self, dealt_damage):
        """Subtract damage from toughness; end the game when it reaches 0."""
        self.toughness -= dealt_damage
        if self.toughness <= 0:
            print("game over")
            raise SystemExit

    @staticmethod
    def get_stat(stat):
        """Prompt until the user types a whole number for *stat*."""
        while True:
            try:
                return int(input(f"how much points do you want to spend on {stat}?"))
            except ValueError:
                print("must be a whole number")
class Monster:
    """An enemy combatant; every live monster is tracked in ``Monster.monsters``."""

    # Pool of names a freshly generated monster can receive.
    monster_names = ['spider', 'moskovyt', 'goblin', 'slime', 'skeleton', 'ghoul', 'werewolf', 'ghost', 'bandit',
                     'viter(super_rare)', 'witch']
    # Registry of currently alive monsters, keyed by monster_id.
    monsters = {}

    def __init__(self, toughness, damage, monster_name, monster_id):
        self.monster_id = monster_id
        self.toughness = toughness
        self.damage = damage
        self.name = monster_name
        self.print_info()

    def print_info(self):
        """Print this monster's id, name and combat stats."""
        print(f"{self.monster_id} - {self.name}")
        print(f"toughness - {self.toughness}")
        print(f"damage - {self.damage}")
        print("===============")

    def deal_damage(self):
        """Return the damage this monster inflicts per attack.

        BUG FIX: previously returned ``self.damage()``, which raised
        TypeError because ``damage`` is an int, not a callable.
        """
        return self.damage

    def be_damaged(self, dealt_damage, monster_id, player):
        """Apply damage; on death, drop from the registry and level the player up."""
        self.toughness -= dealt_damage
        if self.toughness <= 0:
            Monster.monsters.pop(monster_id)
            print("you killed this monster")
            player.kills += 1
            player.level_up()

    @classmethod
    def spawn_monsters(cls):
        """Populate the registry with three freshly generated monsters."""
        for i in range(3):
            cls.monsters.update(cls.generate_monster(i))

    @classmethod
    def generate_monster(cls, index):
        """Return a ``{index: Monster}`` entry with random stats and name."""
        new_toughness = r.randint(0, 30)
        new_damage = r.randint(0, 30)
        new_monster_name = cls.generate_name()
        return {
            index: Monster(toughness=new_toughness, damage=new_damage, monster_name=new_monster_name, monster_id=index)
        }

    @classmethod
    def generate_name(cls):
        """Pick a random name from the pool."""
        return cls.monster_names[r.randint(0, len(cls.monster_names) - 1)]

    @staticmethod
    def print_all_info():
        """Print the info block of every monster still alive."""
        for monster in Monster.monsters.values():
            monster.print_info()
class Game:
    """Top-level game driver: currently only hosts the Fight encounter."""

    class Fight:
        """Interactive battle loop between one Player and freshly spawned Monsters."""

        def __init__(self, player):
            Monster.spawn_monsters()
            self.player = player
            self.battle()

        def damage_phase(self):
            # Both sides strike: the chosen monster damages the player, then
            # the player damages that monster.
            # NOTE(review): an id not present in Monster.monsters raises KeyError — confirm desired.
            choice = int(input("which monster do you want to fight?"))
            self.player.be_damaged(Monster.monsters[choice].damage)
            Monster.monsters[choice].be_damaged(dealt_damage=self.player.damage, monster_id=choice, player=self.player)

        def battle(self):
            # Repeat exchanges until every monster is dead (player death
            # exits via SystemExit inside Player.be_damaged).
            while True:
                self.damage_phase()
                if len(Monster.monsters) == 0:
                    print("you have killed all monsters")
                    break
                Monster.print_all_info()
                self.player.print_info()
self.player.print_info()
if __name__ == "__main__":
player_1 = Player()
Game.Fight(player=player_1)
| Sashabus/homework | game/main.py | main.py | py | 4,252 | python | en | code | 0 | github-code | 36 |
31948081711 | import requests
import streamlit as st
st.title("Weather Report ☁️")
def kelvin_to_celsius(kelvin):
    """Convert an absolute temperature in kelvin to degrees Celsius."""
    offset = 273.15
    return kelvin - offset
def kelvin_to_fahrenheit(kelvin):
    """Convert an absolute temperature in kelvin to degrees Fahrenheit."""
    celsius = kelvin - 273.15
    return celsius * 9 / 5 + 32
def get_wind_direction(degrees):
    """Map a wind bearing in degrees (0 = North) to one of 8 compass names."""
    compass = ("North", "North-East", "East", "South-East",
               "South", "South-West", "West", "North-West")
    # Shift by half a sector (22.5°) so each name covers ±22.5° around it.
    sector = int((degrees + 22.5) / 45) % 8
    return compass[sector]
def main():
    """Render the Streamlit page: read a city name, query OpenWeatherMap,
    and display the current conditions.

    NOTE(review): the API key is hard-coded; move it to a secret/env var.
    """
    try:
        city = st.text_input("Enter Your City")
        if st.button("Check"):
            api_key = "b1d2ededf0d77faf89a0c7e0a3acc4d1"
            final_url = "http://api.openweathermap.org/data/2.5/weather?q={}&appid={}".format(city, api_key)
            result = requests.get(final_url)
            data = result.json()
            if data['cod'] == '404':
                st.error("City not found.")
                return
            # API reports temperatures in kelvin; convert for display.
            temperature_kelvin = data['main']['temp']
            temperature_celsius = round(kelvin_to_celsius(temperature_kelvin))
            temperature_fahrenheit = round(kelvin_to_fahrenheit(temperature_kelvin))
            humidity = data['main']['humidity']
            pressure = data['main']['pressure']
            wind_speed = data['wind']['speed']
            wind_direction_degrees = data['wind']['deg']
            wind_direction_cardinal = get_wind_direction(wind_direction_degrees)
            cordinatelon = data['coord']['lon']
            cordinatelat = data['coord']['lat']
            # 'visibility' is optional in the API response.
            visibility = data.get('visibility')
            wind_speed = data['wind']['speed']  # NOTE(review): duplicate assignment — same value as above
            weather_condition = data['weather'][0]['description']
            st.subheader(f"Weather in {city}:")
            st.text(f"Temperature: {temperature_celsius} °C ({temperature_fahrenheit:.2f} °F)")
            st.text(f"Humidity: {humidity}%")
            # wind speed is in m/s; *3.6 converts to km/h
            st.text(f"Wind Speed: {wind_speed*3.6:.2f} km/h")
            st.text(f"Wind Direction: {wind_direction_cardinal}")
            st.text(f"Weather Condition: {weather_condition.capitalize()}")
            st.text(f"Latitude: {cordinatelat}")
            st.text(f"Longitude: {cordinatelon}")
            st.text(f"Pressure: {pressure} mb")
            if visibility:
                st.text(f"Visibility: {visibility / 1000:.2f} km")
            else:
                st.text("Visibility data not available.")
    # NOTE(review): a KeyError here usually means an unexpected API payload,
    # not a missing city name — consider narrowing/renaming this handler.
    except(KeyError):
        st.error("Please Enter the City Name")
if __name__ == "__main__":
main() | Yashwanth-2701/Weather-Report | app.py | app.py | py | 2,512 | python | en | code | 0 | github-code | 36 |
71335938983 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def rightSideView(self, root: "Optional[TreeNode]") -> "List[int]":
        """Return the node values visible from the right side, top to bottom.

        Single BFS pass: children are enqueued left-first, so the last value
        written for each depth is the rightmost node at that depth.
        O(n) time, O(width) extra space — the original did an additional full
        recursive traversal just to precompute the tree height, and shadowed
        a local `height` variable with the helper function of the same name.
        """
        import collections

        view = []
        queue = collections.deque([(root, 0)])
        while queue:
            node, depth = queue.popleft()
            if node is None:
                continue
            # Overwrite the slot for this depth; the final write per depth
            # is the rightmost node because children are enqueued left-first.
            if depth == len(view):
                view.append(node.val)
            else:
                view[depth] = node.val
            queue.append((node.left, depth + 1))
            queue.append((node.right, depth + 1))
        return view
| architjee/solutions | Leetcode/right side view of binary tree.py | right side view of binary tree.py | py | 932 | python | en | code | 0 | github-code | 36 |
17238007858 | # Q6. Write a program for creation, insertion, deletion operation in files, text files, csv files, excel files, etc.
def read_file(file):
    """Print every line of an open text file, without trailing newlines."""
    for raw_line in file.readlines():
        print(raw_line.rstrip('\n'))
def write_file(file):
    """Interactively fill an open file: ask for a line count, then the lines.

    No trailing newline is written after the final line.
    """
    line_count = int(input('Enter no. of line you want to have in file : '))
    for index in range(line_count):
        file.write(input())
        if index != line_count - 1:
            file.write("\n")
def delete_line(file_name):
    """Interactively delete one line (1-based) from the named text file.

    Shows the current lines, asks which line number to delete, then rewrites
    the file without it.
    """
    file = open(file_name, 'r')
    lines = file.readlines()
    print(lines)
    file.close()
    n = int(input('Enter line no you want to delete (MAX : ' + str(len(lines)) + ':)'))
    if n > len(lines):
        print('Invalid Line no.')
        return
    file = open(file_name, 'w')
    for i in range(len(lines)):
        if i+1 != n:
            # When the *last* line is deleted, strip the newline from the new
            # final line so the file keeps ending without a trailing newline.
            if i + 2 == n and i + 2 == len(lines): # if we have to delete the last line
                file.write(lines[i].rstrip('\n'))
            else:
                file.write(lines[i])
    file.close()
### Main ###
# Demo driver: create a text file, fill it interactively, print it,
# delete one line, and print it again.
# Creating new text file
file_name = 'file1.txt'
file = open(file_name, 'w') # opening a file in write mode
# Writing file
write_file(file)
file.close()
file = open(file_name, 'r+') # opening a file in read & write mode
# Read file
#print("File contents are as : ", file.read()) # Method 1
# Method 2
print("\n\nFile contents are as : ")
read_file(file)
file.close()
# Deleting a specific line from text file
#delete_line(file_name)
delete_line('file1.txt')
print("\n\nFile contents are After Delete operation as : ")
file = open(file_name, 'r+') # opening a file in read & write mode
read_file(file)
file.close()
| Sandeepbhatt3287/JMI-MCA | V-sem/PatternMatchingUsingPythonProgramming/LAB_ASSIGNMENTS/Assignment1(29SEP2020)/Solution6A.py | Solution6A.py | py | 1,538 | python | en | code | 0 | github-code | 36 |
74050471784 | import parlai.core.build_data as build_data
import os
from parlai.core.build_data import DownloadableFile
# (url, local filename, sha256 checksum) triples for the binary-sentiment
# splits of the Stanford Sentiment Treebank, mirrored from the OpenAI
# "generating-reviews-discovering-sentiment" repository.
RESOURCES = [
    DownloadableFile(
        'https://raw.githubusercontent.com/openai/generating-reviews-discovering-sentiment/master/data/test_binary_sent.csv',
        'test_binary_sent.csv',
        '519cea7ed4d22fe7ec4eccbb3d5ba6d88902a3b15ce129f476aa8364463a9fc7',
        zipped=False,
    ),
    DownloadableFile(
        'https://raw.githubusercontent.com/openai/generating-reviews-discovering-sentiment/master/data/train_binary_sent.csv',
        'train_binary_sent.csv',
        '6003623bcb35aad3a446a265b8931b7ccab61fcc10f2e9c1fec916ff67c7be35',
        zipped=False,
    ),
    DownloadableFile(
        'https://raw.githubusercontent.com/openai/generating-reviews-discovering-sentiment/master/data/dev_binary_sent.csv',
        'dev_binary_sent.csv',
        'f34c4987fea208fefc2d62a1b42c83a766cbfc7ce58c2a878ef953cf91f01729',
        zipped=False,
    ),
]
def build(opt):
    """Download the SST data files into ``<datapath>/SST`` unless already built."""
    dpath = os.path.join(opt['datapath'], 'SST')
    version = None
    if build_data.built(dpath, version_string=version):
        return
    print('[building data: ' + dpath + ']')
    if build_data.built(dpath):
        # An older (un-versioned) build exists; clear the outdated files.
        build_data.remove_dir(dpath)
    build_data.make_dir(dpath)
    # Download each split.
    for downloadable_file in RESOURCES:
        downloadable_file.download_file(dpath)
    # Mark the data as built so later calls are no-ops.
    build_data.mark_done(dpath, version_string=version)
| facebookresearch/ParlAI | parlai/tasks/sst/build.py | build.py | py | 1,576 | python | en | code | 10,365 | github-code | 36 |
71845266663 | from .opt_cards import DCONSTR
def add_dtable(se, key, value):
    """Add a DTABLE entry to the SE and the optmodel.

    Parameters
    ----------
    key : str
        The DTABLE unique key. The algorithm automatically attempts to add
        a sufix to prevent repeated keys.
    value : float
        The value corresponding to `key`.

    Returns
    -------
    key : str
        The resulting key (possibly suffixed to make it unique).

    Raises
    ------
    ValueError
        If a unique key cannot be built, or `key` was already added to this
        structural element.
    """
    optmodel = se.model.optmodel
    origkey = key
    if key in optmodel.dtables.keys():
        if len(key) >= 8:
            raise ValueError('{0} is an already existing DTABLE entry!'.
                             format(key))
        if key in optmodel.dtable_prefixes:
            optmodel.dtable_prefixes[key] += 1
        else:
            optmodel.dtable_prefixes[key] = 0
        sufix = str(optmodel.dtable_prefixes[key])
        key = key + sufix.rjust(8 - len(key), '0')
    if len(key) > 8:
        raise ValueError('Use a smaller key')
    if origkey in se.dtables.keys():
        # BUG FIX: this was a bare `raise`, which is invalid outside an
        # `except` block and produced an opaque RuntimeError.
        raise ValueError('{0} was already added to this element!'.format(origkey))
    se.dtables[origkey] = [key, value]
    optmodel.dtables[key] = float(value)
    return key
def add_dresp(se, dresp):
    """Add a DRESP(123) entry to the SE and the optmodel.

    Parameters
    ----------
    dresp : :class:`DRESP1`, :class:`DRESP2` or :class:`DRESP3`
        The response object.
    """
    se.dresps.append(dresp)
    se.model.optmodel.dresps[dresp.id] = dresp
    # BUG FIX: the original tested `isinstance(dresp, DRESP3)`, but DRESP3
    # is never imported in this module (only DCONSTR is), so every call
    # raised NameError. A class-name check keeps the intent — only DRESP3
    # responses carry a `group` — without adding an import.
    # NOTE(review): alternatively import DRESP3 from .opt_cards (confirm it
    # is defined there); the name check does not match DRESP3 subclasses.
    if type(dresp).__name__ == 'DRESP3':
        se.model.optmodel.groups.add(dresp.group)
def add_deqatn(se, deqatn):
    """Register a DEQATN equation card on both the SE and the optmodel."""
    optmodel = se.model.optmodel
    se.deqatns.append(deqatn)
    optmodel.deqatns[deqatn.id] = deqatn
def add_dvar(se, dvar):
    """Add a DESVAR entry to the SE and the optmodel

    Parameters
    ----------
    dvar : :class:`DESVAR`
        Design variable object.

    Raises
    ------
    ValueError
        If a design variable with the same label already exists on the SE.
    """
    if dvar.label in se.dvars.keys():
        # Fix: this was a bare `raise` with no active exception, which only
        # produced "RuntimeError: No active exception to re-raise".
        raise ValueError('DESVAR with label {0} already exists!'.format(
                dvar.label))
    se.dvars[dvar.label] = dvar
    se.model.optmodel.dvars[dvar.id] = dvar
def add_dvprel(se, dvprel):
    """Register a DVPREL1/DVPREL2 property-to-variable card on the SE and
    the optmodel."""
    optmodel = se.model.optmodel
    se.dvprels.append(dvprel)
    optmodel.dvprels[dvprel.id] = dvprel
def add_constraint(se, dcid, dresp, lb, ub):
    """Create a DCONSTR bounding `dresp` to [lb, ub] and register it.

    Parameters
    ----------
    dcid : int
        Design constraint set id.
    dresp : :class:`DRESP1`, :class:`DRESP2` or :class:`DRESP3`
        The response object.
    lb, ub : float or None
        Lower and upper boundaries for the constraint.
    """
    card = DCONSTR(dcid, dresp.id, lb, ub)
    se.dconstrs.append(card)
    se.model.optmodel.dconstrs[card.id] = card
| compmech/structmanager | structmanager/optimization/sol200/edit_structural_element.py | edit_structural_element.py | py | 2,917 | python | en | code | 1 | github-code | 36 |
12755359416 | import json
from loadsmart_elb_operations.elb_operations import *
def elb_healthcheck(client, elbName):
    """Report whether every instance behind the given ELB is in service."""
    if not elb_validation(client, elbName):
        return {
            'message': 'ELB Not Found',
            'status_code': '401'
        }
    instances_state = instances_health(client, elbName)
    # The service is only "up" when no instance reports OutOfService.
    if 'OutOfService' in instances_state.values():
        return {
            'status_code': '400',
            'Description': 'The service is down'
        }
    return {
        'status_code': '200',
        'Description': 'The service is up'
    }
def instances_list(client, elbName):
    """Return the instances registered on the ELB, or a not-found payload."""
    if not elb_validation(client, elbName):
        return {
            'message': 'ELB Not Found',
            'status_code': '401'
        }
    return get_instances(client, elbName)
def elb_add_instances(client, elbName, instance_add):
    """Register ``instance_add['name']`` on the ELB ``elbName``.

    NOTE(review): the success/conflict paths return a ``(payload, code)``
    tuple (Flask-style) while the not-found path returns a bare dict —
    confirm with the callers whether this asymmetry is intended.
    """
    if elb_validation(client, elbName):
        instances = get_instances(client, elbName)
        if not instance_add['name'] in instances:
            # The AWS response is not inspected; registration is fire-and-forget.
            response = register_instance(client, elbName, instance_add['name'])
            return {
                'message': "Instance added",
                'status_code': '201'
            }, 201
        else:
            return {
                'message': 'Instance already on load balancer',
                'status_code': '409'}, 409
    else:
        return {
            'message': 'ELB Not Found',
            'status_code': '401'
        }
def elb_remove_instances(client, elbName, instance_revome):
    """Deregister ``instance_revome['name']`` from the ELB ``elbName``.

    NOTE: the misspelled parameter name ``instance_revome`` is kept for
    backward compatibility with keyword callers.
    """
    if not elb_validation(client, elbName):
        return {
            'message': 'ELB Not Found',
            'status_code': '401'
        }
    instances = get_instances(client, elbName)
    if not instance_revome['name'] in instances:
        # Fix: status_code used to be an empty string.
        return {
            'message': 'Instance not found in load balancer',
            'status_code': '404'
        }
    response = deregister_instance(client, elbName, instance_revome['name'])
    if response.get('ResponseMetadata')['HTTPStatusCode'] == 200:
        return {
            'message': 'Instance removed/deregistered from ELB',
            'status_code': 200
        }, 200
    # Fix: on a non-200 AWS response the function previously fell through
    # and implicitly returned None.
    return {
        'message': 'Failed to deregister instance from ELB',
        'status_code': '500'
    }, 500
| hbarajas/loadsmart_sre | app_operations.py | app_operations.py | py | 1,915 | python | en | code | 0 | github-code | 36 |
27048798228 | import zedlib
import pygame
import math
class GameSprite:
    """A drawable sprite with acceleration-based movement and axis-separated
    collision handling (horizontal pass first, then vertical)."""
    def __init__(self, image, x, y):
        self.image = image
        self.rect = self.image.get_rect()
        # Sub-pixel position; the integer rect is synced from it each frame.
        self.position = zedlib.Position(x, y)
        self.x_acceleration = 0.0
        self.y_acceleration = 0.0
        self.x_velocity = 0.0
        self.y_velocity = 0.0
        # None means "no speed cap" on that axis.
        self.max_y_velocity = None
        self.max_x_velocity = None
        # Per-frame movement intent, consumed and reset by update_movement().
        self.move_x = 0.0
        self.move_y = 0.0
        self.update_rect_x()
        self.update_rect_y()
    def draw(self, surface, camera = None):
        """ Draw image on a given surface, a zedlib.Camera can also be used """
        if camera:
            surface.blit(self.image, camera.apply(self.rect))
        else:
            surface.blit(self.image, self.rect)
    def update_rect_x(self):
        """ Update x position of the rect, from self.position """
        self.rect.x = self.position.get_position()[0]
    def update_rect_y(self):
        """ Update y position of the rect, from self.position """
        self.rect.y = self.position.get_position()[1]
    def update_movement(self, collisions=[]):
        """ Update the position of rect and handle collisions """
        self.apply_acceleration()
        if self.move_x and self.move_y:
            # Normalize diagonal movement so it matches straight-line speed.
            # NOTE(review): only |move_x| is used as the speed, so this
            # assumes |move_x| == |move_y| — confirm callers guarantee that.
            movement = self.get_diagonal_movement(math.fabs(self.move_x))
            self.move_x = math.copysign(movement[0], self.move_x)
            self.move_y = math.copysign(movement[1], self.move_y)
        # Resolve the horizontal axis first, then the vertical axis.
        self.move_x += self.x_velocity
        self.position.move_x(self.move_x)
        self.handle_horizonal_collisions(collisions)
        self.move_x = 0.0
        self.move_y += self.y_velocity
        self.position.move_y(self.move_y)
        self.handle_vertical_collisions(collisions)
        self.move_y = 0.0
    def apply_acceleration(self):
        # Integrate acceleration into velocity, clamped to optional maxima.
        # NOTE(review): the clamp only bounds positive velocities; large
        # negative velocities are not capped — confirm this is intended.
        self.x_velocity += self.x_acceleration
        self.y_velocity += self.y_acceleration
        if self.max_x_velocity:
            if self.x_velocity > self.max_x_velocity:
                self.x_velocity = self.max_x_velocity
        if self.max_y_velocity:
            if self.y_velocity > self.max_y_velocity:
                self.y_velocity = self.max_y_velocity
    def handle_horizonal_collisions(self, collisions):
        """ Stop rect from moving through collisions horizontally """
        self.update_rect_x()
        collision_objects = pygame.sprite.spritecollide(self, collisions, False)
        for collision_obj in collision_objects:
            collision_obj.horizontal_collide(self)
        # The collision objects may have repositioned the rect; sync back.
        self.position.set_x(self.rect.x)
        if collision_objects: self.collision_occured()
    def handle_vertical_collisions(self, collisions):
        """ Stop rect from moving through collisions vertically """
        self.update_rect_y()
        collision_objects = pygame.sprite.spritecollide(self, collisions, False)
        for collision_obj in collision_objects:
            collision_obj.vertical_collide(self)
        self.position.set_y(self.rect.y)
        if collision_objects: self.collision_occured()
    def collision_occured(self):
        """ Called when sprite has collided with an object """
        pass
    def get_diagonal_movement(self, speed):
        """ Reduce diagonal movement to be equal to normal movement speed """
        # Split `speed` evenly across both axes: each component is speed/sqrt(2).
        move_speed = math.sqrt( (speed*speed)/2.0 )
        return (move_speed, move_speed)
| JoeZlonicky/ZedLib | zedlib/game_sprite.py | game_sprite.py | py | 3,403 | python | en | code | 0 | github-code | 36 |
19839716589 | from __future__ import print_function
import cv2 as cv
import matplotlib.pyplot as plt
if __name__ == "__main__":
    img = cv.imread('images/dog1.jpeg')
    gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # Histogram equalization of the grayscale image.
    dst = cv.equalizeHist(gray_img)
    gray_hst = cv.calcHist([gray_img], [0], None, [256], [0, 256])
    gray_hst_dst = cv.calcHist([dst], [0], None, [256], [0, 256])
    cv.imshow('Source image', img)
    # Fix: the gray and equalized windows previously showed each other's image.
    cv.imshow('Source gray image', gray_img)
    cv.imshow('Equalized Image', dst)
    plt.figure()
    plt.title("GrayScale Histogram")
    plt.xlabel('Bins')
    plt.ylabel('# of pixels')
    # Fix: plot both histograms — gray_hst was computed but never used.
    plt.plot(gray_hst)
    plt.plot(gray_hst_dst)
    plt.xlim([0, 255])
    plt.show()
    cv.waitKey()
| AnhVietPham/Deep-Learning | Computer-Vision/opencv-course/histogram.py | histogram.py | py | 683 | python | en | code | 0 | github-code | 36 |
35898596942 | from create_dataframe import create_dataframe
import numpy as np
meta_data = create_dataframe()
def split_save():
    """Split the metadata into train/validation CSVs (25 images per class).

    NOTE: mutates the module-level ``meta_data`` in place — after this call
    it holds only the training rows, which ``create_decoder`` then sees.
    """
    # 25 images per class, sampled without replacement.
    value = meta_data.groupby('category_name', group_keys=False).apply(lambda x: x.sample(n=25, replace=False))
    value.sort_index(inplace=True)
    meta_data.drop(value.index, axis=0, inplace=True)
    meta_data.to_csv('train_metadata.csv', index=False)
    value.to_csv('val_metadata.csv', index=False)
def create_decoder():
    """Build and persist the category_number -> category_name decode map.

    Fix: ``Series.iteritems()`` was removed in pandas 2.0; ``Series.items()``
    is the identical replacement (yields (index, value) pairs).
    """
    decode = {n: i for i, n in meta_data.groupby('category_name').category_number.first().items()}
    np.save('decode.npy', decode)
def run_package():
    """Run the pipeline: split/persist the data, then save the label decoder."""
    # Order matters: split_save() mutates meta_data before create_decoder() reads it.
    split_save()
    create_decoder()
37706551416 | import os
import time
import shutil
import unittest
import threading
from tadek.core import settings
from tadek.engine import channels
from tadek.engine import testresult
from tadek.engine.testresult import *
from engine import commons
__all__ = ["TestResultTest", "TestResultCoreDumpsTest"]
class OutputThread(threading.Thread):
    """Worker thread that repeatedly starts/stops a test on a shared
    TestResult, used by the thread-safety test below."""
    def __init__(self, testResult, value):
        threading.Thread.__init__(self)
        self._testResult = testResult
        self._value = value
        self._device = commons.FakeDevice()
    def run(self):
        # Python 2 code (xrange); the sleeps encourage interleaving between
        # the competing OutputThread instances.
        self._testResult.start(None)
        for x in xrange(20):
            self._testResult.startTest(self._value, self._device)
            time.sleep(0.001)
            self._testResult.stopTest(self._value, self._device)
            time.sleep(0.001)
class TestResultTest(unittest.TestCase):
    """Tests for TestResult channel dispatch and thread safety."""
    def setUp(self):
        # Swap the real channels registry for a controllable dummy.
        self.originalChannels = testresult.channels
        testresult.channels = commons.DummyChannels()
    def tearDown(self):
        testresult.channels = self.originalChannels
    def testIterChannels(self):
        # Both enabled and disabled channels must be returned by get().
        channel1 = commons.DummyChannel("Test_0")
        channel2 = commons.DummyChannel("Test_1", False)
        testresult.channels.scenario = [channel1, channel2]
        result = TestResult()
        result.start(None)
        self.assertEqual([channel1, channel2], result.get())
    def testMultipleChannelsLog(self):
        # Every registered channel must receive both start and stop messages.
        device = commons.FakeDevice()
        channels = []
        for x in xrange(2):
            channel = commons.DummyChannel("Test_%d" % x, True)
            testresult.channels.scenario.append(channel)
            channels.append(channel)
        result = TestResult()
        result.start(None)
        caseResult = commons.DummyTestResult()
        caseResult.message = messageStart = "testStart"
        result.startTest(caseResult, device)
        caseResult.message = messageStop = "testStop"
        result.stopTest(caseResult, device)
        for channel in channels:
            self.assertEqual([messageStart, messageStop], channel.testBuffer)
    def testThreadSafety(self):
        # Hammer one TestResult from several OutputThreads and verify each
        # channel saw an ordered, duplicate-free stream of messages.
        channels = []
        threads = []
        for x in xrange(2):
            channel = commons.DummyConcurentChannel("Test_%d" % x, True)
            testresult.channels.scenario.append(channel)
            channels.append(channel)
        testRes = TestResult()
        for x in xrange(4):
            thread = OutputThread(testRes, TestCaseResult())
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        for channel in channels:
            self.assertTrue(channel.testBuffer)
            self.assertTrue(channel.testBuffer == sorted(channel.testBuffer))
            self.assertEqual(len(channel.testBuffer),
                             len(set(channel.testBuffer)))
class TestResultCoreDumpsTest(unittest.TestCase):
    """Tests that the core-dumps channel only collects core files created
    while it is enabled during a running test."""
    _TEST_CORE_DIR = os.path.abspath(os.path.join("tests", "_coredumps"))
    _TEST_CORE_FILE = "file.core"
    _TEST_CORE_PATTERN = ".+\.core"
    def setUp(self):
        # Point the core-dumps channel at a throwaway directory/pattern,
        # remembering the original settings so tearDown can restore them.
        self._origChannels = testresult.channels
        testresult.channels = commons.DummyChannels(coreDumps=True)
        self.testResult = TestResult()
        section = settings.get(channels.CONFIG_NAME,
                               self.testResult._coreDumps.name, force=True)
        self._origDirs = str(section["dirs"])
        section["dirs"] = self._TEST_CORE_DIR
        self._origPattern = str(section["pattern"])
        section["pattern"] = self._TEST_CORE_PATTERN
        if not os.path.exists(self._TEST_CORE_DIR):
            os.mkdir(self._TEST_CORE_DIR)
        self.testResult.start(None)
    def tearDown(self):
        # Restore the original channel registry and settings, and remove
        # the temporary core-dump directory.
        self.testResult.stop()
        testresult.channels = self._origChannels
        section = settings.get(channels.CONFIG_NAME,
                               self.testResult._coreDumps.name, force=True)
        section["dirs"] = self._origDirs
        section["pattern"] = self._origPattern
        if os.path.exists(self._TEST_CORE_DIR):
            shutil.rmtree(self._TEST_CORE_DIR)
    def testEnableCoreDumpsChannel(self):
        # A core created while the channel is disabled must not be picked up,
        # even if the channel is re-enabled before the test stops.
        result = TestCaseResult()
        device = commons.FakeDevice()
        self.testResult._coreDumps.setEnabled(False)
        self.testResult.startTest(result, device)
        core = os.path.join(self._TEST_CORE_DIR, self._TEST_CORE_FILE)
        commons.createRandomSizeFile(core)
        self.testResult._coreDumps.setEnabled(True)
        self.testResult.stopTest(result, device)
        execResult = result.device(device)
        self.failIf(execResult.cores)
    def testEnebledCoreDumpsChannel(self):
        # A core created while the channel is enabled is attached to the result.
        result = TestCaseResult()
        device = commons.FakeDevice()
        self.testResult._coreDumps.setEnabled(True)
        self.testResult.startTest(result, device)
        core = os.path.join(self._TEST_CORE_DIR, self._TEST_CORE_FILE)
        commons.createRandomSizeFile(core)
        self.testResult.stopTest(result, device)
        execResult = result.device(device)
        self.failUnlessEqual(len(execResult.cores), 1)
        self.failUnless(core in execResult.cores)
    def testDisableCoreDumpsChannel(self):
        # Disabling the channel before stopTest discards the pending core.
        result = TestCaseResult()
        device = commons.FakeDevice()
        self.testResult._coreDumps.setEnabled(True)
        self.testResult.startTest(result, device)
        core = os.path.join(self._TEST_CORE_DIR, self._TEST_CORE_FILE)
        commons.createRandomSizeFile(core)
        self.testResult._coreDumps.setEnabled(False)
        self.testResult.stopTest(result, device)
        execResult = result.device(device)
        self.failIf(execResult.cores)
    def testDisabledCoreDumpsChannel(self):
        # With the channel disabled for the whole test, nothing is collected.
        result = TestCaseResult()
        device = commons.FakeDevice()
        self.testResult._coreDumps.setEnabled(False)
        self.testResult.startTest(result, device)
        core = os.path.join(self._TEST_CORE_DIR, self._TEST_CORE_FILE)
        commons.createRandomSizeFile(core)
        self.testResult.stopTest(result, device)
        execResult = result.device(device)
        self.failIf(execResult.cores)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| tadek-project/tadek-common | tests/engine/testresult.py | testresult.py | py | 6,191 | python | en | code | 2 | github-code | 36 |
43570030497 | import warnings
from pymysql.tests import base
import pymysql.cursors
class CursorTest(base.PyMySQLTestCase):
    """Tests that an abandoned cursor's pending rows are cleaned up when a
    new cursor runs a query on the same connection."""
    def setUp(self):
        # Create a small `test` table with five rows on the first connection,
        # then open a dedicated second connection for the actual tests.
        super(CursorTest, self).setUp()
        conn = self.connections[0]
        self.safe_create_table(
            conn,
            "test", "create table test (data varchar(10))",
            cleanup=True)
        cursor = conn.cursor()
        cursor.execute(
            "insert into test (data) values "
            "('row1'), ('row2'), ('row3'), ('row4'), ('row5')")
        cursor.close()
        self.test_connection = pymysql.connect(**self.databases[0])
        self.addCleanup(self.test_connection.close)
    def test_cleanup_rows_unbuffered(self):
        # Abandon an unbuffered (SSCursor) result mid-stream; the next query
        # must drain the leftover rows and emit a warning about it.
        conn = self.test_connection
        cursor = conn.cursor(pymysql.cursors.SSCursor)
        cursor.execute("select * from test as t1, test as t2")
        for counter, row in enumerate(cursor):
            if counter > 10:
                break
        del cursor
        self.safe_gc_collect()
        c2 = conn.cursor()
        with warnings.catch_warnings(record=True) as log:
            warnings.filterwarnings("always")
            c2.execute("select 1")
            self.assertGreater(len(log), 0)
            self.assertEqual(
                "Previous unbuffered result was left incomplete",
                str(log[-1].message))
        self.assertEqual(
            c2.fetchone(), (1,)
        )
        self.assertIsNone(c2.fetchone())
    def test_cleanup_rows_buffered(self):
        # Buffered cursors need no draining: a new cursor works immediately
        # and without warnings.
        conn = self.test_connection
        cursor = conn.cursor(pymysql.cursors.Cursor)
        cursor.execute("select * from test as t1, test as t2")
        for counter, row in enumerate(cursor):
            if counter > 10:
                break
        del cursor
        self.safe_gc_collect()
        c2 = conn.cursor()
        c2.execute("select 1")
        self.assertEqual(
            c2.fetchone(), (1,)
        )
        self.assertIsNone(c2.fetchone())
| PyMySQL/Tornado-MySQL | tornado_mysql/tests/test_cursor.py | test_cursor.py | py | 1,959 | python | en | code | 408 | github-code | 36 |
30668828069 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# @file genCode.py
# @brief Generate the error-code header file and document from the JSON description.
#        Header file: ./release/tuya_error_code.h
#        Document:    ./release/tuya_error_code.md
# @author huatuo
# @version 1.0.0
# @date 2021-09-27
# @note Argument 1 optionally overrides the file name (e.g. "base"); pass "" to keep the default naming.
# @note Argument 2 optionally restricts generation to the listed modules; both forms are accepted:
# "compA compB compC"
# "compA, compB, compC"
import json
import os
import sys
import shutil
import codecs
from datetime import date
# Python 2 compatibility: force the default string encoding to UTF-8.
if sys.version_info < (3, 0):
    reload(sys)
    sys.setdefaultencoding('utf8')
# Boilerplate emitted at the top of the generated C header.
# ###MODULE###/###MODULE_UPPER###/###DATE### are placeholders substituted in gen_file().
file_head_template = """/*******************************************************************
* File: tuya_###MODULE###_error_code.h
* Author: auto generate by tuya code gen system
* Date: ###DATE###
* Description:this file defined the error code of tuya IOT
* you can change it manully if needed
* Copyright(C),2018-2020, tuya inc, www.tuya.comm
*******************************************************************/
#ifndef TUYA_###MODULE_UPPER###_ERROR_CODE_H
#define TUYA_###MODULE_UPPER###_ERROR_CODE_H
#ifdef __cplusplus
extern "C" {
#endif
"""
# C helper macros appended verbatim at the bottom of the generated header.
file_foot_template = """
#define ERRCODE2STRING(errcode) #errcode
#define TUYA_ERROR_STRING(errcode) ("[ErrCode: " ERRCODE2STRING(errcode) "]")
#define TUYA_CHECK_NULL_RETURN(x, y)\\
do{\\
if (NULL == (x)){\\
TAL_PR_ERR("%s null", #x);\\
return (y);\\
}\\
}while(0)
#define TUYA_CHECK_NULL_GOTO(x, label)\\
do{\\
if (NULL == (x)){\\
TAL_PR_ERR("%s null", #x);\\
goto label;\\
}\\
}while(0)
#define TUYA_CALL_ERR_LOG(func)\\
do{\\
rt = (func);\\
if (OPRT_OK != (rt)){\\
TAL_PR_ERR("ret:%d", rt);\\
}\\
}while(0)
#define TUYA_CALL_ERR_GOTO(func, label)\\
do{\\
rt = (func);\\
if (OPRT_OK != (rt)){\\
TAL_PR_ERR("ret:%d", rt);\\
goto label;\\
}\\
}while(0)
#define TUYA_CALL_ERR_RETURN(func)\\
do{\\
rt = (func);\\
if (OPRT_OK != (rt)){\\
TAL_PR_ERR("ret:%d", rt);\\
return (rt);\\
}\\
}while(0)
#define TUYA_CALL_ERR_RETURN_VAL(func, y)\\
do{\\
rt = (func);\\
if (OPRT_OK != (rt)){\\
TAL_PR_ERR("ret:%d", rt);\\
return (y);\\
}\\
}while(0)
#define TUYA_CALL_ERR_LOG_SEQ_RETURN_VAL(func, y, point)\\
do{\\
rt = (func);\\
if (OPRT_OK != (rt)){\\
TAL_PR_ERR("ret:%d", rt);\\
INSERT_ERROR_LOG_SEQ_DEC((point), rt);\\
return (y);\\
}\\
}while(0)
#define TUYA_CALL_ERR_LOG_SEQ_RETURN(func, point)\\
do{\\
rt = (func);\\
if (OPRT_OK != (rt)){\\
TAL_PR_ERR("ret:%d", rt);\\
INSERT_ERROR_LOG_SEQ_DEC((point), rt);\\
return (rt);\\
}\\
}while(0)
#define TUYA_CALL_ERR_LOG_SEQ_GOTO(func, label)\\
do{\\
rt = (func);\\
if (OPRT_OK != (rt)){\\
TAL_PR_ERR("ret:%d", rt);\\
INSERT_ERROR_LOG_SEQ_DEC((point), rt);\\
goto label;\\
}\\
}while(0)
#define TUYA_CALL_ERR_LOG_SEQ(func)\\
do{\\
rt = (func);\\
if (OPRT_OK != (rt)) {\\
TAL_PR_ERR("ret:%d", rt);\\
INSERT_ERROR_LOG_SEQ_DEC((point), rt);\\
}\\
}while(0)
#define TUYA_CHECK_NULL_LOG_SEQ_RETURN(x, y, point)\\
do{\\
if (NULL == (x)){\\
TAL_PR_ERR("%s null", #x);\\
INSERT_ERROR_LOG_SEQ_DEC((point), y);\\
return (y);\\
}\\
}while(0)
#define TUYA_CHECK_NULL_LOG_SEQ_GOTO(x, point, label)\\
do{\\
if (NULL == (x)){\\
TAL_PR_ERR("%s null", #x);\\
INSERT_ERROR_LOG_SEQ_NULL((point));\\
goto label;\\
}\\
}while(0)
#ifdef __cplusplus
}
#endif
#endif
"""
# Section banner printed before each module's error-code macros.
marco_head_template = """
/****************************************************************************
the error code marco define for module ###MODULE###
****************************************************************************/
"""
class codegen():
    """Generate the Tuya error-code header (.h) and document (.md) from
    the module descriptions in ./module.json."""
    def __init__(self, file_name="", enable_list=None):
        """
        file_name: optional suffix for the generated file names ("" keeps
                   the default naming).
        enable_list: module names to include; empty/None means all modules.
        """
        self.output_path = "./release"
        self.modules = None  # populated by load_modules()
        self.file_name = "_" + file_name if len(file_name) else ""
        # Fix: avoid the mutable-default-argument pitfall (was enable_list=[]).
        self.enable_list = [] if enable_list is None else enable_list
        print("file_name: ", file_name)
        print("enable_list: ", self.enable_list)
    def load_modules(self):
        """Read ./module.json and keep its 'TuyaEmbeddedErrcode' module list."""
        # Fix: json.load() lost its `encoding` argument in Python 3.9; the
        # stream is already decoded as UTF-8 by codecs.open(). Also close
        # the file deterministically.
        with codecs.open("./module.json", "r", "utf-8") as module_file:
            self.modules = json.load(module_file)['TuyaEmbeddedErrcode']
    # Decide whether this module's error codes should be generated.
    def _modules_is_ok(self, module):
        if (len(module['errcode']) == 0):
            # print("module length is 0: ", module['name'])
            return False
        if (module['offset']>255) or (len(module['errcode'])>255):
            print("module over offset: ", module['name'])
            return False
        if self.enable_list == []:
            # An empty enable list means every module is generated.
            return True
        if module['name'] not in self.enable_list:
            # Module was not requested by the caller.
            return False
        return True
    def _gen_tbl_header(self, module_name, index, errcode_cnt):
        """Return the markdown section header (and table header) for a module."""
        tbl_header = "\n## " + str(index) + ". module " + '`' + module_name + '`' + "\n\n"
        if errcode_cnt > 0:
            tbl_header = tbl_header + " No. | Name | Value | message" + "\n"
            tbl_header = tbl_header + "-------|-------|-------|--------" + "\n"
        return tbl_header
    def _gen_marco_item(self, module_name, errcode, offset, index):
        """Return (macro name, macro value, message) for one error-code entry."""
        marco_item = ""
        marco_msg = ""
        marco_val = ""
        # prefix, global error code not have module_name
        marco_item_prefix = "OPRT"+ "_" + module_name.upper() + "_"
        marco_item_prefix_global = "OPRT" + "_"
        # only one key in error code
        for key in errcode:
            if module_name == "global":
                marco_item = marco_item_prefix_global + key
            else:
                marco_item = marco_item_prefix + key
            # Error value: high byte is the module offset, low byte the index.
            error_val = "-%#06x" % ((offset<<8) + index)
            marco_val = "(%s)" % error_val
            marco_msg = str(int(error_val, 16)) + ", " + errcode[key]
        return marco_item, marco_val, marco_msg
    def gen_md(self):
        """Write the markdown document listing every generated error code."""
        file_content = "# Tuya Embedded software error code define" + "\n"
        module_index = 0
        for module in self.modules:
            if not self._modules_is_ok(module):
                continue
            module_index += 1
            file_content = file_content + self._gen_tbl_header(module["name"].upper(), module_index, len(module['errcode']))
            index = 0
            for errcode in module["errcode"]:
                marco_item, marco_val, marco_msg = self._gen_marco_item(module["name"], errcode, module["offset"], index)
                marco_item, marco_val, marco_msg = '`'+marco_item+'`', '`'+marco_val+'`', '`'+marco_msg+'`'
                index += 1
                file_content = file_content + str(index) + "|" + marco_item + "|" + str(marco_val) + "|" + marco_msg
                file_content = file_content + "\n"
        # Write the .md file (with-statement closes it even on error).
        file_name = self.output_path + "/tuya_###MODULE###_error_code.md".replace("_###MODULE###", self.file_name)
        with open(file_name, 'w') as fd:
            fd.write(file_content)
        return
    def gen_marco(self, module):
        """Return the C #define block for all error codes of one module."""
        define = "#define"
        marco_content_head = marco_head_template.replace("###MODULE###", module["name"].upper())
        marco_content_body = ""
        marco_item_prefix = "OPRT"+ "_" + module["name"].upper() + "_"
        index = 0
        for errcode in module["errcode"]:
            marco_item, marco_val, marco_msg = self._gen_marco_item(module["name"], errcode, module["offset"], index)
            index += 1
            # appened to the marco content
            marco_define = "%(d)s %(i)-50s %(v)+8s //%(m)s" % \
                    {'d':define, 'i':marco_item, 'v':marco_val, 'm':marco_msg}
            marco_content_body = marco_content_body + marco_define + '\n'
        max_marco_cnt = define + " " + marco_item_prefix + "ERRCODE_MAX_CNT" + " " + str(index)
        marco_content_body = marco_content_body + max_marco_cnt + '\n\n'
        marco_content = marco_content_head + marco_content_body
        return marco_content
    def gen_file(self):
        """Write the C header: head template, per-module macros, foot template."""
        file_name = self.output_path +"/tuya_###MODULE###_error_code.h".replace("_###MODULE###", self.file_name)
        with open(file_name, 'w') as fd:
            # head
            file_head = file_head_template.replace("_###MODULE###", self.file_name)
            file_head = file_head.replace("_###MODULE_UPPER###", self.file_name.upper())
            file_head = file_head.replace("###DATE###", str(date.today()))
            fd.write(file_head)
            # marco
            for module in self.modules:
                if not self._modules_is_ok(module):
                    continue
                marco_str = self.gen_marco(module)
                fd.write(marco_str)
            fd.write(file_foot_template)
        return
    def dogen(self):
        """Full run: load modules, reset the output dir, emit .h and .md."""
        # load all module description
        self.load_modules()
        # clean output path
        shutil.rmtree(self.output_path, ignore_errors=True)
        os.mkdir(self.output_path)
        # gen .h file
        self.gen_file()
        # gen .md file
        self.gen_md()
        return
if __name__ == '__main__':
    # Optional argv[1]: output file-name suffix ("" keeps the default).
    file_name = ""
    if len(sys.argv) > 1:
        file_name = sys.argv[1]
    # Optional argv[2]: module list, space- and/or comma-separated.
    enable_list = []
    if len(sys.argv) > 2:
        enable_list_str = sys.argv[2]
        enable_list = enable_list_str.replace(',', ' ').split()
    gen = codegen(file_name, enable_list)
    gen.dogen()
| tuya/tuyaos-development-board-t2 | software/TuyaOS/scripts/error_code/genCode.py | genCode.py | py | 9,935 | python | en | code | 2 | github-code | 36 |
73857162662 |
import math
from SynRD.publication import Publication, Finding, VisualFinding, TAXONOMY
import numpy as np
import pandas as pd
import statsmodels as sm
import statsmodels.stats.weightstats
class Assari2019Baseline(Publication):
    """Replication of Assari & Bazargan (2019), IJERPH."""
    # Bibliographic metadata for the replicated paper.
    DEFAULT_PAPER_ATTRIBUTES = {
        'id': 'assari2019baseline',
        'length_pages': 15,
        'authors': ['Shervin Assari', 'Mohsen Bazargan'],
        'journal': 'International Journal of Environmental Research and Public Health',
        'year': 2019,
        'current_citations': 9, # citation count at the time the replication was written
        'base_dataframe_pickle': 'assari2019ability_dataframe.pickle' # NOTE(review): says "ability" while this class is "baseline" — confirm
    }
    # Survey code -> label maps for the Race/Gender columns.
    RACE_MAP = {
        1: "White",
        2: "Black"
    }
    GENDER_MAP = {
        1: "Man",
        2: "Woman"
    }
    FILENAME = 'assari2019baseline'
    # Raw survey variable name -> human-readable column name.
    COLUMN_MAP = {"V2102": "Race", "V103": "Gender", "V2000": "Age", "V2007": "Education", "V2020": "Income", "V2637": "Smoking", "V2623": "BMI", "V2681": "HTN", "V13214": "Exercise", "V2203": "Depressive symptoms", "V915": "Health", "V1860": "Weight", "V15003": "Response pattern", "V836": "Stroke wave 1", "V4838": "Stroke wave 2", "V10225": "Stroke wave 3", "V12305": "Stroke wave 4", "V15944": "Stroke wave 5", "V12302": "Any stroke"}
    # Lazily-computed caches (see get_corr / get_race_pools_with_means / get_dead).
    corr_df = None
    means = None
    dead = None
    def __init__(self, dataframe=None):
        """Register the paper's findings (methods below) for replication."""
        super(Assari2019Baseline, self).__init__(dataframe=dataframe)
        self.FINDINGS = self.FINDINGS + [
            Finding(self.finding_5_1, description="finding_5_1",
                    text="""Blacks were younger, had higher number of
                    chronic medical conditions at baseline in comparison to Whites.""",
                    finding_type=TAXONOMY.MEAN_DIFFERENCE.value.BETWEEN_CLASS),
            Finding(self.finding_5_2, description="finding_5_2",
                    text="""Relative to White people, Black individuals had also
                    lower educational attainment (p < 0.05 for all).""",
                    finding_type=TAXONOMY.MEAN_DIFFERENCE.value.BETWEEN_CLASS),
            Finding(self.finding_5_3, description="finding_5_3",
                    text="""Blacks also reported worse self-rated health (SRH) than
                    Whites (Table 1).""",
                    finding_type=TAXONOMY.MEAN_DIFFERENCE.value.BETWEEN_CLASS),
            Finding(self.finding_5_6, description="finding_5_6",
                    text="""Similarly, overall, people had 12.53 years of schooling at
                    baseline (95%CI = 12.34-12.73).""",
                    finding_type=TAXONOMY.DESCRIPTIVE_STATISTICS),
            Finding(self.finding_5_7, description="finding_5_7",
                    text="""A comparison of racial groups showed higher educational
                    attainment in Whites (12.69, 95%CI=12.48-12.90) than Blacks (11.37,95%CI
                    = 10.90-11.84). Thus, on average, Whites had more than 1.3 years higher
                    years [sic] of schooling than Blacks...""",
                    finding_type=TAXONOMY.MEAN_DIFFERENCE.value.BETWEEN_CLASS),
            Finding(self.finding_5_8, description="finding_5_8",
                    text="""Of the 177 that died, 121 were White (68.36%) and 56 were
                    Black (31.64%).""",
                    finding_type=TAXONOMY.DESCRIPTIVE_STATISTICS),
            Finding(self.finding_5_9, description="finding_5_9",
                    text="""Of the 177 that died, 33 were obese (18.64%) and 144 were
                    not obese (81.36%) at baseline.""",
                    finding_type=TAXONOMY.DESCRIPTIVE_STATISTICS),
            Finding(self.finding_6_1, description="finding_6_1",
                    text="""In bivariate association, race was not associated with death
                    due to cerebrovascular (unadjusted HR for Blacks compared to
                    Whites = 0.78, 95% CI = 0.55-1.11), suggesting that Whites and
                    Blacks had similar risk of future cerebrovascular mortality over 25 years.""",
                    finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION),
            Finding(self.finding_6_2, description="finding_6_2",
                    text="""In bivariate association, baseline obesity was not associated
                    with future risk of cerebrovascular mortality (Unadjusted HR for
                    Blacks compared to Whites = 0.84, 95% CI = 0.45-1.56), suggesting
                    that Whites and Blacks had a similar risk of future cerebrovascular
                    mortality over 25 years.""",
                    finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION),
            Finding(self.finding_6_3, description="finding_6_3",
                    text="""Race (Black) was negatively associated with education and income""",
                    finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION),
            Finding(self.finding_6_4, description="finding_6_4",
                    text="""[race (Black) was]... positively associated with depressive
                    symptoms, hypertension, and obesity.""",
                    finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION),
            Finding(self.finding_6_5, description="finding_6_5",
                    text="""Blacks more frequently smoked and less frequently exercised.""",
                    finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION),
            Finding(self.finding_6_6, description="finding_6_6",
                    text="""Race was not associated with cerebrovascular death.""",
                    finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION),
            Finding(self.finding_6_7, description="finding_6_7",
                    text="""Baseline obesity was associated with female gender and less
                    education, income, smoking, and exercise.""",
                    finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION),
            Finding(self.finding_6_8, description="finding_6_8",
                    text="""Obesity at baseline was associated with depressive symptoms and
                    hypertension at baseline.""",
                    finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION),
            Finding(self.finding_6_9, description="finding_6_9",
                    text="""Obesity at baseline was not associated with cerebrovascular
                    death in the pooled sample (Table 2).""",
                    finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION)
        ]
def _get_any_stroke_if_died(self, x):
response_pattern = str(x["Response pattern"])
if "4" not in response_pattern:
return 0 # patient did not die
for i in range(5):
if x[f"Stroke wave {i + 1}"] == 1:
return 1
return 0
    def _recreate_dataframe(self, filename='assari2019baseline_dataframe.pickle'):
        """Load the raw survey TSV, derive the paper's variables, and pickle it."""
        data = pd.read_csv('data/DS0001/04690-0001-Data.tsv', sep='\t')
        data = data[self.COLUMN_MAP.keys()]
        data.rename(columns=self.COLUMN_MAP, inplace=True)
        data = data[(data["Race"] == 1) | (data["Race"] == 2)] # 1 = white, 2 = Black
        # Dichotomized variables: 12+ years of schooling, BMI > 30, SRH 1-3.
        data["Educational attainment"] = data.apply(lambda x: 1 if x["Education"] >= 12 else 0, axis=1)
        data["Obesity"] = data.apply(lambda x: 1 if x["BMI"] > 30 else 0, axis=1)
        data["Health binary"] = data.apply(lambda x: 1 if x["Health"] in [1, 2, 3] else 0, axis=1)
        data["Death to cerebrovascular disease"] = data.apply(lambda x: self._get_any_stroke_if_died(x), axis=1)
        # -99.0 presumably encodes "missing" for Exercise; mapped to 0 — confirm against the codebook.
        data['Exercise'] = data['Exercise'].replace(-99.0, 0)
        # Drop the per-wave columns once the derived death variable is built.
        data.drop(columns=['Stroke wave 1', 'Stroke wave 2', 'Stroke wave 3', 'Stroke wave 4', 'Stroke wave 5','Response pattern', 'Any stroke'], inplace=True)
        data.to_pickle(filename)
        return data
def get_corr(self):
if self.corr_df is None:
corr_df = self.dataframe[['Race', 'Age', 'Gender', 'Education', 'Income', 'Smoking', 'Exercise', 'Depressive symptoms', 'HTN', 'Obesity', 'Death to cerebrovascular disease']]
self.corr_df = corr_df.corr()
return self.corr_df
    def get_race_pools_with_means(self):
        """Lazily compute survey-weighted means per race, indexed by 'Black'/'White'."""
        if self.means is None:
            black_pool = self.dataframe.loc[self.dataframe['Race'] == 2]
            white_pool = self.dataframe.loc[self.dataframe['Race'] == 1]
            black_pool_means, white_pool_means = self._get_adjusted_means(black_pool), self._get_adjusted_means(white_pool)
            # Stack the two one-row frames and index them by race label.
            means = pd.concat([black_pool_means, white_pool_means])
            means['Race'] = ['Black', 'White']
            means.set_index('Race', inplace=True)
            self.means = means
        return self.means
    def _get_adjusted_means(self, data_sample):
        """One-row DataFrame of column means weighted by the 'Weight' column,
        rounded to 4 decimals (statsmodels DescrStatsW)."""
        temp_means = np.around(sm.stats.weightstats.DescrStatsW(data_sample, weights=data_sample['Weight']).mean, 4)
        return pd.DataFrame(data=[temp_means], columns=data_sample.columns)
def get_dead(self):
if self.dead is None:
self.dead = self.dataframe.loc[self.dataframe['Death to cerebrovascular disease'] == 1]
return self.dead
def finding_5_1(self):
"""Blacks were younger, had higher number of chronic medical conditions at baseline in comparison to Whites."""
means = self.get_race_pools_with_means()
black_age = means['Age']['Black']
white_age = means['Age']['White']
black_htn = means['HTN']['Black']
white_htn = means['HTN']['White']
values = [black_age, white_age, black_htn, white_htn]
soft_finding = black_age < white_age and black_htn > white_htn
return (values, soft_finding, values)
def finding_5_2(self):
"""Relative to White people, Black individuals had also lower educational attainment (p < 0.05 for all)."""
means = self.get_race_pools_with_means()
black_education = means['Education']['Black']
white_education = means['Education']['White']
values = [black_education, white_education]
soft_finding = black_education < white_education
return (values, soft_finding, values)
def finding_5_3(self):
"""Blacks also reported worse self-rated health (SRH) than Whites (Table 1)."""
means = self.get_race_pools_with_means()
black_health = means['Health']['Black']
white_health = means['Health']['White']
values = [black_health, white_health]
soft_finding = black_health > white_health # note 1 = excellent, 5 = poor
return (values, soft_finding, values)
    def finding_5_4(self):
        """The overall prevalence of DM was 5.73%, (95%CI = 4.80-6.82)."""
        # Not replicated — presumably because no diabetes variable was
        # extracted into the dataframe; confirm against COLUMN_MAP.
        pass
    def finding_5_5(self):
        """DM was more common in Blacks (9.22%, 95%CI = 7.75-10.95) than Whites (5.25%, 95%CI = 4.2.4-6.50)."""
        # Not replicated — presumably because no diabetes variable was
        # extracted into the dataframe; confirm against COLUMN_MAP.
        pass
    def finding_5_6(self):
        """Similarly, overall, people had 12.53 years of schooling at baseline (95%CI = 12.34-12.73)."""
        means = self._get_adjusted_means(self.dataframe)
        years_schooling = means['Education'][0]
        # soft_finding = round(years_schooling, 2) == 12.53
        # Soft check: weighted mean schooling within 0.2 years of the paper's 12.53.
        soft_finding = np.allclose(float(round(years_schooling, 2)), 12.53, atol=0.2)
        return ([years_schooling], soft_finding, [years_schooling])
def finding_5_7(self):
"""A comparison of racial groups showed higher educational attainment in Whites (12.69, 95%CI=12.48-12.90) than Blacks (11.37,95%CI = 10.90-11.84). Thus, on average, Whites had more than 1.3 years higher years [sic] of schooling than Blacks..."""
means = self.get_race_pools_with_means()
white_education = means['Education']['White']
black_education = means['Education']['Black']
values = [white_education, black_education]
soft_finding = white_education > black_education + 1.2
return (values, soft_finding, values)
def finding_5_8(self):
"""Of the 177 that died, 121 were White (68.36%) and 56 were Black (31.64%). Note that we were unable to reproduce this result."""
dead = self.get_dead()
total = dead.shape[0]
black_count = dead.loc[dead['Race'] == 2].shape[0]
white_count = dead.loc[dead['Race'] == 1].shape[0]
values = [total, white_count, black_count]
# soft_finding = total == 177 and white_count == 121 and black_count == 56
white_percentage = float(white_count) / float(total)
soft_finding = np.allclose(white_percentage, 0.68, atol=0.05)
return (values, soft_finding, values)
def finding_5_9(self):
"""Of the 177 that died, 33 were obese (18.64%) and 144 were not obese (81.36%) at baseline. Note that we were unable to reproduce this result."""
dead = self.get_dead()
total = dead.shape[0]
obese_count = dead.loc[dead['Obesity'] == 1].shape[0]
not_obese_count = dead.loc[dead['Obesity'] == 0].shape[0]
values = [total, obese_count, not_obese_count]
# soft_finding = total == 177 and obese_count == 33 and not_obese_count == 144
obese_percentage = float(obese_count) / float(total)
soft_finding = np.allclose(obese_percentage, 0.18, atol=0.05)
return (values, soft_finding, values)
def finding_6_1(self):
"""In bivariate association, race was not associated with death due to cerebrovascular (unadjusted HR for Blacks compared to Whites = 0.78, 95% CI = 0.55-1.11), suggesting that Whites and Blacks had similar risk of future cerebrovascular mortality over 25 years."""
corr_df = self.get_corr()
corr_race_death = corr_df['Race'].loc['Death to cerebrovascular disease']
soft_finding = abs(corr_race_death) < 0.03
return ([corr_race_death], soft_finding, [corr_race_death])
def finding_6_2(self):
"""In bivariate association, baseline obesity was not associated with future risk of cerebrovascular mortality (Unadjusted HR for Blacks compared to Whites = 0.84, 95% CI = 0.45-1.56), suggesting that Whites and Blacks had a similar risk of future cerebrovascular mortality over 25 years."""
corr_df = self.get_corr()
corr_obesity_death = corr_df['Obesity'].loc['Death to cerebrovascular disease']
soft_finding = abs(corr_obesity_death) < 0.03
return ([corr_obesity_death], soft_finding, [corr_obesity_death])
# TODO: check that race correlation is for Black
def finding_6_3(self):
"""Race (Black) was negatively associated with education and income"""
corr_df = self.get_corr()
values = [corr_df['Race'].loc['Education'], corr_df['Race'].loc['Income']]
soft_finding = all(x < 0 for x in values)
return (values, soft_finding, values)
# TODO: check that race correlation is for Black
def finding_6_4(self):
"""[race (Black) was]... positively associated with depressive symptoms, hypertension, and obesity. Note that we were unable to reproduce this result."""
corr_df = self.get_corr()
values = [corr_df['Race'].loc['Depressive symptoms'], corr_df['Race'].loc['HTN'], corr_df['Race'].loc['Obesity']]
soft_finding = all(x > 0 for x in values)
return (values, soft_finding, values)
# TODO: check that race correlation is for Black
def finding_6_5(self):
"""Blacks more frequently smoked and less frequently exercised.""" # implies positive correlation with smoking and negative with exercise
corr_df = self.get_corr()
values = [corr_df['Race'].loc['Smoking'], corr_df['Race'].loc['Exercise']]
soft_finding = values[0] > 0 and values[1] < 0
return (values, soft_finding, values)
# TODO: check that race correlation is for Black
def finding_6_6(self):
"""Race was not associated with cerebrovascular death.""" # same as finding_6_1?
corr_df = self.get_corr()
corr_race_death = corr_df['Race'].loc['Death to cerebrovascular disease']
soft_finding = abs(corr_race_death) < 0.05
return ([corr_race_death], soft_finding, [corr_race_death])
# TODO: check that gender correlation is for female
def finding_6_7(self):
"""Baseline obesity was associated with female gender and less education, income, smoking, and exercise."""
corr_df = self.get_corr()
values = [corr_df['Obesity'].loc['Gender'], corr_df['Obesity'].loc['Education'], corr_df['Obesity'].loc['Income'], corr_df['Obesity'].loc['Smoking'], corr_df['Obesity'].loc['Exercise']]
soft_finding = values[0] > 0 and all(x < 0 for x in values[1:])
return (values, soft_finding, values)
def finding_6_8(self):
"""Obesity at baseline was associated with depressive symptoms and hypertension at baseline. Note that we were unable to reproduce this result."""
corr_df = self.get_corr()
values = [corr_df['Obesity'].loc['Depressive symptoms'], corr_df['Obesity'].loc['HTN']]
soft_finding = all(x > 0 for x in values)
return (values, soft_finding, values)
def finding_6_9(self):
"""Obesity at baseline was not associated with cerebrovascular death in the pooled sample (Table 2).""" # same as finding_6_2?
corr_df = self.get_corr()
corr_obesity_death = corr_df['Obesity'].loc['Death to cerebrovascular disease']
soft_finding = abs(corr_obesity_death) < 0.05
return ([corr_obesity_death], soft_finding, [corr_obesity_death])
    def finding_6_10(self):
        """According to Model 1 in the pooled sample, baseline obesity did not predict cerebrovascular mortality (HR = 0.86, 0.49-1.51), independent of demographic, socioeconomic, health behaviors, and health factors at baseline."""
        # Not implemented: the Cox regression result is not reproduced here.
        pass
    def finding_6_11(self):
        """According to Model 2, race interacted with baseline obesity on outcome (HR = 3.17, 1.09-9.21), suggesting a stronger association between baseline obesity and future risk for cerebrovascular deaths for Blacks, in comparison to Whites (Table 3)."""
        # Not implemented: the interaction model is not reproduced here.
        pass
    def finding_6_12(self):
        """As Model 3 shows, obesity did not predict the outcome in Whites (HR = 0.69, 0.31-1.53)."""
        # Not implemented: the race-stratified model is not reproduced here.
        pass
def finding_6_13(self):
"""Model 4 shows that obesity predicts risk of cerebrovascular mortality for Blacks (HR = 2.51, 1.43-4.39) (Table 4)."""
pass | DataResponsibly/SynRD | SynRD/papers/assari2019baseline.py | assari2019baseline.py | py | 18,601 | python | en | code | 0 | github-code | 36 |
25578075693 | import random
def print_board(b):
    """Print a blank line, a "Board:" header, then the board values on one line."""
    print()
    print("Board:")
    for cell in b:
        print(cell, end="")
    print()
print()
def main():
    """Play the end-picking game: two players alternate taking a value from
    either end of a randomly generated board; the higher total wins.

    Fixes over the original: the second player's points now go to the class
    (both branches previously credited Kyle), and the chosen side actually
    determines which end is removed (the score was read from board[0] while
    board.pop(-1) always removed the right end, leaving `score` undefined on
    the first non-"left" turn).
    """
    board = [random.randint(0, 9) for _ in range(8)]
    print_board(board)
    kyle_score = 0
    class_score = 0
    turn = 0
    while board:
        print("Kyle:", kyle_score, "-vs- Class:", class_score)
        print("It is player #"+str(turn % 2+1)+"'s turn")
        print_board(board)
        side = input("What side do you choose?\n")
        # Take from the chosen end (any answer other than "left" means right).
        if side == "left":
            score = board.pop(0)
        else:
            score = board.pop(-1)
        if turn % 2 == 0:
            kyle_score += score
        else:
            class_score += score
        turn += 1
    print("Final Results")
    print("Kyle:", kyle_score)
    print("Class:", class_score)
    if kyle_score > class_score:
        print("I told you... He won by", kyle_score-class_score)
    elif kyle_score == class_score:
        print("You tied")
    else:
        print("Class wins! This will never happen ")


main()
| romantmanuel/COP2500C | Notes (10-16-23) - PyGame.py | Notes (10-16-23) - PyGame.py | py | 1,145 | python | en | code | 0 | github-code | 36 |
38871003901 | """
Solution of Codechef Problem - Chef on a trip
Problem Code - CHEFTRAV
Link - https://www.codechef.com/problems/CHEFTRAV
"""
# Read the trip legs as (source -> destination) pairs; the starting city is
# the only one that never appears as a destination. Then follow the chain of
# legs from that city and print the route as "src-dst" pairs.
import sys
out = sys.stdout
testcases = int(input())
while testcases > 0:
    n = int(input())
    places = {}
    end_city = set([])
    # Record each leg and remember every city that is arrived at.
    for i in range(n):
        source = input()
        destination = input()
        places[source] = destination
        end_city.add(destination)
    start_City = ""
    # The start of the trip is the unique source that is never a destination.
    for k in places.keys():
        if k not in end_city:
            start_City = k
            break
    # Walk the chain, printing one leg at a time.
    for i in range(n):
        out.write(start_City +"-"+places[start_City]+" ")
        start_City = places[start_City]
    out.write("\n")
testcases -= 1 | karanm97/codingProblems | cheftrav.py | cheftrav.py | py | 661 | python | en | code | 0 | github-code | 36 |
10989202262 | import random
#Colgadito es la lista de palabras para jugar
colgadito = ["puma", "chinchilla", "pudu", "huemul", "condor", "güiña", "guanaco"]
#Figura es lo que va mostrando la consecuencia de los errores del jugador
figura = ['''
+++++
|
|
|
====''', '''
+++++
O |
|
|
====''', '''
+++++
O |
| |
|
====''', '''
+++++
O |
/| |
|
====''', '''
+++++
O |
/|\ |
|
====''', '''
+++++
O |
/|\ |
/ |
====''', '''
+++++
O |
/|\ |
/ \ |
====''']
#Elige palabra para adivinar
def obtener_palabra_aleatoria(listado):
    """Return a randomly chosen word from the given word list."""
    return random.choice(listado)
palabra_aleatoria = obtener_palabra_aleatoria(colgadito)#Queda registrado el animal que retornó la función anterior
#Crea lista vacía, que se llama lista_palabra
lista_palabra = []
for letra in palabra_aleatoria: #Agrega los caracteres de la palabra_aleatoria como _ a lista_palabra
lista_palabra.append("_")
def actualiza_tablero(letra_adivinada, palabra_secreta):
    """Reveal every occurrence of the guessed letter on the shared board.

    Uses the module-level `lista_palabra` as the board. Returns the number of
    positions revealed this turn (0 if the letter was already revealed or does
    not occur in the secret word).
    """
    adivinadas = 0
    if letra_adivinada in lista_palabra:
        print("La letra ya fue agregada")
    else:
        if letra_adivinada in palabra_secreta:
            for indice, letra in enumerate(palabra_secreta):
                if letra == letra_adivinada:
                    lista_palabra[indice] = letra_adivinada
                    adivinadas += 1
    # Show the current state of the board after every guess.
    print(lista_palabra)
    return adivinadas
contador_puntaje = 120 #Parámetro para ir descontando de a 20 puntos los errores
def calcula_puntaje(puntaje):
    """Deduct 20 points for a wrong guess, report and return the new score."""
    nuevo_puntaje = puntaje - 20
    print("Tienes ", nuevo_puntaje, " puntos.")
    return nuevo_puntaje
#Actualización de tablero
#El programa tiene 6 intentos
contador = 0 #Va manejando cuántas letras tengo adivinadas
contador_personaje = 0 #Va agregando partes al colgado mediante el índice
print(lista_palabra)
while True:
print(figura[contador_personaje])
letra = input("Ingresa una letra: ")
if letra in palabra_aleatoria:
letras_adivinadas = actualiza_tablero(letra, palabra_aleatoria)
contador += letras_adivinadas
print("Letras adivinadas: ",contador)
print("Esta palabra tiene ", len(palabra_aleatoria), " letras.")
print("Tienes ", contador_puntaje, " puntos.")
if contador == len(palabra_aleatoria):
print("Ganaste! Lo salvaste")
break
else:
print("No está la letra en la palabra")
contador_personaje += 1
contador_puntaje = calcula_puntaje(contador_puntaje)
if contador_personaje == 6:
print(figura[contador_personaje])
print("Perdiste! Lo has ahorcado. Vuelve a intentarlo!")
break
| Joaoguzman/PROYECTO-JUEGO-COLGADO | colgado.py | colgado.py | py | 2,959 | python | es | code | 0 | github-code | 36 |
28580043069 | from django.shortcuts import render
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
from order.models import Order
from order.serializers.order import OrderSerializer
class OrderView(generics.ListCreateAPIView):
    """List the authenticated user's orders (GET) and create new ones (POST)."""
    queryset = Order.objects.all()
    serializer_class = OrderSerializer
    permission_classes = [IsAuthenticated]
    def get_queryset(self):
        """Restrict the listing to orders created by the requesting user."""
        user = self.request.user
        queryset = Order.objects.filter(created_by=user)
        return queryset
class OrderDetails(generics.RetrieveUpdateAPIView):
    """Retrieve (GET) or update (PUT/PATCH) a single order owned by the user."""
    queryset = Order.objects.all()
    serializer_class = OrderSerializer
    permission_classes = [IsAuthenticated]
    def get_queryset(self):
        """Only the requesting user's own orders can be fetched or updated."""
        user = self.request.user
        queryset = Order.objects.filter(created_by=user)
        return queryset
def orders(request):
    """Render the ``home/orders.html`` page with the requesting user's orders.

    Removed a leftover debug ``print`` and renamed the local so it no longer
    shadows this view function's own name.
    """
    user_orders = Order.objects.filter(created_by=request.user)
    return render(request, "home/orders.html", {"orders": user_orders})
| PROFabdalla/payment_app | order/views.py | views.py | py | 999 | python | en | code | 0 | github-code | 36 |
734577939 |
# https://randerson112358.medium.com/email-spam-detection-using-python-machine-learning-abe38c889855
# https://blog.textedly.com/spam-text-message-examples
#Import libraries
import numpy as numpy
import pandas as panda
import nltk
from nltk.corpus import stopwords
import string
#Load the data
# dataFrame = panda.read_csv('db/emails.csv')
dataFrame = panda.read_csv('db/test.csv')
# print(dataFrame.head(5))
#Print the shape (Get the number of rows and cols)
result = dataFrame.shape
# print (result)
#Get the column names
dataFrame.columns
# print(dataFrame.columns)
#Checking for duplicates and removing them
dataFrame.drop_duplicates(inplace=True)
# result = dataFrame.shape
# print (result)
#Show the number of missing (NAN, NaN, na) data for each column
result = dataFrame.isnull().sum()
# print (result)
#Need to download stopwords
# nltk.download('stopwords')
# Tokenization (a list of tokens), will be used as the analyzer
# 1.Punctuations are [!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~]
# 2.Stop words in natural language processing, are useless words (data).
def process_text(text):
    """Tokenize a raw message for the bag-of-words model.

    1. Remove punctuation characters.
    2. Drop English stop words (case-insensitive).

    Returns the list of remaining "clean" word tokens.
    """
    # 1 Remove punctuation
    without_punctuation = ''.join(ch for ch in text if ch not in string.punctuation)
    # 2 Remove stop words. Fetch the corpus once and use a set for O(1) lookups:
    # the original called stopwords.words('english') for every single token.
    stop_words = set(stopwords.words('english'))
    # 3 Return a list of clean words
    return [word for word in without_punctuation.split() if word.lower() not in stop_words]
#Show the Tokenization (a list of tokens )
# print (dataFrame['text'].head().apply(process_text))
# Convert the text into a matrix of token counts.
from sklearn.feature_extraction.text import CountVectorizer
messages_bow = CountVectorizer(analyzer=process_text).fit_transform(dataFrame['text'])
# print (messages_bow)
#Split data into 80% training & 20% testing data sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(messages_bow, dataFrame['spam'], test_size=0.20, random_state=0)
#Get the shape of messages_bow
# messages_bow.shape
# print (messages_bow.shape)
# Create and train the Multinomial Naive Bayes classifier which is suitable for classification with discrete features (e.g., word counts for text classification)
from sklearn.naive_bayes import MultinomialNB
classifier = MultinomialNB()
classifier.fit(X_train, y_train)
# #Print the predictions
# print(classifier.predict(X_train))
#
#
# print ('divider')
# # Print the actual values
# print(y_train.values)
#Evaluate the model on the training data set
from sklearn.metrics import classification_report,confusion_matrix, accuracy_score
pred = classifier.predict(X_train)
print(classification_report(y_train, pred))
print('Confusion Matrix: \n', confusion_matrix(y_train, pred))
print()
print('Accuracy: ', accuracy_score(y_train, pred))
# #Print the predictions
# print('Predicted value: ', classifier.predict(X_test))
# print ('divider')
# #Print Actual Label
# print('Actual value: ', y_test.values)
#Evaluate the model on the test data set
from sklearn.metrics import classification_report,confusion_matrix, accuracy_score
pred = classifier.predict(X_test)
print(classification_report(y_test ,pred ))
print('Confusion Matrix: \n', confusion_matrix(y_test,pred))
print()
print('Accuracy: ', accuracy_score(y_test,pred))
| hatem-elsheref/email-spam-detection | main.py | main.py | py | 3,394 | python | en | code | 0 | github-code | 36 |
71873734503 | import pygame as pg
pg.init()
COLOR_INACTIVE = pg.Color('lightskyblue3')
COLOR_ACTIVE = pg.Color('dodgerblue2')
FONT = pg.font.Font(None, 32)
class InputBox:
    def __init__(self, x, y, w, h, text=''):
        """Create an inactive input box at (x, y) of size (w, h) with optional initial text."""
        self.rect = pg.Rect(x, y, w, h)
        self.color = COLOR_INACTIVE
        self.text = text
        # Last string committed with the Return key (see handle_event).
        self.saved_text = ''
        self.txt_surface = FONT.render(text, True, self.color)
        self.active = False
    def handle_event(self, event):
        """Process one pygame event: toggle focus on click, edit text on key presses."""
        if event.type == pg.MOUSEBUTTONDOWN:
            # If the user clicked on the input_box rect.
            if self.rect.collidepoint(event.pos):
                # Toggle the active variable.
                self.active = not self.active
            else:
                self.active = False
            # Change the current color of the input box.
            self.color = COLOR_ACTIVE if self.active else COLOR_INACTIVE
        if event.type == pg.KEYDOWN:
            if self.active:
                if event.key == pg.K_RETURN:
                    #print(self.text)
                    # Return commits the current text and clears the box.
                    self.saved_text = self.text
                    self.text = ''
                elif event.key == pg.K_BACKSPACE:
                    self.text = self.text[:-1]
                else:
                    # Append the typed character (event.unicode respects shift/layout).
                    self.text += event.unicode
                # Re-render the text.
                self.txt_surface = FONT.render(self.text, True, self.color)
def update(self):
# Resize the box if the text is too long.
width = max(200, self.txt_surface.get_width()+10)
self.rect.w = width
def draw(self, screen):
# Blit the text.
screen.blit(self.txt_surface, (self.rect.x+5, self.rect.y+5))
# Blit the rect.
pg.draw.rect(screen, self.color, self.rect, 2) | andreidumitrescu95/Python-Sorting-Algorithm-Visualizer | Helper/input_helper.py | input_helper.py | py | 1,768 | python | en | code | 3 | github-code | 36 |
14625474319 | from django.shortcuts import render,redirect
from .forms import RegisterForm
# Create your views here.
def register(request):
# 从 get 或者 post 请求中获取 next 参数值
# get 请求中,next 通过 url 传递,即 /?next=value
# post 请求中,next 通过表单传递,即 <input type="hidden" name="next" value="{{ next }}"/>
redirect_to = request.POST.get('next', request.GET.get('next', ''))
#当请求为post 表示用户注册了信息
if request.method =='POST':
#实例化一个Form
# request.POST 是一个类字典数据结构,记录了用户提交的注册信息
# 这里提交的就是用户名(username)、密码(password)、邮箱(email)
# 用这些数据实例化一个用户注册表单
auth_forms=RegisterForm(request.POST)
# 验证数据的合法性
if auth_forms.is_valid():
# 如果提交数据合法,调用表单的 save 方法将用户数据保存到数据库
auth_forms.save()
# 注册成功,返回成功界面
#return redirect(reverse('users:success'))
return redirect(redirect_to)
# 请求不是 POST,表明用户正在访问注册页面,展示一个空的注册表单给用户
else:
auth_forms = RegisterForm()
# 渲染模板
# 如果不是 POST 请求,则渲染的是一个空的表单
# 如果用户通过表单提交数据,但是数据验证不合法,则渲染的是一个带有错误信息的表单
return render(request, 'users/register.html', {'auth_forms': auth_forms,'next':redirect_to}) | rainy0824/blog_project | users/views.py | views.py | py | 1,686 | python | zh | code | 0 | github-code | 36 |
14159945677 | """the errors of the two systems are compared"""
# Read the three-column (gold / spaCy / emBERT) predictions and, for every
# sentence with at least one token whose three tags are not all "O", append
# the rebuilt sentence text plus those tokens (with character offsets inside
# the sentence) to diff_v5.txt.
with open("../../iob_outputs_from_html/emBERT_eval/result.conllup", "r", encoding="utf-8") as f:
    lines = f.readlines()
error = 0
sent = ""
diff = list()
for i in range(len(lines)):
    # Skip the header line.
    if i == 0:
        continue
    if lines[i] != "\n":
        # Rebuild the sentence text: no space before most punctuation marks.
        # NOTE(review): this peeks at lines[i + 1]; it assumes the file ends
        # with a blank line so the lookahead never runs off the end -- confirm.
        if lines[i + 1].split("\t")[0] in [",", ".", "!", ":", "?", "(", ")", "_", "-", "'", '"', "/", "=", "%"]:
            sent += lines[i].split("\t")[0]
        elif lines[i].split("\t")[0] in ["-"]:
            sent += lines[i].split("\t")[0]
        else:
            sent += lines[i].split("\t")[0] + " "
        word = lines[i].split("\t")[0]
        gold = lines[i].split("\t")[1]
        spacy = lines[i].split("\t")[2]
        # Strip the trailing newline from the last column.
        bert = lines[i].split("\t")[3][:-1]
        if gold == "O" and spacy == "O" and bert == "O":
            continue
        else:
            # gold != "O" and spacy != "O" and bert != "O":
            # Character span of this token inside the rebuilt sentence.
            word_end_pos = len(sent) - 1
            word_start_pos = len(sent) - len(word) - 1
            diff.append(word + "\t" + gold + "\t" + spacy + "\t" + bert + "\t" + str(word_start_pos) + "\t" + str(word_end_pos))
            error += 1
    else:
        # Sentence boundary: flush the collected rows, then reset accumulators.
        if error != 0:
            with open("../../iob_outputs_from_html/emBERT_eval/diff_v5.txt", "a", encoding="utf-8") as f:
                f.write(sent + "\n")
                for i in range(len(diff)):
                    f.write(diff[i] + "\n")
                f.write("\n")
        error = 0
        sent = ""
        diff = list()
| huspacy/huspacy-resources | scripts/ner_data_analysis/spacy_bert_diff/visualization/spacy_bert_check_differences.py | spacy_bert_check_differences.py | py | 1,516 | python | en | code | 0 | github-code | 36 |
43319941106 | import torch
import shutil
import torch.optim as optim
import numpy as np
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
from utils import AverageMeter, min_max_normalize, save_checkpoint
from dataset import DAEDataset
from model import ModelRec
transform = transforms.Compose([transforms.Resize((300, 400)),
transforms.ToTensor(),
transforms.Lambda(lambda tensor: min_max_normalize(tensor))])
#working directory
root = '/content'
train_dataset = DAEDataset(root, mode='train', transform=transform)
val_dataset = DAEDataset(root, mode='val', transform=transform)
train_loader = DataLoader(dataset=train_dataset, batch_size=16, shuffle=True)
val_loader = DataLoader(dataset=val_dataset, batch_size=2, shuffle=True)
def train(train_loader, val_loader, model, optimizer, scheduler, criterion, num_epochs, experiment="DAE_Conv"):
    """Run the training/validation loop and checkpoint the best model.

    Args:
        train_loader / val_loader: DataLoaders yielding (input, target) pairs.
        model: network to optimise (already moved to GPU by the caller).
        optimizer, scheduler, criterion: the usual training collaborators.
        num_epochs: number of epochs to run.
        experiment: architecture tag stored in the checkpoint. New optional
            parameter: previously this name was read from a global that was
            only defined inside main(), raising NameError on the first save.
    """
    best_loss = 1
    for epoch in range(num_epochs):
        # NOTE(review): stepping the scheduler at the start of the epoch
        # reproduces the original behaviour, although PyTorch recommends
        # stepping after the optimizer updates.
        scheduler.step()
        train_loss = AverageMeter()
        val_loss = AverageMeter()
        model.train()
        for input, target in train_loader:
            input = input.cuda()
            target = target.cuda()
            optimizer.zero_grad()
            output = model(input)
            loss = criterion(output, target)
            train_loss.up_date(loss, input.size(0))
            loss.backward()
            optimizer.step()
        print('Epoch {} of {}, Train_loss: {:.3f}'.format(epoch + 1, num_epochs, train_loss.avg))
        model.eval()
        # Disable autograd during validation: no graphs are needed for scoring.
        with torch.no_grad():
            for input, target in val_loader:
                input = input.cuda()
                target = target.cuda()
                output = model(input)
                loss = criterion(output, target)
                val_loss.up_date(loss, input.size(0))
        is_best = val_loss.avg < best_loss
        best_loss = min(val_loss.avg, best_loss)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': experiment,
            'state_dict': model.state_dict(),
            'best_prec1': best_loss,
            'optimizer': optimizer.state_dict(),
        }, is_best)
        print('Epoch {} of {}, Val_loss: {:.3f}'.format(epoch + 1, num_epochs, val_loss.avg))
def main():
    """Set up the model, loss, optimiser and LR schedule, then run training."""
    torch.cuda.empty_cache()
    model = ModelRec().cuda()
    criterion = nn.BCELoss().cuda()
    optimizer = optim.Adam(model.parameters())
    # Decay the learning rate by 10x every 50 epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size = 50, gamma=0.1)
    # NOTE(review): `experiment` is local to main() while train() stores an
    # 'arch' tag in the checkpoint -- confirm how that name reaches train().
    experiment = "DAE_Conv"
    num_epochs = 200
    # Uncomment to resume training from a saved checkpoint:
    #checkpoint = torch.load('/content/checkpoint.pth.tar')
    #model.load_state_dict(checkpoint['state_dict'])
    train(train_loader, val_loader, model, optimizer, scheduler, criterion, num_epochs)
if __name__ == '__main__':
    main()
| ANnick2908/Data_Science_Projects | Denoising_Autoencoder/train.py | train.py | py | 2,622 | python | en | code | 0 | github-code | 36 |
7175166932 | import matplotlib.pyplot as plt
import cv2
import time
start_time = time.time()
# Titik awal
x1 = 1
y1 = 7
# Titik Akhir
x2 = 7
y2 = 15
dy = y2-y1
dx = x2-x1
step = dx if dx > dy else dy
xInc = dx / step
yInc = dy / step
x=x1
y=y1
len=y2+x2
# x axis value list.
x_number_list = []
x_number_list.append(x1)
# y axis value list.
y_number_list = []
y_number_list.append(y1)
for i in range(1,len):
x = x + xInc
y = y + yInc
xBulat=int(x)
yBulat=int(y)
x_number_list.append(xBulat)
y_number_list.append(yBulat)
if(x>=x2 and y>=y2):
break
end_time = time.time()
delta_time = end_time-start_time
print("Execution Time : ",delta_time," ms")
from guppy import hpy
h = hpy()
print (h.heap())
# Draw point based on above x, y axis values.
plt.scatter(x_number_list, y_number_list, s=10)
# Set chart title.
plt.title("Algoritma DDA")
# Set x, y label text.
plt.xlabel("X")
plt.ylabel("Y")
plt.show() | albirrkarim/dda-bresenham | dda.py | dda.py | py | 966 | python | en | code | 0 | github-code | 36 |
25544326890 | import numpy as np
import cvxpy as cp
import matplotlib.pyplot as plt
# double integrator
# state is [x, vx, y, vy]
T = 0.1 # timestep in seconds
A = np.kron(np.eye(2),np.array([[1, T], [0, 1]]))
B = np.kron(np.eye(2), np.array([[0.5*T**2], [T]]))
class MPC:
    """Finite-horizon trajectory planner for double-integrator agents.

    Each agent's state is [x, vx, y, vy]. solve() builds one convex program
    over the whole horizon that drives every agent from its initial state to
    its target state while minimising total control effort.
    """
    def __init__(self, prediction_horizon = 10, number_of_agents = 1, umax=100.0, obstacles=[]):
        """Store problem parameters.

        Args:
            prediction_horizon: number of discrete time steps N.
            number_of_agents: how many agents are planned jointly.
            umax: per-step bound on the control input (inf-norm).
            obstacles: stored on the instance but not used by solve() here.
        """
        # NOTE(review): the mutable default `obstacles=[]` is shared between
        # instances; pass an explicit list to be safe.
        self.N = prediction_horizon
        self.num_agents = number_of_agents
        self.obstacles = obstacles
        self.umax = umax
        self.solution_X = None
        self.solution_U = None # updated once solve is called
        return
    def plot(self):
        """Plot every agent's solved x/y trajectory and save it to plot.png."""
        if self.solution_X is None:
            print("Please call solve first")
            return
        # plot the trajectories
        plt.figure()
        for i in range(self.num_agents):
            xs = [x[0] for x in self.solution_X[i]]
            ys = [x[2] for x in self.solution_X[i]]
            plt.plot(xs, ys, label=f"Agent {i}")
            # Mark the start with a circle and the end with a cross.
            plt.scatter([xs[0]], [ys[0]], marker="o", label=None)
            plt.scatter([xs[-1]], [ys[-1]], marker="x", label=None)
        plt.grid()
        plt.xlabel("x [m]")
        plt.ylabel("y [m]")
        plt.legend()
        # plt.show()
        plt.savefig("plot.png")
    def solve(self, x0, xT):
        """Solve the horizon-length trajectory problem for all agents.

        Args:
            x0: list of initial states, one 4-vector per agent.
            xT: list of target states, one 4-vector per agent.

        Returns:
            The first control input of each agent's optimal input sequence.
        """
        # x0 is a list of initial conditions
        # xT is a list of target states
        ### construct the MPC problem
        X = [[cp.Variable(4) for _ in range(self.N)] for i in range(self.num_agents)]
        # X[i][k] is the i-th agents state at time k
        U = [[cp.Variable(2) for _ in range(self.N-1)] for i in range(self.num_agents)]
        ### create constraints
        constraints = []
        # initial and final conditions
        for i in range(self.num_agents):
            constraints.append(X[i][0] == x0[i])
            constraints.append(X[i][-1] == xT[i])
        # dynamics constraints
        for i in range(self.num_agents):
            for k in range(self.N-1): # at each timestep
                constraints.append(X[i][k+1] == A @ X[i][k] + B @ U[i][k])
        # input constraints
        for i in range(self.num_agents):
            for k in range(self.N-1):
                constraints.append(cp.norm(U[i][k], "inf") <= self.umax)
        ### construct the objective function
        objective = sum(sum(cp.sum_squares(uk) for uk in Ui) for Ui in U)
        ### call a solver
        prob = cp.Problem(cp.Minimize(objective), constraints)
        prob.solve()
        ### save the trajectory
        self.solution_X = [[x.value for x in Xi] for Xi in X]
        self.solution_U = [[u.value for u in Ui] for Ui in U]
        ### return instantaneous control input
        return [u[0] for u in self.solution_U]
mpc = MPC(number_of_agents=2)
x0 = [np.array([0,0,0,0]), np.array([1,0,0,0])]
xT = [np.array([1,0,1,0]), np.array([0,0,1,0])]
mpc.solve(x0, xT)
mpc.plot() | dev10110/interagent_mpc | no_obstacle_mpc.py | no_obstacle_mpc.py | py | 2,690 | python | en | code | 0 | github-code | 36 |
70848016743 | import sys
import os.path
import re
import warnings
from io import StringIO
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from patsy import dmatrices
import six
class PerfData():
    """Reads a raw performance-results file containing one or more runs.

    Each run begins with the ``start-time,response-time`` CSV header line.
    """

    __DATETIME_HEADER__ = "start-time"
    __PERF_HEADER__ = __DATETIME_HEADER__ + ",response-time"

    def __init__(self, filename):
        self._filename = filename

    def data(self):
        """Return the whole file contents as a single string."""
        with open(self._filename) as f:
            return f.read()

    def headers(self):
        """Return the CSV header line that starts every run."""
        return self.__PERF_HEADER__

    def datetime_headers(self):
        """Return the name of the timestamp column."""
        return self.__DATETIME_HEADER__
class PerformanceRunIterator():
    """Iterate over the individual runs inside a raw performance data string.

    A run starts at each occurrence of *header* and extends up to the next
    header (or to the end of the data).
    """

    def __init__(self, data, header):
        self._data = data
        self._current_index = 0
        self._perf_header = header

    def __iter__(self):
        # (Re)compute the start offset of every run, plus a sentinel at EOF.
        self._header_indexes = [m.start() for m in re.finditer(self._perf_header, self._data)]
        self._header_indexes.append(len(self._data))
        return self

    def next(self):
        """Return the next run's text, or raise StopIteration when exhausted."""
        if self._current_index + 1 >= len(self._header_indexes):
            raise StopIteration
        line = self._line_at_index(self._current_index)
        self._current_index = self._current_index + 1
        return line

    # Bug fix: Python 3's iterator protocol calls __next__, not next(), so
    # for-loops over this iterator failed on Python 3. next() is kept as an
    # alias for Python 2 and for direct callers.
    __next__ = next

    def _line_at_index(self, position):
        """Return the text of the run starting at header index *position*."""
        start = self._header_indexes[position]
        end = self._header_indexes[position + 1]
        line = self._data[start:end]
        return six.text_type(line)
def read_throughput_data(filename):
    """Load all performance runs from *filename* into a single DataFrame.

    Each run is parsed individually, trimmed to whole-second boundaries and
    then concatenated. Empty trimmed runs are skipped.
    """
    perf_data = PerfData(filename)
    sections = []
    for run in PerformanceRunIterator(perf_data.data(), perf_data.headers()):
        run_dataframe = pd.read_csv(StringIO(run), parse_dates=[perf_data.datetime_headers()])
        trimmed_section = trim_edges(run_dataframe)
        if len(trimmed_section) > 0:
            sections.append(trimmed_section)
    # DataFrame.append was removed in pandas 2.0; build the frame in one concat.
    df = pd.concat(sections) if sections else pd.DataFrame()
    # Reset the index because it is a Frankenstein of smaller indexes
    df = df.reset_index().drop('index', axis=1)
    return df
def trim_edges(data):
    """Keep only samples between the first and last whole-second bucket edges."""
    bucket_index = data.set_index('start-time').resample('1S').aggregate(lambda x: 1).index
    window_start = bucket_index[0]
    window_end = bucket_index[-1]
    in_window = (data['start-time'] >= window_start) & (data['start-time'] <= window_end)
    return data[in_window]
def process_throughput_data(data):
    """Bucket response times into 1-second windows and count requests per bucket.

    Returns (resampler, frame) where *frame* has 'start-time' and 'throughput'
    columns (requests per second, 0 for empty buckets).
    """
    buckets = data.set_index('start-time')['response-time'].resample('1S')
    # The dict-renaming form of Series.resample(...).aggregate({'name': fn})
    # was removed from pandas; count per bucket and name the column explicitly.
    # count() already yields 0 for empty buckets, matching the old lambda.
    throughput_data_set = buckets.count().rename('throughput').to_frame()
    throughput_data_set = throughput_data_set.reset_index()
    # fillna(method='ffill') is deprecated; ffill() is the modern equivalent.
    throughput_data_set = throughput_data_set.ffill()
    return buckets, throughput_data_set
def generate_fit_line(data):
    """Fit latency ~ throughput with an inverse-Gaussian GLM and predict a curve.

    Returns (domain, fit_line, rounded max throughput) for plotting.
    """
    y, x = dmatrices('latency ~ throughput', data=data, return_type='dataframe')
    # Inverse-Gaussian family with the inverse-squared link models the sharp
    # latency growth as throughput approaches saturation.
    fit = sm.GLM(y, x, family=sm.families.InverseGaussian(sm.families.links.inverse_squared)).fit()
    max_throughput = data['throughput'].max()
    min_throughput = data['throughput'].min()
    domain = np.arange(min_throughput, max_throughput)
    # Prediction design matrix: intercept column of ones plus the throughput values.
    prediction_inputs = np.ones((len(domain), 2))
    prediction_inputs[:, 1] = domain
    fit_line = fit.predict(prediction_inputs)
    return domain, fit_line, round(max_throughput)
if __name__ == '__main__':
matplotlib.style.use('ggplot')
matplotlib.rcParams['figure.figsize'] = 9, 6
matplotlib.rcParams['legend.loc'] = 'best'
matplotlib.rcParams['figure.dpi'] = 120
# We'll need these packages for plotting fit lines
warnings.filterwarnings('ignore')
performanceResultsFile = sys.argv[1]
assert os.path.isfile(performanceResultsFile), 'Missing performance results file'
compareDatasets = False
if compareDatasets:
assert os.path.isfile('old_perfResults.csv'), 'Missing old performance results file "old_perfResults.csv"'
goData = read_throughput_data(performanceResultsFile)
throughputBuckets, throughputData = process_throughput_data(goData)
if compareDatasets:
oldGoData = read_throughput_data('old_perfResults.csv')
oldThroughputBuckets, oldThroughputData = process_throughput_data(oldGoData)
goData['throughput'] = throughputBuckets.transform(len).reset_index()['response-time']
goData.columns = ['start-time', 'latency', 'throughput']
if compareDatasets:
oldGoData['throughput'] = oldThroughputBuckets.transform(len).reset_index()['response-time']
oldGoData.columns = ['start-time', 'latency', 'throughput']
domain, goFitLine, xLimit = generate_fit_line(goData)
if compareDatasets:
oldDomain, oldGoFitLine, oldXLimit = generate_fit_line(oldGoData)
fig, ax = plt.subplots()
# Change the value of `c` to change the color. http://matplotlib.org/api/colors_api.html
ax = goData.plot(ax=ax, kind='scatter', x='throughput', y='latency', c='b', marker='.', alpha=0.2)
ax.plot(domain, goFitLine, c='b', lw=2) # Plot the fit line
if compareDatasets:
ax = oldGoData.plot(ax=ax, kind='scatter', x='throughput', y='latency', c='r', marker='.', alpha=0.2)
ax.plot(oldDomain, oldGoFitLine, c='r', lw=2) # Plot the fit line
ax.legend(['after', 'before'])
# To update x & y axis range change the parameters in function set_(x/y)lim(lower_limit, uppper_limit)
ax.autoscale(True)
ax.autoscale_view(True, True, True)
plt.xlabel('Throughput (requests/sec)')
plt.ylabel('Latency (sec)')
plt.title('Headroom plot', y=1.05)
plt.plot()
filenameForPlot = performanceResultsFile[:-4] + "Plot.png"
plt.savefig(filenameForPlot)
print ("saving graph to " + filenameForPlot)
| cloudfoundry/credhub-perf-release | src/headroomplot/headroomplot.py | headroomplot.py | py | 5,644 | python | en | code | 0 | github-code | 36 |
71739250344 | #!/usr/bin/python3
"""
This module defines a class called Square.
"""
class Square:
    """
    This class represents a square shape.
    """

    def __init__(self, size=0):
        """
        Initializes a new instance of the Square class

        Args:
            size (int): The side length of the square.

        Raises:
            TypeError: If size is not an integer.
            ValueError: If size is less than 0.
        """
        # type() (not isinstance) is kept deliberately so bools are rejected.
        if type(size) is not int:
            raise TypeError("size must be an integer")
        if size < 0:
            raise ValueError("size must be >= 0")
        # Standard name mangling: `__size` becomes `_Square__size`, which the
        # original spelled out by hand.
        self.__size = size
| JAMBITI/alx-higher_level_programming | 0x06-python-classes/2-square.py | 2-square.py | py | 641 | python | en | code | 0 | github-code | 36 |
74537107943 | import micropython
import machine
# Allocate memory for the case of an exception was rised in an IRQ
micropython.alloc_emergency_exception_buf(100)
class Encoder:
    """Quadrature-encoder tick counter driven by an interrupt on channel A.

    The scheduler can be used for a soft IRQ; unfortunately, on rp2 the
    scheduler depth is set to 8, which appears to make it lose signals, so
    the hard handler is the default.
    """
    def __init__(self, pin_channel_a, pin_channel_b, reverse_direction = False, use_soft_interrupt_irq=False, sig_trigger=machine.Pin.IRQ_RISING, pull_r=None):
        """Configure both channel pins and attach the IRQ to channel A.

        :param pin_channel_a: machine.Pin carrying the interrupt signal.
        :param pin_channel_b: machine.Pin sampled to determine direction.
        :param reverse_direction: invert the counting direction.
        :param use_soft_interrupt_irq: schedule the handler via
            micropython.schedule instead of running it in hard-IRQ context.
        :param sig_trigger: edge(s) that fire the interrupt.
        :param pull_r: optional pull resistor mode for channel A.
        """
        self._counter = 0
        # Direction multiplier applied to every tick.
        self._dir = -1 if reverse_direction else 1
        # Initialise rising edge detection
        self._pin_channel_a = pin_channel_a
        self._pin_channel_a.init(machine.Pin.IN, pull_r)
        self._pin_channel_b = pin_channel_b
        self._pin_channel_b.init(machine.Pin.IN)
        # Fix: honour use_soft_interrupt_irq — previously the hard handler was
        # always installed and the parameter was silently ignored.
        handler = self.signal_soft_handler if use_soft_interrupt_irq else self.signal_hard_handler
        self._pin_channel_a.irq(trigger=sig_trigger, handler=handler)
    def signal_soft_handler(self, _):
        """
        First called by interrupt, then schedule the interrupt handler
        asap to avoid allocation problems in the IRQ context.
        """
        micropython.schedule(self.signal_handler, self._pin_channel_b.value())
    def signal_hard_handler(self, _):
        """Hard interrupt routine: channel B level selects the direction."""
        if self._pin_channel_b.value() == 0:
            self._counter -= self._dir
        else :
            self._counter += self._dir
    def signal_handler(self, _):
        """Scheduled (soft) interrupt routine; same logic as the hard handler.

        NOTE(review): the scheduled value of channel B is ignored and the pin
        is re-read here, so a late schedule may observe a changed level.
        """
        if self._pin_channel_b.value() == 0:
            self._counter -= self._dir
        else :
            self._counter += self._dir
    @property
    def counter(self):
        """Current signed tick count."""
        return self._counter
4100346454 | from django.urls import path
from myapp import views
# Route table: CRUD views for projects and employees, plus the views that
# manage the many-to-many project/employee assignment.
urlpatterns = [
    path('project/create', views.projectcreate),
    path('project/show', views.projectshow),
    path('employee/create', views.employeecreate),
    path('employee/show', views.employeeshow),
    path('project/view/<int:id>', views.projectview, name='project/view'),
    path('project/addemployeetoproject/<int:id>', views.addemployeetoproject),
    path('project/delete/<int:id>', views.projectdelete),
    path('project/update/<int:id>', views.projectupdate),
    path('employee/view/<int:id>', views.employeeview, name='employee/view'),
    path('employee/asignprojecttoemployee/<int:id>', views.asignprojecttoemployee),
    path('employee/update/<int:id>', views.employeeupdate),
    path('employee/delete/<int:id>', views.employeedelete)
]
2927331509 | import os
import time
from pathlib import Path
template = "#include<bits/stdc++.h>\n#define mod 1000000007\n#define fastio ios_base::sync_with_stdio(false);cin.tie(NULL);cout.tie(NULL)\n#define pb push_back\n#define mp make_pair\n#define ll long long int\n#define fi first\n#define se second\n#define vll std::vector<ll> vl\n#define ld long double\nusing namespace std;\nint main()\n{\nfastio;\n\n\tll i,j,cnt=0,n;\n\nreturn 0;\n}"
# Interactive loop: prompt for a file name and extension, then create the
# file in the current directory (C++ files are pre-filled with the template).
while True:
    fname = input('Enter the cpp filename without file extension\n')
    # Robustness fix: a non-numeric choice used to crash the script with an
    # uncaught ValueError raised by int().
    try:
        ext = int(input('Choose the option of required extension -\n1. cpp\n2. py\n'))
    except ValueError:
        print('Select Valid option for extension')
        continue
    cwd = os.getcwd()
    if ext == 1:
        # C++ file: created with the competitive-programming template.
        target = Path(cwd) / (fname + ".cpp")
        if target.is_file():
            print("File exist, Enter a valid File name")
        else:
            with open(target, "w+") as f:
                f.write(template)
            print('File created !')
            time.sleep(2)
    elif ext == 2:
        # Python file: created empty.
        target = Path(cwd) / (fname + ".py")
        if target.is_file():
            print("File exist, Enter a valid File name")
        else:
            with open(target, "w+"):
                pass
            print('File created !')
            time.sleep(2)
    else:
        print('Select Valid option for extension')
11378633561 | #!/bin/python3
import os
# Complete the sockMerchant function below.
def sockMerchant(n, array):
    """Return the number of matching pairs of socks in *array*.

    :param n: declared length of *array* (unused; kept for the HackerRank
        harness signature).
    :param array: iterable of sock colour identifiers.
    :return: total number of pairs (two socks of the same colour).
    """
    counts = {}
    for item in array:
        # dict.get avoids the try/except KeyError dance of the original.
        counts[item] = counts.get(item, 0) + 1
    # Each colour contributes floor(count / 2) pairs.
    return sum(c // 2 for c in counts.values())
if __name__ == "__main__":
    # HackerRank harness: read n and the sock colours from stdin, write the
    # pair count to the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ["OUTPUT_PATH"], "w")
    n = int(input())
    ar = list(map(int, input().rstrip().split()))
    result = sockMerchant(n, ar)
    fptr.write(str(result) + "\n")
    fptr.close()
74644371943 | import sys
import pygame
import pygame.gfxdraw
class Rubrika:
    """A labelled box: position (x, y), size (a = width, b = height), colour and caption."""
    def __init__(self, x, y, a, b, szin, szoveg):
        self.x = x
        self.y = y
        self.a = a
        self.b = b
        self.szin = szin  # pygame.Color used when drawing the box
        self.szoveg = szoveg  # caption rendered inside the box
    def negyzet(self, kepernyo):
        """Return the pygame.Rect of this box (the kepernyo argument is unused)."""
        return pygame.Rect(self.x, self.y, self.a, self.b)
def logo(kepernyo):
    """Blit the game logo onto the given surface."""
    logo = pygame.image.load("mil_logo.gif")
    kepernyo.blit(logo, (435, 60))
def inditas_negyzet():
    """Clickable rect of the 'start game' main-menu entry."""
    return pygame.Rect(412, 310, 200, 60)
def dicsoseg_negyzet():
    """Clickable rect of the 'hall of fame' main-menu entry."""
    return pygame.Rect(412, 380, 200, 60)
def menu_kilepes_negyzet():
    """Clickable rect of the 'quit' main-menu entry."""
    return pygame.Rect(412, 450, 200, 60)
def fomenu(kepernyo):
    """Draw the main menu (logo plus three outlined buttons) and refresh the display."""
    kepernyo.fill((0, 0, 60))
    logo(kepernyo)
    fomenu_elemek = [Rubrika(412, 310, 200, 60, pygame.Color(255, 255, 255), "Játék indítása"),
                     Rubrika(412, 380, 200, 60, pygame.Color(255, 255, 255), "Dicsőséglista"),
                     Rubrika(412, 450, 200, 60, pygame.Color(255, 255, 255), "Kilépés")]
    for f in fomenu_elemek:
        negyzet_rajzol(kepernyo, f, 0)
        szoveg_negyzetbe(kepernyo, f)
    pygame.display.update()
def segitseg_rubrika():
    """Boxes for the two helper (lifeline) icons."""
    return [Rubrika(12, 306, 75, 60, pygame.Color(255, 255, 255), ""),
            Rubrika(122, 306, 75, 60, pygame.Color(255, 255, 255), "")]
def segitseg_negyzet():
    '''Return the clickable rects of the helper (lifeline) boxes.'''
    return [pygame.Rect(12, 306, 75, 60), pygame.Rect(122, 306, 75, 60)]
def segitseg_logo(kepernyo, hely, fajl):
    """Blit the image file *fajl* inside the helper box *hely* (5 px inset)."""
    fajlbol = pygame.image.load(fajl)
    kepernyo.blit(fajlbol, (hely.x + 5, hely.y + 5))
def negyzet_rajzol(kepernyo, rubrika, teli):
    """Draw *rubrika*: teli == 1 -> filled box with a white outline, teli == 0 -> outline only."""
    if teli == 1:
        pygame.gfxdraw.box(kepernyo, rubrika.negyzet(kepernyo), rubrika.szin)
        pygame.gfxdraw.rectangle(kepernyo, pygame.Rect(rubrika.x, rubrika.y, rubrika.a+2, rubrika.b+2), pygame.Color(255, 255, 255))
    elif teli == 0:
        pygame.gfxdraw.rectangle(kepernyo, rubrika.negyzet(kepernyo), rubrika.szin)
def szoveg_negyzetbe(kepernyo, forras):
    """Render the caption of *forras* inside its box."""
    betustilus = pygame.font.SysFont("Bahnschrift SemiLight", 24)
    szoveg = betustilus.render(forras.szoveg, 1, (255, 255, 255))
    kepernyo.blit(szoveg, (forras.x + 10, forras.y + (forras.b)/2))
def valasz_negyzet():
    """Clickable rects of the four answer boxes (2x2 grid below the question)."""
    return [
        pygame.Rect(12, 500, 500, 100),
        pygame.Rect(12+500, 500, 500, 100),
        pygame.Rect(12, 500+100, 500, 100),
        pygame.Rect(12+500, 500+100, 500, 100)
    ]
def kerdes_valasz_betoltes(kepernyo, forras, statusz):
    '''Load the next question and answer options; statusz may be 0, 1 or 2: 0 = neutral, 1 = correct answer given, 2 = wrong answer given - accordingly the correct answer's box colour changes.'''
    kerdes = Rubrika(12, 384, 1000, 100, pygame.Color(255, 255, 255), forras.kerdes)
    kategoria = Rubrika(12, 236, 200, 50, pygame.Color(255, 255, 255), forras.kategoria)
    valaszok = [
        Rubrika(12, 500, 500, 100, pygame.Color(0, 0, 60), "A: {}".format(forras.a)),
        Rubrika(12+500, 500, 500, 100, pygame.Color(0, 0, 60), "B: {}".format(forras.b)),
        Rubrika(12, 500+100, 500, 100, pygame.Color(0, 0, 60), "C: {}".format(forras.c)),
        Rubrika(12+500, 500+100, 500, 100, pygame.Color(0, 0, 60), "D: {}".format(forras.d))
    ]
    if statusz == 0:
        pass
    elif statusz == 1:
        # Highlight the correct answer (matched by its leading letter) in green.
        for jo in valaszok:
            if jo.szoveg[0] == forras.valasz:
                jo.szin = pygame.Color(50, 100, 0)
    elif statusz == 2:
        # Wrong answer given: mark the correct one in red.
        for jo in valaszok:
            if jo.szoveg[0] == forras.valasz:
                jo.szin = pygame.Color(150, 0, 0)
    negyzet_rajzol(kepernyo, kerdes, 0)
    szoveg_negyzetbe(kepernyo, kerdes)
    negyzet_rajzol(kepernyo, kategoria, 0)
    szoveg_negyzetbe(kepernyo, kategoria)
    for v in valaszok:
        negyzet_rajzol(kepernyo, v, 1)
        szoveg_negyzetbe(kepernyo, v)
    pygame.display.update()
def hatter(kepernyo):
    """Draw the common in-game background: lifeline icons, logo and the quit button."""
    segitseg_hely = segitseg_rubrika()
    kepernyo.fill((0, 0, 60))
    logo(kepernyo)
    segitseg_logo(kepernyo, segitseg_hely[0], "kozonseg.png")
    segitseg_logo(kepernyo, segitseg_hely[1], "felezes.png")
    kilepes_rubrika = Rubrika(854, 20, 150, 60, pygame.Color(60, 0, 0), "Kilépés")
    negyzet_rajzol(kepernyo, kilepes_rubrika, 1)
    szoveg_negyzetbe(kepernyo, kilepes_rubrika)
def pontok_betoltes(kepernyo, forras):
    """Draw the current prize box; the milestone amounts get a golden background."""
    pont_rajz = Rubrika(762, 306, 250, 60, pygame.Color(0, 0, 60), "Nyeremény: {} Ft".format(forras))
    if "100000 Ft" in pont_rajz.szoveg or "1500000 Ft" in pont_rajz.szoveg or "40000000 Ft" in pont_rajz.szoveg:
        pont_rajz.szin = pygame.Color(220, 180, 0)
    negyzet_rajzol(kepernyo, pont_rajz, 1)
    szoveg_negyzetbe(kepernyo, pont_rajz)
def idozito_keret(kepernyo, szam):
    """Draw the countdown box showing *szam* and refresh the display."""
    idozito = Rubrika(482, 306, 60, 60, pygame.Color(0, 0, 60), "{}".format(szam))
    negyzet_rajzol(kepernyo, idozito, 1)
    szoveg_negyzetbe(kepernyo, idozito)
    pygame.display.update()
def kezdokepernyo(kepernyo):
    '''Draw the start screen into the window; the boxes' coordinates were chosen in pixels of the 1024x720 window.'''
    kepernyo.fill((0, 0, 60))
    logo(kepernyo)
    kezdo_rubrikak = [Rubrika(362, 232, 300, 60, pygame.Color(0, 0, 60), "Játékos neve"),
                      Rubrika(437, 500, 150, 60, pygame.Color(0, 0, 150), "Játék indítása"),
                      Rubrika(854, 20, 150, 60, pygame.Color(60, 0, 0), "Kilépés"),
                      Rubrika(362, 322, 300, 60, pygame.Color(0, 0, 30), "")]
    for rubrika in kezdo_rubrikak:
        negyzet_rajzol(kepernyo, rubrika, 1)
        szoveg_negyzetbe(kepernyo, rubrika)
def szint_negyzetek():
    """Clickable rects of the three difficulty buttons.

    Fix: all three buttons are drawn at y=412 by szintvalaszto_negyzetek(),
    but the second and third hitboxes were defined at y=400, so clicks near
    the bottom edge of those buttons missed. The hitboxes now match the
    drawn boxes exactly.
    """
    return [pygame.Rect(257, 412, 150, 60), pygame.Rect(437, 412, 150, 60), pygame.Rect(617, 412, 150, 60)]
def inditogomb_negyzet():
    """Clickable rect of the 'start game' button on the start screen."""
    return pygame.Rect(437, 500, 150, 60)
def kilepes_negyzet():
    """Clickable rect of the 'quit' button."""
    return pygame.Rect(854, 20, 150, 60)
def felhasznalo_rubrika():
    """Box of the player-name input field."""
    return Rubrika(362, 322, 300, 60, pygame.Color(0, 0, 30), "")
def szintvalaszto_negyzetek(kepernyo, statusz):
    '''Draw the difficulty selector; statusz: 0 = neutral, 1 = Kezdő highlighted light blue, 2 = Normál highlighted, 3 = Extrém highlighted.'''
    szintek = [Rubrika(257, 412, 150, 60, pygame.Color(0, 0, 60), "Kezdő"),
               Rubrika(437, 412, 150, 60, pygame.Color(0, 0, 60), "Normál"),
               Rubrika(617, 412, 150, 60, pygame.Color(0, 0, 60), "Extrém")]
    if statusz == 0:
        pass
    elif statusz == 1:
        szintek[0].szin = pygame.Color(0, 0, 150)
    elif statusz == 2:
        szintek[1].szin = pygame.Color(0, 0, 150)
    elif statusz == 3:
        szintek[2].szin = pygame.Color(0, 0, 150)
    for rubrika in szintek:
        negyzet_rajzol(kepernyo, rubrika, 1)
        szoveg_negyzetbe(kepernyo, rubrika)
def szint_hibauzenet(kepernyo):
    """Show the 'choose a level!' error text under the start button."""
    hibahely = Rubrika(437, 590, 150, 60, pygame.Color(0, 0, 60), "Válassz szintet!")
    szoveg_negyzetbe(kepernyo, hibahely)
def segitseg_megjelenes(obj, kepernyo, statusz):
    '''Outline the given Segitoeszkoz (lifeline) object's box; when it has been used (statusz == 1) the outline turns red.'''
    if statusz == 0:
        pass
    elif statusz == 1:
        obj.szin = pygame.Color(150, 0, 0)
    negyzet_rajzol(kepernyo, obj, 0)
    pygame.display.update()
def kozonsegszavazat(kepernyo, statusz, *forras):
    """Render the audience-vote (lifeline) box.

    :param kepernyo: target pygame surface.
    :param statusz: 0 draws an empty box; any other value shows the vote text.
    :param forras: optional vote text (only the first extra argument is shown).
    """
    if statusz == 0:
        kozonseg_rubrika = Rubrika(212, 306, 245, 60, pygame.Color(255, 255, 255), "")
    else:
        # Fix: use the argument directly. The original sliced str(forras)
        # ([2:-3] of the tuple repr), which broke for texts containing quotes
        # or commas, and raised NameError for statusz values other than 0/1.
        szoveg = str(forras[0]) if forras else ""
        kozonseg_rubrika = Rubrika(212, 306, 245, 60, pygame.Color(255, 255, 255), "{}".format(szoveg))
    negyzet_rajzol(kepernyo, kozonseg_rubrika, 0)
    szoveg_negyzetbe(kepernyo, kozonseg_rubrika)
    pygame.display.update()
def jatek_vege(kepernyo, forras, statusz):
    """Draw the game-over screen: statusz True = player won, False = lost; *forras* is the prize amount."""
    if statusz:
        vege_rubrika = Rubrika(312, 310, 400, 60, pygame.Color(0, 0, 60), "Megnyerted a játékot! Nyereményed: {} Ft".format(forras))
    else:
        vege_rubrika = Rubrika(312, 310, 400, 60, pygame.Color(0, 0, 60), "Legközelebb jobban megy majd! Nyereményed: {} Ft".format(forras))
    # Cover the whole window with a fresh background surface first.
    jvege = pygame.Surface((1024, 720))
    jvege.fill((0, 0, 60))
    logo(jvege)
    kepernyo.blit(jvege, (0, 0))
    szoveg_negyzetbe(kepernyo, vege_rubrika)
    kilepes_rubrika = Rubrika(854, 20, 150, 60, pygame.Color(60, 0, 0), "Kilépés")
    negyzet_rajzol(kepernyo, kilepes_rubrika, 1)
    szoveg_negyzetbe(kepernyo, kilepes_rubrika)
    pygame.display.update()
def megallas(kepernyo):
    """Draw the 'I stop' (walk away with the prize) button."""
    megallas_rubrika = Rubrika(12, 156, 150, 50, pygame.Color(0, 100, 0), "Megállok")
    negyzet_rajzol(kepernyo, megallas_rubrika, 0)
    szoveg_negyzetbe(kepernyo, megallas_rubrika)
def megallas_negyzet():
    """Clickable rect of the 'I stop' button."""
    return pygame.Rect(12, 156, 150, 50)
def teljesites_ido_UI(ido):
    """Format a duration given in milliseconds as '<m> perc <s> másodperc'."""
    perc, maradek = divmod(int(ido), 60000)
    mp = maradek // 1000
    return "{} perc {} másodperc".format(perc, mp)
def dicsoseglista_UI(kepernyo, forras):
    """Render the top-20 leaderboard (name, score, difficulty, completion time).

    *forras* is a list of Jatekos objects. Rows beyond the available players
    render as empty cells. Players whose ``segitseg`` field equals the string
    "False" (no lifeline used) get a golden name cell.
    """
    kepernyo.fill((0, 0, 60))
    # Column headers.
    adat_rubrikak = [Rubrika(12, 12, 250, 38, pygame.Color(255, 255, 255), "NÉV"),
                     Rubrika(12+250, 12, 250, 38, pygame.Color(255, 255, 255), "PONTSZÁM"),
                     Rubrika(12+500, 12, 250, 38, pygame.Color(255, 255, 255), "NEHÉZSÉGI SZINT"),
                     Rubrika(12+750, 12, 250, 38, pygame.Color(255, 255, 255), "TELJESÍTÉSI IDŐ")]
    for rubrika in adat_rubrikak:
        negyzet_rajzol(kepernyo, rubrika, 0)
        szoveg_negyzetbe(kepernyo, rubrika)
    listaadatok = []
    for f in forras:
        telj_ido = teljesites_ido_UI(f.ido)
        listaadatok.append([f.nev, f.pontszam, f.nehezseg, telj_ido, f.segitseg])
    kezdo_x = 12
    kezdo_y = 50
    # Name column.
    for i in range(20):
        try:
            f_nev = listaadatok[i][0]
        except IndexError:
            f_nev = ""
        try:
            hasznalt_segitseget = listaadatok[i][4]
        except IndexError:  # narrowed from a bare except: only IndexError can occur here
            hasznalt_segitseget = ""
        nev = Rubrika(kezdo_x, kezdo_y + i*32, 250, 32, pygame.Color(0, 0, 60), "{}".format(f_nev))
        if hasznalt_segitseget == "False":
            nev.szin = pygame.Color(220, 180, 0)
        negyzet_rajzol(kepernyo, nev, 1)
        szoveg_negyzetbe(kepernyo, nev)
    # Score column. (A dead `pontszamok = []` local was removed here.)
    for i in range(20):
        try:
            f_pont = listaadatok[i][1]
        except IndexError:
            f_pont = ""
        pontszam = Rubrika(kezdo_x + 250, kezdo_y + i*32, 250, 32, pygame.Color(0, 0, 60), "{}".format(f_pont))
        negyzet_rajzol(kepernyo, pontszam, 1)
        szoveg_negyzetbe(kepernyo, pontszam)
    # Difficulty column.
    for i in range(20):
        try:
            f_szint = listaadatok[i][2]
        except IndexError:
            f_szint = ""
        nehezseg = Rubrika(kezdo_x + 500, kezdo_y + i*32, 250, 32, pygame.Color(0, 0, 60), "{}".format(f_szint))
        negyzet_rajzol(kepernyo, nehezseg, 1)
        szoveg_negyzetbe(kepernyo, nehezseg)
    # Completion-time column.
    for i in range(20):
        try:
            f_ido = listaadatok[i][3]
        except IndexError:
            f_ido = ""
        ido = Rubrika(kezdo_x + 750, kezdo_y + i*32, 250, 32, pygame.Color(0, 0, 60), "{}".format(f_ido))
        negyzet_rajzol(kepernyo, ido, 1)
        szoveg_negyzetbe(kepernyo, ido)
    pygame.display.update()
| pdoszpod11/SchoolProjects | WhoWantsToBeAMillionaire/Game/NHF_UI_v1.py | NHF_UI_v1.py | py | 12,337 | python | hu | code | 0 | github-code | 36 |
670561323 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import paths as paths
from DataBaseProxy import DataBaseProxy
from util import Utility
# Shared project helpers: misc utilities and the database access layer.
util = Utility()
dbp = DataBaseProxy()
year = 2017
month = 5
day = 6
# car kilometres for enjoy and car2go in one week
start = datetime.datetime(year, month, day, 0, 0, 0)
end = datetime.datetime(year, month +2, day, 23, 59, 0)
end2 = datetime.datetime(year, month, day, 23,59,0)
def clean_durations(df):
    """Drop duration outliers in two passes: first everything at or above the
    99th percentile, then everything at or below the 1st percentile of the
    remaining rows."""
    upper = df.duration.quantile(0.99)
    df = df[df.duration < upper]
    lower = df.duration.quantile(0.01)
    df = df[df.duration > lower]
    return df
def duration_per_car(df):
    """Return total booking duration per plate as a (plate, duration) frame."""
    per_car = df[["plate", "duration"]].copy()
    return per_car.groupby("plate", as_index=False).sum()
def bookings_per_car(df):
    """Return a frame indexed by plate whose single 'freq' column counts the
    bookings (rows) recorded for each plate."""
    counts = df.groupby("plate").count()[["_id"]]
    return counts.rename(columns={"_id": "freq"})
def parkings_per_car(df):
    """Return the number of parkings per plate in a 'number_of_parkings' column."""
    per_car = pd.DataFrame({"plate": df.plate, "number_of_parkings": df.duration})
    return per_car.groupby("plate", as_index=False).count()
def total_dur_per_car(df, df2):
    """Plot total booked minutes per plate and return a per-plate frame.

    *df* holds parking records (cleaned and summed per plate), *df2* booking
    records (counted per plate into a 'freq' column). Provider name and plot
    colour come from the project's Utility helper. Shows the plot (blocking).
    """
    provider = util.get_provider(df)
    color = util.get_color(df)
    df = clean_durations(df)
    dur_per_car = duration_per_car(df)
    freq_per_car = bookings_per_car(df2)
    fig, ax = plt.subplots(1, 1, figsize=(9,10))
    my_xticks = dur_per_car.plate
#    print len(my_xticks)
    ax.plot(dur_per_car.index, dur_per_car.duration, linestyle='-', marker='x',color=color)
#    ax.set_xticks(my_xticks)
    ax.set_title("min per car - " + provider)
    ax.set_xlabel("Plate")
    ax.set_ylabel("Total minutes")
    plt.show()
    dur_per_car.set_index('plate', inplace=True)
    dur_per_car['freq'] = freq_per_car['freq']
    # NOTE(review): dropna() is not in-place and its result is discarded here,
    # so NaN rows survive — confirm whether dropping was intended.
    dur_per_car.dropna()
    return dur_per_car
def total_dist_per_car_no_outliers (df):
    """Plot per-plate total minutes, keeping only cars within one standard
    deviation of the median total duration.

    NOTE(review): despite 'dist' in the name, this works on durations just
    like total_dur_per_car — confirm the intended metric.
    """
    provider = util.get_provider(df)
    color = util.get_color(df)
    df = clean_durations(df)
    dur_per_car = duration_per_car(df)
    std = dur_per_car['duration'].std()
    avg = dur_per_car['duration'].median()
    normalized_durations = dur_per_car[(dur_per_car['duration'] >= (avg-std)) &
                                       (dur_per_car['duration'] <= (avg+std))]
    fig, ax = plt.subplots(1, 1, figsize=(9,10))
#    my_xticks = normalized_durations.plate
#    print len(my_xticks)
#    plt.xticks(normalized_durations.index, my_xticks)
    plt.plot(normalized_durations.index, normalized_durations['duration'], linestyle='-', marker='x',color=color)
    ax.set_title("min per car in std - " + provider)
    ax.set_xlabel("Plate")
    ax.set_ylabel("Total minutes")
    plt.show()
def hist_dur_freq(column, df, df_source, data):
    """Plot CDF/PDF histograms of *column* ('duration' or 'freq') from the
    per-car frame *df* in four variants: raw, filtered (cars with freq > 30),
    per valid day, and per cleaned valid day.

    *df_source* supplies the provider name/colour; *data* must contain
    'valid_days' and 'cleaned_valid_days'. Returns (filtered frame, stats
    dict with mean/median/std for each day-normalisation) and saves the
    figure under paths.plots_path3.
    """
    provider = util.get_provider(df_source)
    color = util.get_color(df_source)
    if column == "duration":
        xlabel = "min"
    else :
        xlabel = ""
    if column == "freq":
        df = df.dropna()
    fig, ax = plt.subplots(2, 4, figsize=(20,10))
    fig.suptitle(provider + ' - ' + column + ' distributions')
    #uncleaned data
    ax[0,0].hist(df[column], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True)
    ax[0,0].set_title("CDF - " + column)
    ax[0,0].set_xlabel(xlabel)
    ax[1,0].hist(df[column], 50, facecolor=color, alpha=0.75)
    ax[1,0].set_title("PDF - " + column)
    ax[1,0].set_xlabel(xlabel)
    #filtering - only cars with at least 3 parkings at day
    df = df[df.freq > 30]
    ax[0,1].hist(df[column], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True)
    ax[0,1].set_title("filtered CDF - " + column)
    ax[0,1].set_xlabel(xlabel)
    ax[1,1].hist(df[column], 50, facecolor=color, alpha=0.75)
    ax[1,1].set_title("filtered PDF - " + column)
    ax[1,1].set_xlabel(xlabel)
    #divided per number of days
    ax[0,2].hist(df[column]/data["valid_days"], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True)
    ax[0,2].set_title("filtered CDF per day - " + column)
    ax[0,2].set_xlabel(xlabel)
    ax[1,2].hist(df[column]/data["valid_days"], 50, facecolor=color, alpha=0.75)
    ax[1,2].set_title("filtered PDF per day - " + column)
    ax[1,2].set_xlabel(xlabel)
    #divided per number of days in interval
    ax[0,3].hist(df[column]/data["cleaned_valid_days"], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True)
    ax[0,3].set_title("filtered CDF per day clnd - " + column)
    ax[0,3].set_xlabel(xlabel)
    ax[1,3].hist(df[column]/data["cleaned_valid_days"], 50, facecolor=color, alpha=0.75)
    ax[1,3].set_title("filtered PDF per day clnd - " + column)
    ax[1,3].set_xlabel(xlabel)
    res = {
        column+"_mean" : df[column].mean(),
        column+"_median": df[column].median(),
        column+"_std" : df[column].std(),
        column+"_mean_valid_days" : (df[column]/data["valid_days"]).mean(),
        column+"_median_valid_days": (df[column]/data["valid_days"]).median(),
        column+"_std_valid_days" : (df[column]/data["valid_days"]).std(),
        column+"_mean_valid_days_clnd" : (df[column]/data["cleaned_valid_days"]).mean(),
        column+"_median_valid_days_clnd": (df[column]/data["cleaned_valid_days"]).median(),
        column+"_std_valid_days_clnd" : (df[column]/data["cleaned_valid_days"]).std()
    }
    fig.savefig(paths.plots_path3+"_"+provider+"_"+column+"_parkings_tats.png", bbox_inches='tight')
    return df,res
#
#enjoy_parkings = dbp.query_parkings_df('enjoy','Torino', start, end)
#car2go_parkings = dbp.query_parkings_df('car2go','Torino', start, end)
#enjoy_parkings.to_pickle(paths.enjoy_parkings_pickle_path, None)
#car2go_parkings.to_pickle(paths.car2go_parkings_pickle_path, None)
# Load the pre-scraped bookings and parkings snapshots from local pickles
# (the live DB queries above are kept commented out).
enjoy = pd.read_pickle(paths.enjoy_pickle_path, None)
car2go = pd.read_pickle(paths.car2go_pickle_path, None)
enjoy_parkings = pd.read_pickle(paths.enjoy_parkings_pickle_path, None)
car2go_parkings = pd.read_pickle(paths.car2go_parkings_pickle_path, None)
#enj_data = util.get_valid_days(enjoy,start,end)
#c2g_data = util.get_valid_days(car2go,start,end)
#enjoy_parkings_duration = duration_per_car(enjoy_parkings)
#enj_park_duration_freq = total_dur_per_car(enjoy_parkings, enjoy)
#total_dist_per_car_no_outliers(enjoy)
#enj_clean, enj_data["park_stats_duration"] = hist_dur_freq("duration", enj_park_duration_freq, enjoy, enj_data)
#enj_clean, enj_data["park_stats_freq"] = hist_dur_freq("freq", enj_park_duration_freq, enjoy, enj_data)
#
#car2go_parkings_duration = duration_per_car(car2go_parkings)
#car2go_park_duration_freq = total_dur_per_car(car2go_parkings, car2go)
#total_dist_per_car_no_outliers(car2go)
#c2g_clean, c2g_data["park_stats_duration"] = hist_dur_freq("duration", car2go_park_duration_freq, car2go, c2g_data)
#c2g_clean, c2g_data["park_stats_freq"] = hist_dur_freq("freq", car2go_park_duration_freq, car2go, c2g_data)
"""
Avg parking time per car (valid days)
"""
#enj_clean["duration_per_day"] = enj_park_duration_freq["duration"]/(enj_data["cleaned_valid_days"])
#enj_clean["freq_per_day"] = enj_park_duration_freq["freq"]/(enj_data["cleaned_valid_days"])
#c2g_clean["duration_per_day"] = car2go_park_duration_freq["duration"]/(c2g_data["cleaned_valid_days"])
#c2g_clean["freq_per_day"] = car2go_park_duration_freq["freq"]/(enj_data["cleaned_valid_days"])
#
#
#fig,ax =plt.subplots(1, 1, figsize=(9,10))
#enj_clean.hist(ax=ax, color=util.get_color(enjoy))
#fig2,ax2 = plt.subplots(1, 1, figsize=(9,10))
#c2g_clean.hist(ax=ax2, color=util.get_color(car2go))
'''
come informazione ho il numero di minuti in cui è stata ferma la macchina, e il numero di prenotazioni che questa ha
ricevuto
'''
#total_dist_per_car_no_outliers(enjoy_parkings)
#dur_per_car['index'] = dur_per_car['index'] / (dur_per_car['index'].sum())
#dur_per_car.hist(bins=100, cumulative=True, normed=True)
#df2 = parkings_per_car(enjoy_parkings)
#enjoy_parkings_duration['count'] = df2['number_of_parkings']
#
#df = enjoy_parkings[
# (enjoy_parkings.plate == 'EZ049TY')
# ]
| michelelt/MyTool | Analysis/parkingsAnalysis.py | parkingsAnalysis.py | py | 8,244 | python | en | code | 0 | github-code | 36 |
19107043906 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 3 10:54:37 2018
@author: Administrator
"""
from greedyBandit import GreedyBandit
from decGreedyBandit import DecGreedyBandit
from optiBandit import OptiBandit
from UCBBandit import UCBBandit
from TSBandit import TSBandit
import matplotlib.pyplot as plt
times = 10000 #Simulation times
greedy = GreedyBandit(0,0) #Greedy Bandit
dec_greedy = DecGreedyBandit(0) #Decreasing Greedy Bandit
opti = OptiBandit(0) #Optimistic Initial Value Bandit
ucb = UCBBandit(0) #UCB Bandit
ts = TSBandit(0) #TS Bandit
#Function Arguments
m1=0.9
m2=0.5
m3=0.2
e=0.1
#Run Epsilon Greedy
greedy_result = greedy.run(m1,m2,m3,e,times)
#Run Decreasing Epsilon Greedy
dec_greedy_result = dec_greedy.run(m1,m2,m3,times)
#Run Optimistic Initial Value
opti_result = opti.run(m1,m2,m3,e,times)
#Run UCB
ucb_result = ucb.run(m1,m2,m3,times)
#Run Thompson Sampling
ts_result = ts.run(m1,m2,m3,times)
#Plot the result
plt.plot(greedy_result,label="Epsilon Greedy")
plt.plot(dec_greedy_result,label="Decreasing Epsilon Greedy")
plt.plot(opti_result,label="Optimistic Initial Value")
plt.plot(ucb_result,label="UCB1")
plt.plot(ts_result,label="Thompson Sampling")
#Show the graph
plt.legend(loc='upper right')
plt.show() | JJ-Tom-Li/Reinforcement-Machine-Learning | Programming Project -1 -MAB/main.py | main.py | py | 1,356 | python | en | code | 0 | github-code | 36 |
def add_numbers(n1, n2):
    """Return the sum of n1 and n2."""
    return n1 + n2
def multiply_numbers(n1, n2):
    """Return the product of n1 and n2."""
    return n1 * n2
# Interactive driver: read two numbers and report their sum and product.
num1 = float(input("enter first num:"))
num2 = float(input("enter second num:"))
summ = add_numbers(num1, num2)
product = multiply_numbers(num1, num2)
print("the sum of the two numbers is:", summ, " and its product is:", product)
| EndesewB/Python3 | funcPractice.py | funcPractice.py | py | 354 | python | en | code | 0 | github-code | 36 |
975733751 | """
File: model_ops.py
Author: Tomáš Daniš
Login: xdanis05
Description: Module holding functions implementing model operations - such as training, predicting or evaluating
"""
import torch.nn as nn
import torch.nn.utils as tut
import torch.optim as optim
import torch
from tqdm import tqdm
import torchtext.data as torch_data
import conf
from baseline import totext
_DEBUG = False # Debugging flag
def train(net, train_iterator, dev_iterator, vocabs, epoch_num=4, lr=0.002):
    """
    Train a given model on the given dataset
    :param net: Model to train
    :param train_iterator: Iterator through a training set of the dataset to use
    :param dev_iterator: Iterator through a development set of the dataset to use
    :param vocabs: Vocabularies used in the dataset, only used for debugging
    :param epoch_num: Number of epochs to train for
    :param lr: Learning rate to train with
    :return: None
    """
    net.train()
    criterion = nn.CrossEntropyLoss()
    # Fix: materialise the trainable parameters as a list. The original kept a
    # filter() iterator, which the Adamax constructor consumed, so the later
    # clip_grad_norm_ call iterated an exhausted iterator and clipped nothing.
    parameters = [p for p in net.parameters() if p.requires_grad]
    optimizer = optim.Adamax(parameters, lr=lr)
    # Training loop
    for epoch in tqdm(range(epoch_num), total=epoch_num, desc="Epoch"):
        epoch_loss = 0
        # Epoch loop
        for i, batch in tqdm(enumerate(train_iterator), total=len(train_iterator), desc="Iteration"):
            net.train()
            if _DEBUG:
                # Dump the decoded question/document/answers of the batch.
                q = totext(batch.question[0],vocabs[0],batch_first=False)
                d = totext(batch.document[0],vocabs[0],batch_first=False)
                a1 = totext(batch.answer1[0],vocabs[0],batch_first=False)
                a2 = totext(batch.answer2[0],vocabs[0],batch_first=False)
                print("* "*20+"NEXT"+"* "*20)
                print(d[0])
                print("* " * 20 + "Question" + "* " * 20)
                print(q[0])
                print("* " * 20 + "Answers" + "* " * 20)
                print(a1[0])
                print(a2[0])
            optimizer.zero_grad()
            out = net(batch)
            loss = criterion(out, batch.correct)
            loss.backward()
            # Gradient clipping guards against exploding gradients.
            tut.clip_grad_norm_(parameters, 0.5)
            optimizer.step()
            epoch_loss += loss.item()
        # At the end of an epoch, evaluate the current performance on the development set
        with torch.no_grad():
            net.eval()
            dev_loss = 0
            j = 0
            correct = 0
            total = 0
            for j, val_batch in enumerate(dev_iterator):
                out = net(val_batch)
                total += val_batch.correct.size(0)
                loss = criterion(out, val_batch.correct)
                dev_loss += loss.item()
                _, pred_indexes = torch.max(out.data, 1)
                correct += (pred_indexes == val_batch.correct).sum().item()
            print('Epoch: {0}, Train loss: {1}, Dev loss: {2}, Dev accuracy: {3}%'.format(
                epoch, epoch_loss/len(train_iterator), dev_loss/(j+1), correct*100/total))
def predict(net, input, fields):
    """
    Predict answers for the given input
    :param net: Model to predict with
    :param input: Input to predict on
    :param fields: Structure of the data the model expects
    :return: Predictions for the given inputs
    """
    net.eval()
    # Wrap the raw input into a single-example torchtext dataset so it can be
    # batched the same way as the training data before hitting the model.
    example = torch_data.Example.fromlist(input, fields)
    dataset = torch_data.Dataset([example])
    iterator = torch_data.Iterator(dataset, batch_size=1)
    net_in = next(iter(iterator))
    return predict_batch(net, net_in)
def predict_batch(net, batch):
    """Run the model on a single batch and return the argmax class indices.

    Gradients are disabled since this is inference only.
    """
    with torch.no_grad():
        scores = net(batch)
        return torch.max(scores.data, 1)[1]
def eval_model(net, val_iter):
    """
    Evaluate a model's performance on the given test set
    :param net: Model to evaluate
    :param val_iter: Data to evaluate on
    :return: A tuple with the first item being the accuracy. The second item is a list of F1 scores for all classes
             in the task.
    """
    correct = 0
    total = 0
    # Binary task: the confusion matrix tracks classes 0 and 1.
    cm = conf.ConfusionMatrix([0, 1])
    net.eval()
    with torch.no_grad():
        for batch in val_iter:
            total += batch.correct.size(0)
            prediction = predict_batch(net, batch)
            cm.add_entry(batch.correct.tolist(), prediction.tolist())
            correct += (prediction == batch.correct).sum().item()
    return correct/total, cm.get_f1()
| AgiNetz/FIT-VUT-projects | Thesis - Machine Comprehension using Commonsense Knowledge/Source codes/Baseline/model_ops.py | model_ops.py | py | 4,576 | python | en | code | 0 | github-code | 36 |
15239706297 | # Downloadable Modules
import os
import socket
import argparse
import atexit
import time
from threading import Thread
# Self-made Modules
import database_utility as db_util
from ping_utility import ping_service
from handlers import conn_handler
import schemas
import config
# Startup of server
# Initialize Variables
# NOTE(review): these module-level defaults are shadowed by the config-backed
# argparse defaults below — confirm they are still needed.
HOST = '127.0.0.1'
PORT = 10000
OPEN_CONNECTION_LIMIT = 100
# File and Storage Schemas in the database
file_info = schemas.file
storage_info = schemas.storage
# Parse Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--host', type=str,
                    default=config.HOST, help='IP for the server')
parser.add_argument('--port', type=int,
                    default=config.PORT, help='Port for the server')
parser.add_argument('--open_conn_limit', type=int,
                    default=config.OPEN_CONNECTION_LIMIT,
                    help='The limit of number of open connections')
args = parser.parse_args()
args_dict = vars(args)  # NOTE(review): unused — kept for parity with callers?
# Create Database
print("Connecting to database ...")
db_handler = db_util.DB_Interface()
db_handler.connect_db()
print("Connection Successful")
# Start Ping Service
# Daemon thread: it must not keep the process alive on shutdown.
print("Starting Ping service ...")
ping_thread = Thread(target = ping_service,
                     args= (db_handler, storage_info["table_name"], "ip"))
ping_thread.daemon = True
ping_thread.start()
print("Ping Started")
# Create an open socket for accepting connections
print("Creating Socket for acceptings connections")
open_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
open_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
open_socket.bind((args.host, args.port))
open_socket.listen(args.open_conn_limit)
print("Socket Started")
@atexit.register
def cleanup():
    # Close the listening socket when the process exits.
    print("Closing connection to socket")
    open_socket.close()
# Accept loop: one handler thread (with its own DB connection) per client.
while True:
    # Accept connection
    conn, addr = open_socket.accept()
    print("Got a connection from {}".format(addr))
    db_handler_thread = db_util.DB_Interface()
    db_handler_thread.connect_db()
    handler_thread = Thread(
        target = conn_handler, args=(conn, addr, db_handler_thread))
    handler_thread.start()
| arhamchopra/decentralized-file-storage | server/server.py | server.py | py | 2,094 | python | en | code | 1 | github-code | 36 |
22776733908 | import json
import sys
import os
import exceptions as e
import dev
from decouple import config
import time
import itemdat
import pathlib
# Absolute path of the working directory; used as the save-file root below.
pathto = str(pathlib.Path().absolute())
# Developer logging callable provided by the dev module.
devmsg = dev.Dev.SendMessage()
devmsg("Setting up RPG module...")
class RPG():
    def __call__(self):
        """Calling an RPG instance launches the start-up menu."""
        self.start_up()
        pass
    def get_user_inv(self, profile):
        """Print the inventory of profile slot *profile* (name + lore per item).

        Item IDs in the save file are looked up in data/items.json; an unknown
        ID raises CantParseInventoryData.
        """
        devmsg("Getting user inv data...")
        with open((pathto + f"\\playerdata\\player{profile}.json"), "r") as file:
            userdata = json.load(file)
            inv = userdata["INV"]
            # NOTE(review): json.load only yields None here if "INV" is JSON
            # null; an empty list passes this check — confirm intent.
            if inv is None:
                raise e.Fatals.CantFindPlayerDataFiles(f"PLAYER{profile}.JSON IS EMPTY")
            else:
                with open((pathto + "\\data\\items.json"), "r", encoding='utf-8') as file:
                    itemd = json.load(file)
                    itemdata = itemd["ITEMDATA"]
                    for i in inv:
                        try:
                            item_name = itemdata[i]["NAME"]
                            item_lore = itemdata[i]["INFO"]
                            a = "\nПредмет:\n" + item_name + "\n" + item_lore
                            print(a)
                        except KeyError:
                            raise e.JSONErrors.CantParseInventoryData(f"PLAYER{profile}.JSON HAS ILLEGAL ITEMDATA")
    def start_up(self):
        """Interactive console entry point: show the title, list the three
        save slots, open/create the chosen profile and dispatch the action
        menu (inventory, item grant, battle, quit)."""
        # Mark each of the three save slots as occupied or empty.
        prof = {}
        for i in [1, 2, 3]:
            with open((pathto + f"\\playerdata\\player{i}.json"), "r") as file:
                devmsg(f"Checking file number {i}...")
                profiledata = json.load(file)
                isocc = profiledata["?EMPTY"]
                if not isocc:
                    prof[f"prof{i}"] = 'полон'
                else:
                    prof[f"prof{i}"] = 'пуст'
        profstr = "Профиль 1 - " + prof["prof1"] + "\nПрофиль 2 - " + prof["prof2"] + "\nПрофиль 3 - " + prof["prof3"]
        # ASCII-art title, revealed line by line.
        print("▀████ ▐████▀ ▄████████ ▄█ ")
        time.sleep(0.2)
        print(" ███▌ ████▀ ███ ███ ███ ")
        time.sleep(0.2)
        print(" ███ ▐███ ███ ███ ███▌ ")
        time.sleep(0.2)
        print(" ▀███▄███▀ ███ ███ ███▌ ")
        time.sleep(0.2)
        print(" ████▀██▄ ▀███████████ ███▌ ")
        time.sleep(0.2)
        print(" ▐███ ▀███ ███ ███ ███ ")
        time.sleep(0.2)
        print(" ▄███ ███▄ ███ ███ ███ ")
        time.sleep(0.2)
        print("████ ███▄ ███ █▀ █▀ \n")
        print("Добро пожаловать в XAI!\n")
        print("Введите 1 чтобы начать")
        print("Введите 2 чтобы выйти")
        # NOTE(review): profstr is rebuilt identically here — redundant.
        profstr = "Профиль 1 - " + prof["prof1"] + "\nПрофиль 2 - " + prof["prof2"] + "\nПрофиль 3 - " + prof["prof3"]
        filet = True
        # NOTE(review): input() never raises ValueError, so this loop always
        # exits after one iteration.
        while filet is True:
            try:
                answ = input("Ваш ввод:\n")
                filet = False
            except ValueError:
                print("Это не число!")
        if answ == "2":
            devmsg("Exiting...")
            exit()
        elif answ == "1":
            devmsg('User input = {"answ": "1"}')
            print("Вы выбрали начать.")
            print("Введите номер профиля для открытия/создания. Если профиль полон, то он будет открыт, иначе он будет создан.")
            print(profstr)
            filet = True
            while filet is True:
                try:
                    profile_chosen = int(input("Введите номер профиля...\n"))
                    if profile_chosen == "":
                        raise ValueError
                    filet = False
                except ValueError:
                    print("Это не число!")
            devmsg(f"User chose to open profile{profile_chosen}.json")
            devmsg("Trying to open profile data...")
            if profile_chosen >= 1 and profile_chosen <= 3:
                newps = "Вы выбрали профиль " + str(profile_chosen)
                devmsg(f"Profile{profile_chosen} exists and will be opened")
                profiledata = self.Profile.new_profile(self, profile_chosen)
            else:
                devmsg(f"Profile{profile_chosen} doesnt exists!")
                print("Профиль не существует! Выходим...")
                time.sleep(1)
                exit()
            print(f"Вы выбрали и открыли профиль {profile_chosen}. Что теперь?")
            print("Доступные функции:")
            print("1 - просмотреть инвентарь")
            print("2 - добавить предметы в инвентарь (не стабильно). Предметы - это ID с 001 до 008 включительно.")
            print("3 - начать битву")
            print("4 - выйти.")
            filet = True
            while filet is True:
                try:
                    answ = int(input("Введите число.\n"))
                    filet = False
                except ValueError:
                    print("Это не число!")
            # Dispatch the chosen action.
            if answ == 4:
                devmsg("Closing...")
                exit()
            elif answ == 2:
                self.Profile.give(self, profile_chosen)
            elif answ == 1:
                self.get_user_inv(profile_chosen)
            elif answ == 3:
                enemy = itemdat.generate_random_mob(itemdat.Item.Use.GetPlayerLocation(profile_chosen))
                itemdat.Item.Use.battle(itemdat.Item.Use, profile_chosen, enemy)
            answ2 = input("\n")
    class Profile():
        """Save-slot management: profile JSON files under <pathto>/playerdata.

        NOTE(review): the methods below are invoked as unbound calls with the
        *outer* game object passed as ``self`` (e.g. ``self.Profile.new_profile(self, n)``),
        so ``self.Profile`` inside them resolves back to this class — confirm
        before refactoring.
        """
        def __call__(self, slotnum, playername):
            # Calling an instance delegates to new_profile; ``playername`` is
            # forwarded but new_profile's signature does not accept it —
            # NOTE(review): this call would raise TypeError if ever used.
            self.new_profile(slotnum, playername)
            pass
        # Template dumped into a save slot when a fresh profile is created.
        # new_profile() mutates this class-level dict (adds a "NAME" key), so
        # the template is shared state across all profiles.
        sample_profile_data = {
            "?EMPTY": False,
            "ENDINGS_COMPLETED": {
                "POSITIVE": False,
                "NEGATIVE": False,
                "MIDDLE": False,
                "SHREK": False,
                "OUTBREAK": False,
                "SECRET": False,
                "S_DEATH": False,
                "TRUTH": False
            },
            "LOCATIONS VISITED": {
                "FIELDS": False,
                "BROKEN_TOWN": False,
                "ABANDONED_FARMS": False,
                "TEMPLE": False,
                "MOUNT_VILLAGE": False,
                "SUMMIT": False,
                "LAB": False,
                "HARDMODE_LOCS": {
                    "HOPELESS_FIELDS": False,
                    "REMNANTS_OF_TOWN": False,
                    "BURNT_FARMS": False,
                    "FORBIDDEN_TEMPLE": False,
                    "HIGH_PEAKS": False,
                    "LAST_SUMMIT": False,
                    "CLONE_LAB": False
                }
            },
            "DEATH_AMOUNT": 0,
            "HM_ON": False,
            "INV": [],
            "CURRENT_LOCATION": "FIELDS",
            "BALANCE": 0,
            "ENEMIES_SLAIN": {
                "NORMAL": {
                    "FIELDS": 0,
                    "BROKEN_TOWN": 0,
                    "ABANDONED_FARMS": 0,
                    "TEMPLE": 0,
                    "MOUNT_VILLAGE": 0,
                    "SUMMIT": 0,
                    "LAB": 0
                },
                "HM": {
                    "HOPELESS_FIELDS": 0,
                    "REMNANTS_OF_TOWN": 0,
                    "BURNT_FARMS": 0,
                    "FORBIDDEN_TEMPLE": 0,
                    "HIGH_PEAKS": 0,
                    "LAST_SUMMIT": 0,
                    "CLONE_LAB": 0,
                    "STRONGHOLD": 0
                },
                "EXPERIENCE": 0
            }
        }
        def new_profile(self, slotnum:int):
            """Open slot ``slotnum`` if it holds a profile, otherwise create one.

            Returns the loaded profile dict. Despite the name, an existing
            (non-empty) slot is simply re-opened. Raises for out-of-range slots.
            NOTE(review): slot 0 passes the range check even though the UI only
            offers slots 1-3 — confirm intent.
            """
            if slotnum >= 0 and slotnum <= 3:
                with open((pathto + f"\\playerdata\\player{slotnum}.json"), "r", encoding='utf-8') as file:
                    devmsg(f"Creating new profile with number {slotnum}...")
                    profiledata = json.load(file)
                # "?EMPTY" marks an unused slot in the on-disk template.
                isempty = profiledata["?EMPTY"]
                if not isempty:
                    # Slot already has a save: fall back to opening it as-is.
                    devmsg(f"Cant overwrite an existing file with number '{slotnum}'")
                    devmsg("Looking for solution...")
                    time.sleep(1)
                    with open((pathto + f"\\playerdata\\player{slotnum}.json"), "r", encoding='utf-8') as file:
                        devmsg(f"Opening the file instead")
                        profiledata = json.load(file)
                        return profiledata
                else:
                    # Fresh slot: ask for a name, stamp the shared template and
                    # persist it, then re-read the file to return the profile.
                    playername = input("Введите имя нового персонажа\n")
                    self.Profile.sample_profile_data["NAME"] = playername.capitalize()
                    devmsg(f"Dumping sample data into existing json file...")
                    with open((pathto + f"\\playerdata\\player{slotnum}.json"), "w", encoding="utf-8") as file:
                        json.dump(self.Profile.sample_profile_data, file, indent=4, sort_keys=True, ensure_ascii=False)
                    with open((pathto + f"\\playerdata\\player{slotnum}.json"), "r", encoding='utf-8') as file:
                        profiledata = json.load(file)
                        return profiledata
            else:
                devmsg(f"Couldnt create a new profile with number {slotnum}, as it doesn't exist!")
                print("Профиль не существует...")
                time.sleep(1)
                raise e.Fatals.CantFindDataFiles("Профиль не существует")
        def give(self, profile):
            """Prompt for an item ID and append it to profile ``profile``'s inventory.

            Looks the ID up in data/items.json; an unknown ID raises via the
            KeyError handler below.
            """
            with open((pathto + f"\\playerdata\\player{profile}.json"), "r", encoding='utf-8') as file:
                devmsg(f"Loading profile with number {profile}...")
                profiledata = json.load(file)
            print("Выберите айди предмета, чтобы дать персонажу. АЙДИ можно посмотреть на вики, то есть на\nhttps://github.com/Maxuss/XAI/wiki/ID-предметов")
            id_item = input("Введите айди\n")
            try:
                with open((pathto + "\\data\\items.json"), "r", encoding='utf-8') as file:
                    devmsg("Opening itemdata...")
                    itemd = json.load(file)
                    itemdata = itemd["ITEMDATA"]
                # Raises KeyError when the ID is unknown (value itself unused).
                _current = itemdata[id_item]
                profiledata["INV"].append(id_item)
                with open((pathto + f"\\playerdata\\player{profile}.json"), "w", encoding='utf-8') as file:
                    devmsg("Dumping data to profile...")
                    json.dump(profiledata, file, indent=4, sort_keys=True, ensure_ascii=False)
                print("Предмет добавлен!\nПерезайдите, чтобы посмотреть данные инвентаря!")
            except KeyError:
                devmsg("Item with this ID doesn't exist!")
                print("Такой предмет не существует.")
                # NOTE(review): time.sleep() is missing its seconds argument and
                # will raise TypeError before the intended exception below.
                time.sleep()
                raise e.FileErrors.CantGenerateLoot("Предмет не существует.")
22905033389 | import requests
import json
import re
# Scrape the province list from the 122.gov.cn map-selection page and dump it
# to ./info.json, keyed by province code.
req=requests.Session()
header={
    "user-agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36"
}
url="http://www.122.gov.cn/m/map/select"
page=req.get(url=url,headers=header)
print(page.text)
# Capture groups: province code (sfdm), name (sfmc), sub-domain (ym),
# licence-plate prefix (fzjg), and the sftjb flag.
pattern=re.compile(r"<i sfdm=\"(\d+)\" sfmc=\"(.*?)\" ym=\"(.*?)\" fzjg=\"(.*?)\".*?sftjb=\"(.*?)\"></i>",re.S)
d=re.findall(pattern,page.text)
s={}
for i in d:
    s[i[0]]={"address":i[1],"url":i[2],"cp":i[3],"sftjb":i[4]}
print(s)
# Use a context manager so the file handle is flushed and closed deterministically;
# the original json.dump(s, open(...)) leaked the handle.
with open("./info.json","w") as out_file:
    json.dump(s,out_file)
17580721838 | import sqlite3
import sys, os
from PyQt6.QtWidgets import *
from PyQt6.QtCore import *
from PyQt6.QtGui import *
from PyQt6 import *
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from utils.database_table_setup import *
from utils.promo_management_sql import *
class PromoManagementWidget(QWidget):
    """Top-level widget for managing promos: filter field, ADD button, promo table."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle('Promo Management')
        # Ensure the backing database tables exist before the UI queries them.
        self.create_table = CreateDatabaseTable()
        self.create_table.database_table()
        self.init_layout()
    # filter Promo flag
    def filter_Promo(self):
        # Called on every keystroke in the filter field; an empty filter shows
        # the full list, otherwise a filtered view.
        # NOTE(review): display_Promo_table / filter_Promo_table are not defined
        # on PromoListTable in this file — presumably provided elsewhere; verify.
        filter_input = self.filter_Promo_field.text()
        if filter_input == '':
            self.list_Promo_table.display_Promo_table(filter_input)
        else:
            self.list_Promo_table.filter_Promo_table(filter_input)
    def init_layout(self):
        # Build the grid: filter field + ADD button on top, table underneath.
        self.layout = QGridLayout()
        self.filter_Promo_field = QLineEdit()
        self.filter_Promo_field.setPlaceholderText('Filter Promo by Promo name, supplier, etc...')
        self.add_promo_button = QPushButton('ADD')
        self.list_Promo_table = PromoListTable() # -- class ListPromoTable(QTableWidget)
        self.filter_Promo_field.textChanged.connect(self.filter_Promo) # connects to filter_Promo functions every change of text
        # Empty strings signal "create new promo" to save_Promo_window.
        self.add_promo_button.clicked.connect(lambda: self.list_Promo_table.save_Promo_window("", "", "", "",""))
        self.layout.addWidget(self.filter_Promo_field,0,0)
        self.layout.addWidget(self.add_promo_button,0,1)
        self.layout.addWidget(self.list_Promo_table,1,0,1,2)
        self.setLayout(self.layout)
class PromoListTable(QTableWidget):
    """Table of promos with a per-row EDIT button; emits data_saved after edits."""
    # Emitted whenever the save dialog closes and the table has been refreshed.
    data_saved = pyqtSignal()
    def __init__(self):
        super().__init__()
        self.call_sql_utils()
        self.init_layout()
    def call_sql_utils(self):
        # SQL helper objects from utils.promo_management_sql.
        self.PromoSelect = SelectPromoData()
        self.update_Promo = UpdatePromoData()
    def init_layout(self):
        # Column 0 is reserved for the EDIT button widget.
        self.setColumnCount(5)
        self.setHorizontalHeaderLabels(["","Promo Name", "Description", "Type", "Value"])
        self.populate_PromoList()
    def populate_PromoList(self):
        # Re-fill the table from the database; one row per promo.
        self.clearContents()
        data = self.PromoSelect.all_Promo_data()
        self.setRowCount(len(data))
        for row_index, (Promo_name, Promo_desc, promo_type, promo_value) in enumerate(data):
            self.setItem(row_index, 1, QTableWidgetItem(str(Promo_name)))
            self.setItem(row_index, 2, QTableWidgetItem(str(Promo_desc)))
            self.setItem(row_index, 3, QTableWidgetItem(str(promo_type)))
            self.setItem(row_index, 4, QTableWidgetItem(str(promo_value)))
            self.edit_tem_button = QPushButton('EDIT')
            # Default arguments bind the current row's values at definition time
            # (avoids the late-binding closure pitfall).
            self.edit_tem_button.clicked.connect(lambda row_index=row_index, Promo_name=Promo_name, Promo_desc=Promo_desc, Promo_type=promo_type, Promo_value=promo_value : self.save_Promo_window(row_index, Promo_name, Promo_desc, Promo_type, Promo_value))
            self.setCellWidget(row_index, 0, self.edit_tem_button)
    def save_Promo_window(self, row_index, Promo_name, Promo_desc, Promo_type, Promo_value):
        # Modal add/edit dialog; refresh the table and notify listeners afterwards.
        edit_item_dialog = SavePromoDialog(row_index, Promo_name, Promo_desc, Promo_type, Promo_value)
        edit_item_dialog.exec()
        self.populate_PromoList()
        self.data_saved.emit()
class SavePromoDialog(QDialog):
    """Modal dialog to create a promo (empty Promo_name) or edit an existing one."""
    # NOTE(review): declared but never emitted in this class — verify intent.
    data_saved = pyqtSignal()
    def __init__(self, row_index, Promo_name, Promo_desc, Promo_type, Promo_value):
        super().__init__()
        self.newPromo=""
        self.call_sql_utils()
        self.init_layout(row_index, Promo_name, Promo_desc, Promo_type, Promo_value)
    def call_sql_utils(self):
        # SQL helpers: insert for new promos, update for existing ones.
        self.promo_data = InsertPromoData()
        self.update_Promo = UpdatePromoData()
    def init_layout(self, row_index, Promo_name, Promo_desc, Promo_type, Promo_value):
        self.layout = QGridLayout()
        # Restrict the value field to 2-decimal numbers in en-US notation.
        self.numeric_input_validator = QDoubleValidator()
        self.numeric_input_validator.setDecimals(2) # Set the number of decimal places
        self.numeric_input_validator.setNotation(QDoubleValidator.Notation.StandardNotation)
        self.numeric_input_validator.setLocale(QLocale(QLocale.Language.English, QLocale.Country.UnitedStates))
        NameLbl = QLabel("Promo Name ")
        self.NameVal = QLineEdit()
        self.NameVal.setText(str(Promo_name))
        PromoDescLbl = QLabel("Description ")
        self.PromoDescVal = QLineEdit()
        self.PromoDescVal.setText(str(Promo_desc))
        PromoTypeLbl = QLabel("Promo Type ")
        self.PromoTypeVal = QLineEdit()
        self.PromoTypeVal.setText(str(Promo_type))
        PromoValLbl = QLabel("Promo Value ")
        self.PromoVal = QLineEdit()
        self.PromoVal.setText(str(Promo_value))
        self.PromoVal.setValidator(self.numeric_input_validator)
        save_button = QPushButton('SAVE')
        # Original name decides insert-vs-update in Save_Promo.
        save_button.clicked.connect(lambda: self.Save_Promo(Promo_name))
        self.layout.addWidget(NameLbl,0,0)
        self.layout.addWidget(self.NameVal,0,1)
        self.layout.addWidget(PromoDescLbl,1,0)
        self.layout.addWidget(self.PromoDescVal,1,1)
        self.layout.addWidget(PromoTypeLbl,2,0)
        self.layout.addWidget(self.PromoTypeVal,2,1)
        self.layout.addWidget(PromoValLbl,3,0)
        self.layout.addWidget(self.PromoVal,3,1)
        self.layout.addWidget(save_button,4,1)
        self.setLayout(self.layout)
    def Save_Promo(self, Promo_name):
        # Insert when the dialog was opened with no name (ADD flow), otherwise
        # update the row identified by the original Promo_name.
        newPromo = self.NameVal.text()
        newDesc = self.PromoDescVal.text()
        newTpye = self.PromoTypeVal.text()
        newVal = self.PromoVal.text()
        if Promo_name == "":
            self.promo_data.InsertPromo(newPromo, newDesc, newTpye, newVal)
        else:
            self.update_Promo.promo_data(newPromo, newDesc, newTpye, newVal, Promo_name)
        self.close()
if __name__ == '__main__':
    # Manual test harness: launch the promo management window standalone.
    app = QApplication(sys.argv)
    main_window = PromoManagementWidget()
    main_window.show()
    sys.exit(app.exec())
| roymagtibay/pos_system | promo/admin/promo_management.py | promo_management.py | py | 6,249 | python | en | code | null | github-code | 36 |
42186826955 | import discord
from discord.ext import commands
from discord.utils import get
import json
from discord.ext.commands import has_permissions, MissingPermissions
import asyncio
bot = commands.Bot(command_prefix="!")
@bot.event
async def on_ready():
    # Fired once the gateway connection is established and the bot is logged in.
    print("Ticket bot running...")
@bot.command()
async def help_me(ctx):
    """Send the help embed for the ticket bot."""
    # NOTE(review): the description is empty — presumably the help text was
    # meant to be filled in here.
    em = discord.Embed(title="Auroris Tickets Help", description="", color=0x00a8ff)
    await ctx.send(embed=em)
#@bot.command()
async def new(ctx, message_content = ""):
    """Create a numbered ticket channel for ctx.author and post the opening embed.

    State (ticket counter, staff/ping role IDs, open channel IDs) lives in
    data.json next to the script. Not registered as a command here — it is
    called directly from the ``interest`` reaction flow.
    """
    await bot.wait_until_ready()
    with open("data.json") as f:
        data = json.load(f)
    # Allocate the next ticket number.
    ticket_number = int(data["ticket-counter"])
    ticket_number += 1
    ticket_channel = await ctx.guild.create_text_channel("ticket-{}".format(ticket_number))
    # Hide the channel from @everyone (guild.id doubles as the @everyone role id).
    await ticket_channel.set_permissions(ctx.guild.get_role(ctx.guild.id), send_messages=False, read_messages=False)
    # Grant access to staff roles listed in data.json and to the ticket opener.
    for role_id in data["valid-roles"]:
        role = ctx.guild.get_role(role_id)
        await ticket_channel.set_permissions(role, send_messages=True, read_messages=True, add_reactions=True, embed_links=True, attach_files=True, read_message_history=True, external_emojis=True)
    await ticket_channel.set_permissions(ctx.author, send_messages=True, read_messages=True, add_reactions=True, embed_links=True, attach_files=True, read_message_history=True, external_emojis=True)
    em = discord.Embed(title="New ticket from {}#{}".format(ctx.author.name, ctx.author.discriminator), description= "{}".format(message_content), color=0x00a8ff)
    await ticket_channel.send(embed=em)
    # Ping the configured roles, temporarily making them mentionable if needed.
    pinged_msg_content = ""
    non_mentionable_roles = []
    if data["pinged-roles"] != []:
        for role_id in data["pinged-roles"]:
            role = ctx.guild.get_role(role_id)
            pinged_msg_content += role.mention
            pinged_msg_content += " "
            if role.mentionable:
                pass
            else:
                await role.edit(mentionable=True)
                non_mentionable_roles.append(role)
        await ticket_channel.send(pinged_msg_content)
        # Restore mentionability for roles we flipped above.
        for role in non_mentionable_roles:
            await role.edit(mentionable=False)
    # Persist the new channel id and counter back to data.json.
    data["ticket-channel-ids"].append(ticket_channel.id)
    data["ticket-counter"] = int(ticket_number)
    with open("data.json", 'w') as f:
        json.dump(data, f)
    created_em = discord.Embed(title="Auroris Tickets", description="Your ticket has been created at {}".format(ticket_channel.mention), color=0x00a8ff)
    await ctx.send(embed=created_em)
@bot.command()
async def close(ctx):
    """Close the current ticket channel after a 'close' confirmation reply.

    Only acts inside channels recorded in data.json; waits up to 60 s for the
    invoking user to reply 'close' in the same channel, then deletes the
    channel and removes its id from data.json.
    """
    with open('data.json') as f:
        data = json.load(f)
    if ctx.channel.id in data["ticket-channel-ids"]:
        channel_id = ctx.channel.id
        def check(message):
            # Confirmation must come from the same user, in the same channel.
            return message.author == ctx.author and message.channel == ctx.channel and message.content.lower() == "close"
        try:
            em = discord.Embed(title="Interest Tickets", description="Are you sure you want to close this ticket? Reply with `close` if you are sure.", color=0x00a8ff)
            await ctx.send(embed=em)
            await bot.wait_for('message', check=check, timeout=60)
            await ctx.channel.delete()
            # Forget the closed channel so close() no longer matches it.
            index = data["ticket-channel-ids"].index(channel_id)
            del data["ticket-channel-ids"][index]
            with open('data.json', 'w') as f:
                json.dump(data, f)
        except asyncio.TimeoutError:
            em = discord.Embed(title="Auroris Tickets", description="You have run out of time to close this ticket. Please run the command again.", color=0x00a8ff)
            await ctx.send(embed=em)
@bot.command()
async def interest(ctx):
    """Post a sport-selection embed and open a ticket per sport reaction.

    NOTE(review): the reaction is checked at the *top* of the loop, i.e. the
    ticket for a reaction is created on the following iteration, and the loop
    keeps accepting reactions (one ticket each) until wait_for times out after
    30 s — confirm this repeat behavior is intended.
    """
    page = discord.Embed (
        title = 'Which sport are you interested in?',
        description = '''
            '⚽': soccer ball
            '⚾': baseball
            '🥎': softball
            '🏀': basketball
            '🏐': volleyball
            Please click sport emoji you are interested in.
            ''',
        colour = discord.Colour.orange()
    )
    message = await ctx.send(embed = page)
    # Seed the selectable reactions on the menu message.
    await message.add_reaction('⚽') #soccer ball
    await message.add_reaction('⚾') #baseball
    await message.add_reaction('🥎') #softball
    await message.add_reaction('🏀') #basketball
    await message.add_reaction('🏐') #volleyball
    def check(reaction, user):
        # Only react to the command invoker; any emoji is accepted here.
        return user == ctx.author
    reaction = None
    while True:
        if str(reaction) == '⚽':
            await new(ctx, message_content="Welcome to Soccer Ball Ticket! If you wanna close, please type !close")
        elif str(reaction) == '⚾':
            await new(ctx, message_content="Welcome to Baseball Ticket! If you wanna close, please type !close")
        elif str(reaction) == '🥎':
            await new(ctx, message_content="Welcome to Softball Ticket! If you wanna close, please type !close")
        elif str(reaction) == '🏀':
            await new(ctx, message_content="Welcome to Basketball Ticket! If you wanna close, please type !close")
        elif str(reaction) == '🏐':
            await new(ctx, message_content="Welcome to Volleyball Ticket! If you wanna close, please type !close")
        try:
            reaction, user = await bot.wait_for('reaction_add', timeout = 30.0, check = check)
            await message.remove_reaction(reaction, user)
        except:
            # Timeout (or any other error) ends the selection loop.
            break
    await message.clear_reactions()
# SECURITY: the bot token is hard-coded (and committed) in source. It should be
# treated as compromised — revoke it and load the token from an environment
# variable or a non-committed config file instead.
bot.run('ODczMjQ2MjI2MDEzOTEzMTA5.YQ1n7A.PSRzth7b-scgFehCtJEs6yhGI_8')
#bot.run('ODcyNDEwOTc4MTkxNTczMDQy.YQpeCQ.FCeibb_4ee1NkZAo2irmLhH2fLI')
27861498270 | import os
def search_cells(cells, target, size=0):
    """Recursively enumerate value combinations (one value per cell) summing to target.

    ``cells`` is a list of candidate-value lists. Combinations are assembled
    deepest-cell-first, so each returned list holds the cells' values in
    reverse order. ``size`` carries the total cell count through the recursion;
    partial combinations that already reached ``size`` are not extended.
    """
    if size == 0:
        size = len(cells)
    first, rest = cells[0], cells[1:]
    combos = []
    for candidate in first:
        remainder = target - candidate
        if rest:
            # Extend every valid combination of the remaining cells.
            for partial in search_cells(rest, remainder, size):
                if len(partial) != size:
                    partial.append(candidate)
                    combos.append(partial)
        elif remainder == 0:
            # Last cell: keep the candidate only if it hits the target exactly.
            combos.append([candidate])
    return combos
def clean_search(cells, target):
    """Return, per input cell, the sorted set of values usable in some valid combination.

    Transposes the combinations from search_cells into one column per cell,
    de-duplicates each column, and reverses to restore original cell order.
    When no combination exists, the input cells themselves are de-duplicated
    and reversed instead (matching the original fall-through behavior).
    """
    raw_combos = search_cells(cells, target)
    if raw_combos:
        # Combinations come back deepest-cell-first; transpose into columns.
        cells = [list(column) for column in zip(*raw_combos)]
    cleaned = [dedupe(group) for group in cells]
    cleaned.reverse()
    return cleaned
def dedupe(val):
    """Return the distinct elements of *val* as a sorted list."""
    return sorted(set(val))
def validate_input(val):
    """Return True if *val* can be parsed as an integer, else False."""
    try:
        int(val)
    except (ValueError, TypeError):
        # Only conversion failures mean "not a number"; the original bare
        # except also swallowed KeyboardInterrupt/SystemExit.
        return False
    return True
# Interactive REPL: read cage cells as digit strings, a target sum, then print
# which digits can/cannot appear in a valid combination.
while True:
    print('Welcome you filthy cheater')
    print('Press Enter to continue, q to quit')
    cell_list_s = []
    # Collect digit-string cells until any non-integer input (including an
    # empty line or 'q') breaks the loop.
    while True:
        cell_val = input('Cell values:')
        if not validate_input(cell_val):
            break
        else:
            cell_list_s.append(cell_val)
        # NOTE(review): dead code — any non-valid input already broke above.
        if cell_val != '' and not validate_input(cell_val):
            break
    target_s = input("Target value:")
    try:
        target = int(target_s)
    except:
        # Non-numeric target disables the search below (target == 0 is falsy).
        target = 0
    if target and cell_list_s:
        # Expand each digit string into a list of single-digit ints.
        cell_list = []
        for cell_s in cell_list_s:
            cell = []
            for i in range(len(cell_s)):
                try:
                    cell.append(int(cell_s[i]))
                except:
                    # Shouldn't be able to reach this state...
                    os.system('clear')
                    print('Invalid input found goodbye.')
                    break
            cell_list.append(cell)
        results = clean_search(cell_list, target)
        print(f"Results: {results}")
        # Per cell: input digits that appear in no valid combination.
        diff_list = []
        for input_cell, result_cell in zip(cell_list, results):
            diff_list.append([val for val in input_cell if val not in result_cell])
        print(f"Unuseable values: {diff_list}")
    input('Press Enter to startover')
    # NOTE(review): 'clear' is POSIX-only; this fails silently on Windows.
    os.system('clear')
34377275517 | """
Pipeline modules to prepare the data for the PSF subtraction.
"""
import time
import warnings
from typing import Optional, Tuple
import numpy as np
from astropy.coordinates import EarthLocation
from astropy.time import Time
from typeguard import typechecked
from pynpoint.core.processing import ProcessingModule
from pynpoint.util.module import progress, memory_frames
from pynpoint.util.image import create_mask, scale_image, shift_image
class PSFpreparationModule(ProcessingModule):
    """
    Module to prepare the data for PSF subtraction with PCA. The preparation steps include masking
    and an optional normalization.
    """
    __author__ = 'Markus Bonse, Tomas Stolker, Timothy Gebhard, Sven Kiefer'
    @typechecked
    def __init__(self,
                 name_in: str,
                 image_in_tag: str,
                 image_out_tag: str,
                 mask_out_tag: Optional[str] = None,
                 norm: bool = False,
                 resize: Optional[float] = None,
                 cent_size: Optional[float] = None,
                 edge_size: Optional[float] = None) -> None:
        """
        Parameters
        ----------
        name_in : str
            Unique name of the module instance.
        image_in_tag : str
            Tag of the database entry that is read as input.
        image_out_tag : str
            Tag of the database entry with images that is written as output.
        mask_out_tag : str, None, optional
            Tag of the database entry with the mask that is written as output. If set to None, no
            mask array is saved.
        norm : bool
            Normalize each image by its Frobenius norm. Only supported for 3D datasets (i.e.
            regular imaging).
        resize : float, None
            DEPRECATED. This parameter is currently ignored by the module and will be removed in a
            future version of PynPoint.
        cent_size : float, None, optional
            Radius of the central mask (in arcsec). No mask is used when set to None.
        edge_size : float, None, optional
            Outer radius (in arcsec) beyond which pixels are masked. No outer mask is used when set
            to None. If the value is larger than half the image size then it will be set to half
            the image size.
        Returns
        -------
        NoneType
            None
        """
        super().__init__(name_in)
        self.m_image_in_port = self.add_input_port(image_in_tag)
        # The mask output port is optional; it stores only the 2D boolean mask.
        if mask_out_tag is None:
            self.m_mask_out_port = None
        else:
            self.m_mask_out_port = self.add_output_port(mask_out_tag)
        self.m_image_out_port = self.add_output_port(image_out_tag)
        # Radii are stored in arcsec here and converted to pixels in run().
        self.m_cent_size = cent_size
        self.m_edge_size = edge_size
        self.m_norm = norm
        # Raise a DeprecationWarning if the resize argument is used
        if resize is not None:
            warnings.warn('The \'resize\' parameter has been deprecated. Its value is currently '
                          'being ignored, and the argument will be removed in a future version '
                          'of PynPoint.', DeprecationWarning)
    @typechecked
    def run(self) -> None:
        """
        Run method of the module. Masks and normalizes the images.
        Returns
        -------
        NoneType
            None
        """
        # Get the PIXSCALE and MEMORY attributes
        pixscale = self.m_image_in_port.get_attribute('PIXSCALE')
        memory = self._m_config_port.get_attribute('MEMORY')
        # Get the numnber of dimensions and shape
        ndim = self.m_image_in_port.get_ndim()
        im_shape = self.m_image_in_port.get_shape()
        if ndim == 3:
            # Number of images
            nimages = im_shape[-3]
            # Split into batches to comply with memory constraints
            frames = memory_frames(memory, nimages)
        elif ndim == 4:
            # Process all wavelengths per exposure at once: one "frame" per
            # exposure index, i.e. boundaries 0, 1, ..., n_exposures.
            frames = np.linspace(0, im_shape[-3], im_shape[-3]+1)
        if self.m_norm and ndim == 4:
            warnings.warn('The \'norm\' parameter does not support 4D datasets and will therefore '
                          'be ignored.')
        # Convert m_cent_size and m_edge_size from arcseconds to pixels
        if self.m_cent_size is not None:
            self.m_cent_size /= pixscale
        if self.m_edge_size is not None:
            self.m_edge_size /= pixscale
        # Create 2D disk mask which will be applied to every frame
        mask = create_mask((int(im_shape[-2]), int(im_shape[-1])),
                           (self.m_cent_size, self.m_edge_size)).astype(bool)
        # Keep track of the normalization vectors in case we are normalizing the images (if
        # we are not normalizing, this list will remain empty)
        norms = list()
        start_time = time.time()
        # Run the PSFpreparationModule for each subset of frames
        for i in range(frames[:-1].size):
            # Print progress to command line
            progress(i, len(frames[:-1]), 'Preparing images for PSF subtraction...', start_time)
            if ndim == 3:
                # Get the images and ensure they have the correct 3D shape with the following
                # three dimensions: (batch_size, height, width)
                images = self.m_image_in_port[frames[i]:frames[i+1], ]
                if images.ndim == 2:
                    warnings.warn('The input data has 2 dimensions whereas 3 dimensions are '
                                  'required. An extra dimension has been added.')
                    images = images[np.newaxis, ...]
            elif ndim == 4:
                # Process all wavelengths per exposure at once
                images = self.m_image_in_port[:, i, ]
            # Apply the mask, i.e., set all pixels to 0 where the mask is False
            images[:, ~mask] = 0.
            # If desired, normalize the images using the Frobenius norm
            if self.m_norm and ndim == 3:
                im_norm = np.linalg.norm(images, ord='fro', axis=(1, 2))
                images /= im_norm[:, np.newaxis, np.newaxis]
                norms.append(im_norm)
            # Write processed images to output port
            if ndim == 3:
                self.m_image_out_port.append(images, data_dim=3)
            elif ndim == 4:
                self.m_image_out_port.append(images, data_dim=4)
        # Store information about mask
        if self.m_mask_out_port is not None:
            self.m_mask_out_port.set_all(mask)
            self.m_mask_out_port.copy_attributes(self.m_image_in_port)
        # Copy attributes from input port
        self.m_image_out_port.copy_attributes(self.m_image_in_port)
        # If the norms list is not empty (i.e., if we have computed the norm for every image),
        # we can also save the corresponding norm vector as an additional attribute
        if norms:
            self.m_image_out_port.add_attribute(name='norm',
                                                value=np.hstack(norms),
                                                static=False)
        # Save cent_size and edge_size as attributes to the output port
        if self.m_cent_size is not None:
            self.m_image_out_port.add_attribute(name='cent_size',
                                                value=self.m_cent_size * pixscale,
                                                static=True)
        if self.m_edge_size is not None:
            self.m_image_out_port.add_attribute(name='edge_size',
                                                value=self.m_edge_size * pixscale,
                                                static=True)
class AngleInterpolationModule(ProcessingModule):
    """
    Module for calculating the parallactic angle values by interpolating between the begin and end
    value of a data cube.
    """
    __author__ = 'Markus Bonse, Tomas Stolker'
    @typechecked
    def __init__(self,
                 name_in: str,
                 data_tag: str) -> None:
        """
        Parameters
        ----------
        name_in : str
            Unique name of the module instance.
        data_tag : str
            Tag of the database entry for which the parallactic angles are written as attributes.
        Returns
        -------
        NoneType
            None
        """
        super().__init__(name_in)
        # Input and output port share the same tag: the PARANG attribute is
        # written back onto the dataset that is read.
        self.m_data_in_port = self.add_input_port(data_tag)
        self.m_data_out_port = self.add_output_port(data_tag)
    @typechecked
    def run(self) -> None:
        """
        Run method of the module. Calculates the parallactic angles of each frame by linearly
        interpolating between the start and end values of the data cubes. The values are written
        as attributes to *data_tag*. A correction of 360 deg is applied when the start and end
        values of the angles change sign at +/-180 deg.
        Returns
        -------
        NoneType
            None
        """
        parang_start = self.m_data_in_port.get_attribute('PARANG_START')
        parang_end = self.m_data_in_port.get_attribute('PARANG_END')
        steps = self.m_data_in_port.get_attribute('NFRAMES')
        if 'NDIT' in self.m_data_in_port.get_all_non_static_attributes():
            ndit = self.m_data_in_port.get_attribute('NDIT')
            if not np.all(ndit == steps):
                warnings.warn('There is a mismatch between the NDIT and NFRAMES values. The '
                              'parallactic angles are calculated with a linear interpolation by '
                              'using NFRAMES steps. A frame selection should be applied after '
                              'the parallactic angles are calculated.')
        new_angles = []
        start_time = time.time()
        for i, _ in enumerate(parang_start):
            progress(i, len(parang_start), 'Interpolating parallactic angles...', start_time)
            # Apply the 360 deg correction when the cube's angles change sign
            # at +/-180 deg, so the interpolation does not sweep the wrong way.
            if parang_start[i] < -170. and parang_end[i] > 170.:
                parang_start[i] += 360.
            elif parang_end[i] < -170. and parang_start[i] > 170.:
                parang_end[i] += 360.
            if steps[i] == 1:
                # Single-frame cube: use the mid-point of the start/end angles.
                new_angles = np.append(new_angles,
                                       [(parang_start[i] + parang_end[i])/2.])
            elif steps[i] != 1:
                # np.linspace includes both endpoints across NFRAMES samples.
                new_angles = np.append(new_angles,
                                       np.linspace(parang_start[i],
                                                   parang_end[i],
                                                   num=steps[i]))
        self.m_data_out_port.add_attribute('PARANG',
                                           new_angles,
                                           static=False)
class SortParangModule(ProcessingModule):
    """
    Module to sort the images and attributes with increasing ``INDEX``.
    """
    __author__ = 'Tomas Stolker'
    @typechecked
    def __init__(self,
                 name_in: str,
                 image_in_tag: str,
                 image_out_tag: str) -> None:
        """
        Parameters
        ----------
        name_in : str
            Unique name of the module instance.
        image_in_tag : str
            Database tag with the input data.
        image_out_tag : str
            Database tag where the output data will be stored. Should be different from
            ``image_in_tag``.
        Returns
        -------
        NoneType
            None
        """
        super().__init__(name_in)
        self.m_image_in_port = self.add_input_port(image_in_tag)
        self.m_image_out_port = self.add_output_port(image_out_tag)
    @typechecked
    def run(self) -> None:
        """
        Run method of the module. Sorts the images and attributes with increasing ``INDEX``.
        Therefore, the images are sorted by there original (usually chronological) order.
        Returns
        -------
        NoneType
            None
        """
        memory = self._m_config_port.get_attribute('MEMORY')
        index = self.m_image_in_port.get_attribute('INDEX')
        ndim = self.m_image_in_port.get_ndim()
        nimages = self.m_image_in_port.get_shape()[-3]
        index_new = np.zeros(index.shape, dtype=int)
        # PARANG and STAR_POSITION are optional attributes; when present they
        # are re-ordered together with the images.
        if 'PARANG' in self.m_image_in_port.get_all_non_static_attributes():
            parang = self.m_image_in_port.get_attribute('PARANG')
            parang_new = np.zeros(parang.shape)
        else:
            parang_new = None
        if 'STAR_POSITION' in self.m_image_in_port.get_all_non_static_attributes():
            star = self.m_image_in_port.get_attribute('STAR_POSITION')
            star_new = np.zeros(star.shape)
        else:
            star_new = None
        # Permutation that puts INDEX in increasing order.
        index_sort = np.argsort(index)
        frames = memory_frames(memory, nimages)
        start_time = time.time()
        for i, _ in enumerate(frames[:-1]):
            progress(i, len(frames[:-1]), 'Sorting images in time...', start_time)
            index_new[frames[i]:frames[i+1]] = index[index_sort[frames[i]:frames[i+1]]]
            if parang_new is not None:
                parang_new[frames[i]:frames[i+1]] = parang[index_sort[frames[i]:frames[i+1]]]
            if star_new is not None:
                star_new[frames[i]:frames[i+1]] = star[index_sort[frames[i]:frames[i+1]]]
            # HDF5 indexing elements must be in increasing order
            for item in index_sort[frames[i]:frames[i+1]]:
                if ndim == 3:
                    self.m_image_out_port.append(self.m_image_in_port[item, ], data_dim=3)
                elif ndim == 4:
                    self.m_image_out_port.append(self.m_image_in_port[:, item, ], data_dim=4)
        self.m_image_out_port.copy_attributes(self.m_image_in_port)
        self.m_image_out_port.add_history('SortParangModule', 'sorted by INDEX')
        # Overwrite the copied (unsorted) attributes with the sorted versions.
        self.m_image_out_port.add_attribute('INDEX', index_new, static=False)
        if parang_new is not None:
            self.m_image_out_port.add_attribute('PARANG', parang_new, static=False)
        if star_new is not None:
            self.m_image_out_port.add_attribute('STAR_POSITION', star_new, static=False)
        self.m_image_out_port.close_port()
class AngleCalculationModule(ProcessingModule):
"""
Module for calculating the parallactic angles. The start time of the observation is taken and
multiples of the exposure time are added to derive the parallactic angle of each frame inside
the cube. Instrument specific overheads are included.
"""
__author__ = 'Alexander Bohn, Tomas Stolker'
    @typechecked
    def __init__(self,
                 name_in: str,
                 data_tag: str,
                 instrument: str = 'NACO') -> None:
        """
        Parameters
        ----------
        name_in : str
            Unique name of the module instance.
        data_tag : str
            Tag of the database entry for which the parallactic angles are written as attributes.
        instrument : str
            Instrument name ('NACO', 'SPHERE/IRDIS', or 'SPHERE/IFS').
        Returns
        -------
        NoneType
            None
        """
        super().__init__(name_in)
        # Parameters
        self.m_instrument = instrument
        # Set parameters according to choice of instrument
        if self.m_instrument == 'NACO':
            # pupil offset in degrees
            self.m_pupil_offset = 0. # No offset here
            # no overheads in cube mode, since cube is read out after all individual exposures
            # see NACO manual page 62 (v102)
            self.m_O_START = 0.
            self.m_DIT_DELAY = 0.
            self.m_ROT = 0.
            # rotator offset in degrees
            self.m_rot_offset = 89.44 # According to NACO manual page 65 (v102)
        elif self.m_instrument == 'SPHERE/IRDIS':
            # pupil offset in degrees
            self.m_pupil_offset = -135.99 # According to SPHERE manual page 64 (v102)
            # overheads in cube mode (several NDITS) in hours
            self.m_O_START = 0.3 / 3600. # According to SPHERE manual page 90/91 (v102)
            self.m_DIT_DELAY = 0.1 / 3600. # According to SPHERE manual page 90/91 (v102)
            self.m_ROT = 0.838 / 3600. # According to SPHERE manual page 90/91 (v102)
            # rotator offset in degrees
            self.m_rot_offset = 0. # no offset here
        elif self.m_instrument == 'SPHERE/IFS':
            # pupil offset in degrees
            self.m_pupil_offset = -135.99 - 100.48 # According to SPHERE manual page 64 (v102)
            # overheads in cube mode (several NDITS) in hours
            self.m_O_START = 0.3 / 3600. # According to SPHERE manual page 90/91 (v102)
            self.m_DIT_DELAY = 0.2 / 3600. # According to SPHERE manual page 90/91 (v102)
            self.m_ROT = 1.65 / 3600. # According to SPHERE manual page 90/91 (v102)
            # rotator offset in degrees
            self.m_rot_offset = 0. # no offset here
        else:
            # Fail fast on an unsupported instrument string.
            raise ValueError('The instrument argument should be set to either \'NACO\', '
                             '\'SPHERE/IRDIS\', or \'SPHERE/IFS\'.')
        # Angles are written back onto the same dataset tag.
        self.m_data_in_port = self.add_input_port(data_tag)
        self.m_data_out_port = self.add_output_port(data_tag)
    @typechecked
    def _attribute_check(self,
                         ndit: np.ndarray,
                         steps: np.ndarray) -> None:
        """Sanity-check dataset attributes before the angle calculation.

        Warns (rather than fails) on NDIT/NFRAMES mismatches and on SPHERE
        configurations that would yield wrong parallactic angles.
        """
        if not np.all(ndit == steps):
            warnings.warn('There is a mismatch between the NDIT and NFRAMES values. A frame '
                          'selection should be applied after the parallactic angles are '
                          'calculated.')
        if self.m_instrument == 'SPHERE/IFS':
            warnings.warn('AngleCalculationModule has not been tested for SPHERE/IFS data.')
        if self.m_instrument in ('SPHERE/IRDIS', 'SPHERE/IFS'):
            # SPHERE stores RA/DEC in sexagesimal-packed headers; the run()
            # method parses these specific keywords accordingly.
            if self._m_config_port.get_attribute('RA') != 'ESO INS4 DROT2 RA':
                warnings.warn('For SPHERE data it is recommended to use the header keyword '
                              '\'ESO INS4 DROT2 RA\' to specify the object\'s right ascension. '
                              'The input will be parsed accordingly. Using the regular '
                              '\'RA\' keyword will lead to wrong parallactic angles.')
            if self._m_config_port.get_attribute('DEC') != 'ESO INS4 DROT2 DEC':
                warnings.warn('For SPHERE data it is recommended to use the header keyword '
                              '\'ESO INS4 DROT2 DEC\' to specify the object\'s declination. '
                              'The input will be parsed accordingly. Using the regular '
                              '\'DEC\' keyword will lead to wrong parallactic angles.')
    @typechecked
    def run(self) -> None:
        """
        Run method of the module. Calculates the parallactic angles from the position of the object
        on the sky and the telescope location on earth. The start of the observation is used to
        extrapolate for the observation time of each individual image of a data cube. The values
        are written as PARANG attributes to *data_tag*.

        Returns
        -------
        NoneType
            None
        """
        # Load cube sizes: NFRAMES is the number of frames stored per cube,
        # NDIT the number of detector integrations from the headers.
        steps = self.m_data_in_port.get_attribute('NFRAMES')
        ndit = self.m_data_in_port.get_attribute('NDIT')

        # Warn about NDIT/NFRAMES mismatches and suspicious RA/DEC configuration.
        self._attribute_check(ndit, steps)

        # Load exposure time [hours]
        exptime = self.m_data_in_port.get_attribute('DIT')/3600.

        # Load telescope location
        tel_lat = self.m_data_in_port.get_attribute('LATITUDE')
        tel_lon = self.m_data_in_port.get_attribute('LONGITUDE')

        # Load temporary target position
        tmp_ra = self.m_data_in_port.get_attribute('RA')
        tmp_dec = self.m_data_in_port.get_attribute('DEC')

        # Parse to degree depending on instrument
        if 'SPHERE' in self.m_instrument:
            # The SPHERE DROT2 keywords pack the sexagesimal coordinates into a
            # single float (e.g. RA as HHMMSS.sss), so the digit pairs are
            # unpacked below with modulo/division arithmetic.

            # get sign of declination
            tmp_dec_sign = np.sign(tmp_dec)
            tmp_dec = np.abs(tmp_dec)

            # parse RA into hours, minutes, and seconds
            tmp_ra_s = tmp_ra % 100
            tmp_ra_m = ((tmp_ra - tmp_ra_s) / 1e2) % 100
            tmp_ra_h = ((tmp_ra - tmp_ra_s - tmp_ra_m * 1e2) / 1e4)

            # parse DEC into degrees, arcminutes, and arcseconds
            tmp_dec_s = tmp_dec % 100
            tmp_dec_m = ((tmp_dec - tmp_dec_s) / 1e2) % 100
            tmp_dec_d = ((tmp_dec - tmp_dec_s - tmp_dec_m * 1e2) / 1e4)

            # get RA and DEC in degree (1 hour of RA = 15 degrees)
            ra = (tmp_ra_h + tmp_ra_m / 60. + tmp_ra_s / 3600.) * 15.
            dec = tmp_dec_sign * (tmp_dec_d + tmp_dec_m / 60. + tmp_dec_s / 3600.)
        else:
            # Used directly in degrees below (hour_angle = sid_time_deg - ra),
            # so the non-SPHERE RA/DEC keywords are expected in degrees already.
            ra = tmp_ra
            dec = tmp_dec

        # Load start times of exposures (one DATE value per cube)
        obs_dates = self.m_data_in_port.get_attribute('DATE')

        # Load pupil positions during observations; for SPHERE the pupil offset
        # is a fixed constant (set in __init__), so the per-cube value is zero.
        if self.m_instrument == 'NACO':
            pupil_pos = self.m_data_in_port.get_attribute('PUPIL')
        elif self.m_instrument == 'SPHERE/IRDIS':
            pupil_pos = np.zeros(steps.shape)
        elif self.m_instrument == 'SPHERE/IFS':
            pupil_pos = np.zeros(steps.shape)

        new_angles = np.array([])
        pupil_pos_arr = np.array([])

        start_time = time.time()

        # Calculate parallactic angles for each cube
        for i, tmp_steps in enumerate(steps):
            progress(i, len(steps), 'Calculating parallactic angles...', start_time)

            # Apparent sidereal time [hours] at the start of the cube, from the
            # DATE header and the telescope's geographic location.
            t = Time(obs_dates[i].decode('utf-8'),
                     location=EarthLocation(lat=tel_lat, lon=tel_lon))

            sid_time = t.sidereal_time('apparent').value

            # Extrapolate sideral times from start time of the cube for each frame of it,
            # accounting for the cube-start overhead (m_O_START) and the per-frame
            # DIT delay and readout (m_DIT_DELAY, m_ROT), all in hours.
            sid_time_arr = np.linspace(sid_time+self.m_O_START,
                                       (sid_time+self.m_O_START) +
                                       (exptime+self.m_DIT_DELAY + self.m_ROT)*(tmp_steps-1),
                                       tmp_steps)

            # Convert to degrees
            sid_time_arr_deg = sid_time_arr * 15.

            # Calculate hour angle in degrees
            hour_angle = sid_time_arr_deg - ra[i]

            # Conversion to radians:
            hour_angle_rad = np.deg2rad(hour_angle)
            dec_rad = np.deg2rad(dec[i])
            lat_rad = np.deg2rad(tel_lat)

            # Parallactic-angle formula:
            # tan(p) = sin(H) / (cos(dec)*tan(lat) - sin(dec)*cos(H))
            p_angle = np.arctan2(np.sin(hour_angle_rad),
                                 (np.cos(dec_rad)*np.tan(lat_rad) -
                                  np.sin(dec_rad)*np.cos(hour_angle_rad)))

            new_angles = np.append(new_angles, np.rad2deg(p_angle))
            # Repeat the cube's pupil position once per frame
            pupil_pos_arr = np.append(pupil_pos_arr, np.ones(tmp_steps)*pupil_pos[i])

        # Correct for rotator (SPHERE) or pupil offset (NACO)
        if self.m_instrument == 'NACO':
            # See NACO manual page 65 (v102)
            new_angles_corr = new_angles - (90. + (self.m_rot_offset-pupil_pos_arr))
        elif self.m_instrument == 'SPHERE/IRDIS':
            # See SPHERE manual page 64 (v102)
            new_angles_corr = new_angles - self.m_pupil_offset
        elif self.m_instrument == 'SPHERE/IFS':
            # See SPHERE manual page 64 (v102)
            new_angles_corr = new_angles - self.m_pupil_offset

        # Wrap the corrected angles into the interval [-180, 180] degrees
        indices = np.where(new_angles_corr < -180.)[0]
        if indices.size > 0:
            new_angles_corr[indices] += 360.

        indices = np.where(new_angles_corr > 180.)[0]
        if indices.size > 0:
            new_angles_corr[indices] -= 360.

        # One parallactic angle per frame, attached as a non-static attribute
        self.m_data_out_port.add_attribute('PARANG', new_angles_corr, static=False)
class SDIpreparationModule(ProcessingModule):
    """
    Module for preparing continuum frames for dual-band simultaneous differential imaging.
    """

    __author__ = 'Gabriele Cugno, Tomas Stolker'

    @typechecked
    def __init__(self,
                 name_in: str,
                 image_in_tag: str,
                 image_out_tag: str,
                 wavelength: Tuple[float, float],
                 width: Tuple[float, float]) -> None:
        """
        Parameters
        ----------
        name_in : str
            Unique name of the module instance.
        image_in_tag : str
            Tag of the database entry that is read as input.
        image_out_tag : str
            Tag of the database entry that is written as output. Should be different from
            *image_in_tag*.
        wavelength : tuple(float, float)
            The central wavelengths of the line and continuum filter, (line, continuum), in
            arbitrary but identical units.
        width : tuple(float, float)
            The equivalent widths of the line and continuum filter, (line, continuum), in
            arbitrary but identical units.

        Returns
        -------
        NoneType
            None
        """
        super().__init__(name_in)

        self.m_image_in_port = self.add_input_port(image_in_tag)
        self.m_image_out_port = self.add_output_port(image_out_tag)

        self.m_line_wvl = wavelength[0]
        self.m_cnt_wvl = wavelength[1]
        self.m_line_width = width[0]
        self.m_cnt_width = width[1]

    @typechecked
    def run(self) -> None:
        """
        Run method of the module. Normalizes the images for the different filter widths,
        upscales the images, and crops the images to the initial image shape in order to
        align the PSF patterns.

        Returns
        -------
        NoneType
            None
        """
        # Ratio of central wavelengths: spatial upscale factor that aligns the
        # PSF pattern of the continuum images with the line images.
        wvl_factor = self.m_line_wvl/self.m_cnt_wvl

        # Ratio of equivalent widths: flux normalization between the filters.
        width_factor = self.m_line_width/self.m_cnt_width

        nimages = self.m_image_in_port.get_shape()[0]

        start_time = time.time()
        for i in range(nimages):
            progress(i, nimages, 'Preparing images for dual-band SDI...', start_time)

            image = self.m_image_in_port[i, ]

            im_scale = width_factor * scale_image(image, wvl_factor, wvl_factor)

            if i == 0:
                # All frames share the same size, so the crop width is computed
                # only once from the first scaled image.
                # NOTE(review): this assumes wvl_factor >= 1 (line wavelength not
                # shorter than continuum); a negative npix_del would make the
                # slices below invalid -- TODO confirm upstream usage.
                npix_del = im_scale.shape[-1] - image.shape[-1]

                if npix_del % 2 == 0:
                    npix_del_a = int(npix_del/2)
                    npix_del_b = int(npix_del/2)

                else:
                    npix_del_a = int((npix_del-1)/2)
                    npix_del_b = int((npix_del+1)/2)

            if npix_del == 0:
                # Bug fix: with identical scale (wvl_factor == 1) the previous
                # slice [0:-0, 0:-0] evaluated to an empty array. Nothing needs
                # to be cropped in that case, so keep the scaled image as is.
                im_crop = im_scale
            else:
                im_crop = im_scale[npix_del_a:-npix_del_b, npix_del_a:-npix_del_b]

            if npix_del % 2 == 1:
                # An odd crop shifts the image center by half a pixel; undo it.
                im_crop = shift_image(im_crop, (-0.5, -0.5), interpolation='spline')

            self.m_image_out_port.append(im_crop, data_dim=3)

        history = f'(line, continuum) = ({self.m_line_wvl}, {self.m_cnt_wvl})'
        self.m_image_out_port.copy_attributes(self.m_image_in_port)
        self.m_image_out_port.add_history('SDIpreparationModule', history)
        self.m_image_in_port.close_port()
| PynPoint/PynPoint | pynpoint/processing/psfpreparation.py | psfpreparation.py | py | 27,088 | python | en | code | 17 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.