repo_name
stringclasses 400
values | branch_name
stringclasses 4
values | file_content
stringlengths 16
72.5k
| language
stringclasses 1
value | num_lines
int64 1
1.66k
| avg_line_length
float64 6
85
| max_line_length
int64 9
949
| path
stringlengths 5
103
| alphanum_fraction
float64 0.29
0.89
| alpha_fraction
float64 0.27
0.89
|
|---|---|---|---|---|---|---|---|---|---|
fengd13/tcpreplay_GUI
|
refs/heads/master
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 31 10:36:40 2018
@author: fd
"""
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 23 18:00:59 2018
@author: key1234
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 23 14:24:35 2018
@author: fd
"""
import sys
import time
import subprocess
import json
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtWidgets import (QWidget, QLabel, QLineEdit,
QTextEdit, QApplication, QPushButton, QInputDialog,
QHBoxLayout, QVBoxLayout, QListWidget, QFileDialog, QTabWidget, QSlider, QCheckBox,
QMessageBox, QScrollArea,QTextBrowser)
config_dic = {}
send_list = {}
res_cmd = ""
tab_name = []
pcap_path = "./pcap/"
twolineflag = 0
class Checkboxlist(QtWidgets.QWidget):
    """One tab page: a vertical list of checkboxes, one per pcap group.

    Checking/unchecking a box adds/removes that group's pcap file names in the
    module-level ``send_list[test_type]`` list consumed by ``Example.start_test``.
    """

    def __init__(self, test_type):
        # test_type: key into config_dic, e.g. one tab title from the JSON config.
        self.test_type = test_type
        if test_type not in send_list.keys():
            send_list[test_type] = []
        super().__init__()
        layout = QtWidgets.QVBoxLayout()
        items = config_dic[test_type]
        for txt in items.keys():
            # id_ is the list of pcap file names behind this checkbox label.
            id_ = items[txt]
            checkBox = QtWidgets.QCheckBox(txt, self)
            checkBox.id_ = id_
            checkBox.stateChanged.connect(self.checkLanguage)
            layout.addWidget(checkBox)
        self.lMessage = QtWidgets.QLabel(self)
        layout.addWidget(self.lMessage)
        self.setLayout(layout)

    def checkLanguage(self, state):
        """Slot: sync send_list with the toggled checkbox's pcap names."""
        checkBox = self.sender()
        if state == QtCore.Qt.Unchecked:
            for _ in checkBox.id_:
                send_list[self.test_type].remove(_)
        elif state == QtCore.Qt.Checked:
            for _ in checkBox.id_:
                send_list[self.test_type].append(_)
class Example(QWidget):
    """Main window of the tcpreplay GUI.

    Lets the user load a JSON config of pcap groups, pick packets per tab,
    set tcpreplay options, and run/stop the replay in worker threads.
    """

    def __init__(self):
        super().__init__()
        self.initUI()

    def connect(self):
        """Test connectivity by pinging the target IP in a worker thread."""
        if self.IPbox.text() == "":
            text, ok = QInputDialog.getText(self, 'Input Dialog', '输入IP:')
            if ok:
                self.IPbox.setText(str(text))
        # (disabled) prompts for username / password / interface when empty:
        # if self.usernamebox.text() == "":
        #     text, ok = QInputDialog.getText(self, 'Input Dialog', '输入用户名:')
        #     if ok:
        #         self.usernamebox.setText(str(text))
        # if self.passwordbox.text() == "":
        #     text, ok = QInputDialog.getText(self, 'Input Dialog', '输入密码:')
        #     if ok:
        #         self.passwordbox.setText(str(text))
        # if self.ethbox.text() == "":
        #     text, ok = QInputDialog.getText(self, 'Input Dialog', '输入网口号:(eg:eth1)')
        #     if ok:
        #         self.ethbox.setText(str(text))
        self.IP = self.IPbox.text()
        self.username = self.usernamebox.text()
        self.password = self.passwordbox.text()
        self.eth = self.ethbox.text()
        QMessageBox.information(self, "", "需要一段时间,请等待")
        #a, b = subprocess.getstatusoutput('ping ' + self.IP)  # a: exit status, b: output
        self.thread_connect= MyThread(re='ping -t 100 -c 2 ' + self.IP)  # worker thread running the ping command
        self.thread_connect.sec_changed_signal.connect(self.update_state_connect)  # command output -> update_state_connect slot
        #self.thread2.sec_changed_signal.connect(self.update_time)  # timer thread signal -> update_time slot
        self.thread_connect.start()

    def update_state_connect(self,b):
        """Slot: show the ping output and report success or failure."""
        self.resultbox.setText(b)
        # "ms" appears in round-trip-time lines; total loss means unreachable.
        if "ms" in b and "100% packet loss" not in b:
            QMessageBox.information(self,  # information message box
                                    "注意",
                                    "连接成功")
        else:
            QMessageBox.information(self, "注意", "连接失败 请检查IP设置")
        self.thread_connect.terminate()

    def update(self):
        """Process pending Qt events so the UI stays responsive during work."""
        QApplication.processEvents()

    def read_json(self):
        """Let the user pick a *.json config and rebuild the tab pages.

        Resets all module-level state derived from the previous config.
        Returns 1 on any failure (cancelled dialog, unreadable JSON, ...).
        """
        global config_dic
        global res_cmd
        global send_list
        global tab_name
        global pcap_path
        global twolineflag
        try:
            fname = QFileDialog.getOpenFileName(self,
                                                "选取文件",
                                                "./",  # start directory
                                                "配置文件 (*.json)")  # extension filter
            with open(fname[0], 'r') as load_f:
                config_dic = json.load(load_f)
            send_list = {}
            res_cmd = ""
            tab_name = []
            # NOTE(review): pcap_path is cleared here (was "./pcap/" at module
            # level), so pcaps are then resolved relative to the cwd — confirm.
            pcap_path = ""
            res_cmd = fname[0]
            self.tab.clear()
            for test_type in config_dic.keys():
                send_list[test_type] = []
                tab_name.append(test_type)
                self.tab.test_type = Checkboxlist(test_type)
                # Split long tab titles onto two lines when twolineflag is 1.
                l = int(len(test_type) / 2)
                s = test_type[0:l] + '\n' * twolineflag + test_type[l:]
                self.tab.addTab(self.tab.test_type, s)
            self.update()
        except:
            return 1

    def initUI(self):
        """Build the whole window: connection row, tabs, options, buttons, log."""
        # Load the configuration file; fall back to a manual file pick.
        global config_dic
        global twolineflag
        try:
            with open('config.json', 'r') as load_f:
                config_dic = json.load(load_f)
        except:
            config_dic = config_dic
            QMessageBox.information(self,  # information message box
                                    "注意",
                                    "未找到配置文件 请手动选择")
            self.read_json()
        # Connection settings row (hbox1).
        self.IPbox = QLineEdit()
        #self.IPbox.setText("192.168.201.129")
        self.re_num = 1
        self.usernamebox = QLineEdit()
        self.ethbox = QLineEdit()
        self.passwordbox = QLineEdit()
        self.connect_button = QPushButton("测试连接")
        self.update_button = QPushButton("更新配置")
        hbox1 = QHBoxLayout()
        hbox1.addWidget(QLabel("被测试IP:"))
        hbox1.addWidget(self.IPbox)
        hbox1.addWidget(self.connect_button)
        hbox1.addWidget(QLabel(" "))
        hbox1.addWidget(QLabel("本机用户名:"))
        hbox1.addWidget(self.usernamebox)
        hbox1.addWidget(QLabel("本机密码:"))
        hbox1.addWidget(self.passwordbox)
        hbox1.addWidget(QLabel("网口号:"))
        hbox1.addWidget(self.ethbox)
        hbox1.addWidget(self.update_button)
        self.connect_button.clicked.connect(self.connect)
        self.update_button.clicked.connect(self.read_json)
        # Middle: scrollable tab area, one Checkboxlist page per test type.
        self.topFiller = QWidget()
        self.topFiller.setMinimumSize(2500, 2000)  # size of the scrollable area
        self.tab = QTabWidget()
        for test_type in config_dic.keys():
            send_list[test_type] = []
            tab_name.append(test_type)
            self.tab.test_type = Checkboxlist(test_type)
            l = int(len(test_type) / 2)
            s = test_type[0:l] + '\n' * twolineflag + test_type[l:]
            self.tab.addTab(self.tab.test_type, s)
        # tab.tabBar().setFixedHeight(48)
        hbox2 = QHBoxLayout(self.topFiller)
        hbox2.addWidget(self.tab)
        #hbox2.addWidget(self.scroll)
        self.scroll = QScrollArea()
        self.scroll.setWidget(self.topFiller)
        # Optional tcpreplay flags (hbox3) and repeat-count slider (hbox4).
        hbox3 = QHBoxLayout()
        hbox4 = QHBoxLayout()
        self.re_timebox = QSlider(Qt.Horizontal, self)
        self.re_timebox.setMinimum(1)
        self.re_timebox.setMaximum(1000)
        self.re_timebox.valueChanged[int].connect(self.changeValue)
        self.num = QLabel("1")
        self.fullspeed = QCheckBox("全速发送")
        hbox3.addWidget(self.fullspeed)  # -R (NOTE(review): start_test actually emits -t — confirm intended flag)
        hbox4.addWidget(QLabel(" 重复次数:"))
        hbox4.addWidget(self.num)
        hbox4.addWidget(self.re_timebox)
        hbox3.addWidget(QLabel(" 最大发包数量:"))
        self.maxpacknumbox = QLineEdit()  # -L
        hbox3.addWidget(self.maxpacknumbox)
        hbox3.addWidget(QLabel(" 每秒发送报文数:"))
        self.packpsbox = QLineEdit()  # -p
        hbox3.addWidget(self.packpsbox)
        '''hbox3.addWidget(QLabel(" 指定MTU:"))
        self.MTUbox = QLineEdit() # -t
        hbox3.addWidget(self.MTUbox)'''
        hbox3.addWidget(QLabel("发包速度/Mbps:"))
        self.Mbpsbox = QLineEdit()
        hbox3.addWidget(self.Mbpsbox)
        # Start / stop buttons (hbox5).
        self.start_button = QPushButton("开始发送数据包")
        self.start_button.clicked.connect(self.start_test)
        self.stop_button = QPushButton("停止发送数据包")
        self.stop_button.clicked.connect(self.stop_test)
        hbox5 = QHBoxLayout()
        hbox5.addWidget(self.start_button)
        hbox5.addWidget(self.stop_button)
        # hbox5.addWidget(QLabel(" time:"))
        # self.timebox = QLineEdit()
        # hbox5.addWidget(self.timebox)
        # Output/status pane.
        self.resultbox = QTextBrowser()
        vbox = QVBoxLayout()
        vbox.addLayout(hbox1)
        vbox.addWidget(QLabel("选择测试模式:"))
        #vbox.addLayout(hbox2)
        vbox.addWidget(self.scroll)
        vbox.addWidget(QLabel("可选项:"))
        vbox.addLayout(hbox3)
        vbox.addLayout(hbox4)
        vbox.addLayout(hbox5)
        vbox.addWidget(QLabel("状态提示信息:"))
        vbox.addWidget(self.resultbox)
        self.setLayout(vbox)
        # self.setGeometry(300, 300, 290, 150)
        self.setWindowTitle('tcpreplay_gui')
        self.show()

    def changeValue(self, value):
        """Slot: repeat-count slider moved; update label and stored value."""
        self.num.setText(str(value))
        self.re_num = value

    def stop_test(self):
        """Stop an in-progress replay by terminating both worker threads."""
        # Only terminate while a send is actually running, judged from the
        # status text currently shown in the result pane.
        if "数据包发送成功" not in self.resultbox.toPlainText() and " 默认发包速度下" in self.resultbox.toPlainText() :
            try:
                self.thread.terminate()
                self.thread2.terminate()
                self.resultbox.setText("")
            except:
                self.resultbox.setText("")
        else:
            self.resultbox.setText("")

    def start_test(self):
        """Build the tcpreplay command line from the UI and run it in a thread."""
        self.resultbox.setText("")
        # whether tcprewrite is needed
        self.resultbox.setText("")
        # -i: network interface; prompt if the box is empty.
        if self.ethbox.text() == "":
            text, ok = QInputDialog.getText(self, 'Input Dialog', '输入网口号:(eg:eth1)')
            if ok:
                self.ethbox.setText(str(text))
        if self.passwordbox.text() == "":
            text, ok = QInputDialog.getText(self, 'Input Dialog', '输入密码')
            if ok:
                self.passwordbox.setText(str(text))
        # NOTE(review): the sudo password is piped through echo into the shell
        # command — it ends up visible in the process list; confirm acceptable.
        re = "echo " + self.passwordbox.text() + "|sudo -S " + "tcpreplay -i " + self.ethbox.text() + " "
        # -t: send at top speed.
        if self.fullspeed.isChecked():
            re += " -t "
        else:
            re = re
        # -l: loop count.
        if self.re_num > 1:
            re = re + "-l " + str(self.re_num) + " "
        ''''#制定MTU
        if not self.MTUbox.text()=="":
        re+=" - "+ self.MTUbox.text()+' '''''
        # -p: packets per second.
        if not self.packpsbox.text() == "":
            re += ' -p ' + self.packpsbox.text() + ' '
        # -M: send rate in Mbps.
        if not self.Mbpsbox.text() == "":
            re += ' -M ' + self.Mbpsbox.text() + ' '
        # -L: limit on the number of packets sent.
        if not self.maxpacknumbox.text() == "":
            re += ' -L ' + self.maxpacknumbox.text() + ' '
        # Packet file names; the path should match the json file location.
        tabindex = self.tab.currentIndex()
        tn = (tab_name[tabindex])
        pcaplist = send_list[tn]
        if len(pcaplist) == 0:
            QMessageBox.information(self,  # information message box
                                    "注意",
                                    "请选择至少一个包")
            return
        if len(pcaplist) == 1:
            re +=pcap_path+pcaplist[0]
        else:
            # Several pcaps: chain one tcpreplay invocation per file with "&&".
            temp = re
            re = ""
            for i in pcaplist:
                re += temp + pcap_path+i + " &&"
            re = re[0:-2]
        # self.resultbox.setText(self.resultbox.toPlainText() + '\r\n' + re)
        self.starttime = time.time()
        self.resultbox.setText(self.resultbox.toPlainText() + '\r\n' + "正在发送数据包 默认发包速度下可能需要较长时间 请耐心等待。。。")
        self.thread = MyThread(re=re)  # worker thread running the command
        self.thread2 = MyThread2(self.starttime)  # timer thread
        self.thread.sec_changed_signal.connect(self.update_state)  # command output -> update_state slot
        self.thread2.sec_changed_signal.connect(self.update_time)  # timer ticks -> update_time slot
        self.thread.start()
        self.thread2.start()

    def update_state(self, b):
        """Slot: tcpreplay finished; show its statistics or an error dialog."""
        # tcpreplay's summary starts at the "Actual: ..." line.
        if "Actual" in b:
            self.resultbox.setText("数据包发送成功!" + '\r\n结果统计信息:\r\n' + b[b.index("Actual"):])
        else:
            QMessageBox.information(self,  # information message box
                                    "注意",
                                    "未能成功发送 请检查网口设置与软件是否正确安装")
            # self.resultbox.setText(self.resultbox.toPlainText() + '\r\n' + b)
        self.thread.terminate()
        self.thread2.terminate()

    def update_time(self, a):
        """Slot: periodic tick; append the elapsed seconds to the status pane."""
        self.resultbox.setText(
            self.resultbox.toPlainText() + '\r\n' + "已用时间:" + str(round(time.time() - self.starttime)) + "s")
class MyThread(QThread):
    """Worker thread: run one shell command and emit its combined output.

    The command string is passed as ``re``; the output is delivered through
    ``sec_changed_signal`` so the GUI thread can display it.
    """

    sec_changed_signal = pyqtSignal(str)  # signal payload: command output text

    def __init__(self, re=None, parent=None):
        super().__init__(parent)
        self.re = re

    def run(self):
        status, output = subprocess.getstatusoutput(self.re)
        self.sec_changed_signal.emit(output)
class MyThread2(QThread):
    """Timer thread: emits ``sec_changed_signal`` every 5 seconds while running.

    The receiving slot appends the elapsed time to the GUI status pane (it
    computes the elapsed time itself; the emitted payload is just a spacer).
    """

    sec_changed_signal = pyqtSignal(str)

    def __init__(self, re=None, parent=None):
        super().__init__(parent)
        # `re` holds the start timestamp passed by the caller (kept for
        # interface compatibility; the slot recomputes elapsed time itself).
        self.re = re
        self.isrunning = True

    def run(self):
        # Bug fix: the original looped a fixed 1000 times and never consulted
        # self.isrunning, so stop() had no effect. Tick until stopped instead.
        while self.isrunning:
            time.sleep(5)
            self.sec_changed_signal.emit(" ")

    def stop(self):
        """Request the tick loop to end after the current sleep."""
        self.isrunning = False
if __name__ == '__main__':
    # Launch the Qt application and enter its event loop.
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())
|
Python
| 405
| 32.671604
| 113
|
/tcpreplay_gui.py
| 0.519798
| 0.50819
|
fengd13/tcpreplay_GUI
|
refs/heads/master
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 24 09:50:29 2018
@author: fd
"""
import json

# Build the pcap configuration consumed by the tcpreplay GUI:
# {tab title: {checkbox label: [pcap file name, ...]}}
dic = {
    "真实流量测试":
    {
        "dspflow": ["flow.pcap"],
        "flow": ["flow2.pcap", "3.pcap"],
    },
    "恶意流量测试":
    {
        "情况1": ["6.pcap"],
        "情况2": ["7.pcap", "8.pcap"],
        "情况3": ["9.pcap", "10.pcap"],
    },
    "具体流量测试":
    {
        "ARP": ["arp.pcap"],
        "DB2": ["db2.pcap"],
        "DNS": ["dns.pcap"],
        "FTP": ["dns.pcap"],  # NOTE(review): likely meant ftp.pcap — confirm
        "HTTP": ["http.pcap"],
        "HTTPS": ["https.pcap"],
        "MEMCACHE": ["memcached.pcap"],
        "MONGO": ["mongo.pcap"],
        "MYSQL": ["mysql.pcap"],
        "ORACLE": ["oracle.pcap"],
        "REDIS": ["redis.pcap"],
        "SMTP": ["smtp.pcap"],
        "SNMPv1": ["snmp1.pcap"],
        "SNMPv2": ["snmp2.pcap"],
        "SNMPv3": ["snmp3.pcap"],
        "SSH": ["ssh.pcap"],
        "SSL": ["ssl.pcap"],
        "SYBASE": ["sybase.pcap"],
        "TELNET": ["telnet.pcap"],
        "UDP": ["udp.pcap"],
        "VLAN": ["vlan.pcap"],
    }
}
# ensure_ascii=False keeps the Chinese keys human-readable in the file; note
# the file is opened without encoding=, so the platform default encoding is used.
with open("config.json","w") as dump_f:
    json.dump(dic,dump_f,ensure_ascii=False)
with open('config.json', 'r') as json_file:
    """
    Round-trip check: read the file back (decoded with the platform default
    encoding) and parse it to verify the JSON written above is valid.
    """
    data = json_file.read()
    print(type(data))  # type(data) = 'str'
    result = json.loads(data)
    print(result)
|
Python
| 61
| 23.245901
| 45
|
/makejson.py
| 0.396753
| 0.375325
|
Imenbaa/Validity-index
|
refs/heads/master
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 27 20:07:59 2018
@author: Imen
"""
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_blobs
#Create a database of random values of 4 features and a fixed number of clusters
n_clusters=6
dataset,y=make_blobs(n_samples=200,n_features=4,centers=n_clusters)
#plt.scatter(dataset[:,2],dataset[:,3])
#Firstly,i will calculate Vsc for this number of clusters
#Create the k-means
kmeans=KMeans(init="k-means++",n_clusters=n_clusters,random_state=0)
kmeans.fit(dataset)
mu_i=kmeans.cluster_centers_
k_means_labels=kmeans.labels_
mu=dataset.mean(axis=0)
# Between-cluster scatter: SB = sum over centers of (mu_i - mu)(mu_i - mu)^T
# (the (1,4)*(4,1) elementwise product broadcasts to the 4x4 outer product).
SB=np.zeros((4,4))
for line in mu_i:
    diff1=line.reshape(1,4)-mu.reshape(1,4)
    diff2=np.transpose(line.reshape(1,4)-mu.reshape(1,4))
    SB+=diff1*diff2
# Within-cluster scatter Sw, summed per cluster.
Sw=np.zeros((4,4))
sum_in_cluster=np.zeros((4,4))
comp_c=0
for k in range(n_clusters):
    mes_points=(k_means_labels==k)
    cluster_center=mu_i[k]
    for i in dataset[mes_points]:
        diff11=i.reshape(1,4)-cluster_center.reshape(1,4)
        diff22=np.transpose(i.reshape(1,4)-cluster_center.reshape(1,4))
        sum_in_cluster+=diff11*diff22
    # NOTE(review): sum_in_cluster is never reset between clusters, so each
    # `Sw +=` re-adds the scatter of all previous clusters — verify intent.
    Sw+=sum_in_cluster
comp_c+=np.trace(Sw)
# Vsc = separation / compactness.
sep_c=np.trace(SB)
Vsc=sep_c/comp_c
print("For n_clusters=",n_clusters," => Vsc=",Vsc)
#Secondly,i will determine Vsc for each number of cluster from 2 to 10
#Define a function validity_index
def validity_index(c):
    """Compute the Vsc validity index (separation / compactness) of a k-means
    clustering of the module-level ``dataset`` with ``c`` clusters.

    Separation is trace(SB), the between-cluster scatter of the centers around
    the global mean; compactness is trace(Sw), the within-cluster scatter.
    Returns the ratio as a float.
    """
    kmeans = KMeans(init="k-means++", n_clusters=c, random_state=0)
    kmeans.fit(dataset)
    # mu_i: cluster centers; mu: center of the whole dataset.
    mu_i = kmeans.cluster_centers_
    k_means_labels = kmeans.labels_
    mu = dataset.mean(axis=0)
    # Generalized: the original hard-coded 4 features in every reshape.
    n_features = dataset.shape[1]
    # Between-cluster scatter matrix (broadcasted outer products).
    SB = np.zeros((n_features, n_features))
    for line in mu_i:
        diff1 = line.reshape(1, n_features) - mu.reshape(1, n_features)
        diff2 = np.transpose(diff1)
        SB += diff1 * diff2
    comp_c = 0
    # Within-cluster scatter matrix.
    Sw = np.zeros((n_features, n_features))
    for k in range(c):
        mes_points = (k_means_labels == k)
        cluster_center = mu_i[k]
        # Bug fix: reset the per-cluster accumulator each iteration. The
        # original initialized it once before the loop, so every `Sw +=`
        # re-added the scatter of all previously processed clusters.
        sum_in_cluster = np.zeros((n_features, n_features))
        for i in dataset[mes_points]:
            diff11 = i.reshape(1, n_features) - cluster_center.reshape(1, n_features)
            diff22 = np.transpose(diff11)
            sum_in_cluster += diff11 * diff22
        Sw += sum_in_cluster
    # Compactness and separation are the traces of the scatter matrices.
    comp_c += np.trace(Sw)
    sep_c = np.trace(SB)
    Vsc = sep_c / comp_c
    return Vsc
#We have to find that the max Vsc is for the n_cluster defined initially
Vsc_vector=[]
cc=[2,3,4,5,6,7,8,9,10]
for i in cc:
    Vsc_vector.append(validity_index(i))
# +2 converts the list index back to a cluster count (cc starts at 2).
print("Number of clusters which has max of Vsc:",Vsc_vector.index(max(Vsc_vector))+2 ,"=> Vsc=",max(Vsc_vector))
|
Python
| 88
| 31.852272
| 112
|
/validity_index_.py
| 0.657939
| 0.621349
|
thinkAmi-sandbox/AWS_CDK-sample
|
refs/heads/master
|
import os
import boto3
from numpy.random import rand
def lambda_handler(event, context):
    """First Step Functions task: write a message plus a random value to S3.

    Reads the target bucket name from the BUCKET_NAME environment variable
    (injected by the CDK stack) and returns both the body written and the
    original event message.
    """
    body = f'{event["message"]} \n value: {rand()}'
    client = boto3.client('s3')
    client.put_object(
        Bucket=os.environ['BUCKET_NAME'],
        Key='sfn_first.txt',
        Body=body,
    )
    return {
        'body': body,
        'message': event['message'],
    }
|
Python
| 18
| 19.722221
| 51
|
/step_functions/step_functions/lambda_function/first/lambda_function.py
| 0.571046
| 0.563003
|
thinkAmi-sandbox/AWS_CDK-sample
|
refs/heads/master
|
import pathlib
from aws_cdk import core
from aws_cdk.aws_iam import PolicyStatement, Effect, ManagedPolicy, ServicePrincipal, Role
from aws_cdk.aws_lambda import AssetCode, LayerVersion, Function, Runtime
from aws_cdk.aws_s3 import Bucket
from aws_cdk.aws_stepfunctions import Task, StateMachine, Parallel
from aws_cdk.aws_stepfunctions_tasks import InvokeFunction, StartExecution
from settings import AWS_SCIPY_ARN
class StepFunctionsStack(core.Stack):
    """CDK stack wiring four Lambdas, an S3 bucket and two Step Functions
    state machines: a main machine (first task + parallel fan-out) and a sub
    machine (second -> third, with a shared error handler)."""

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Base directory holding one sub-directory per Lambda's source.
        self.lambda_path_base = pathlib.Path(__file__).parents[0].joinpath('lambda_function')
        self.bucket = self.create_s3_bucket()
        self.managed_policy = self.create_managed_policy()
        self.role = self.create_role()
        self.first_lambda = self.create_first_lambda()
        self.second_lambda = self.create_other_lambda('second')
        self.third_lambda = self.create_other_lambda('third')
        self.error_lambda = self.create_other_lambda('error')
        self.sub_state_machine = self.create_sub_state_machine()
        self.main_state_machine = self.create_main_state_machine()

    def create_s3_bucket(self):
        # Bucket the first Lambda writes its result file into.
        return Bucket(
            self,
            'S3 Bucket',
            bucket_name=f'sfn-bucket-by-aws-cdk',
        )

    def create_managed_policy(self):
        # Allow PutObject on (only) the objects of the bucket above.
        statement = PolicyStatement(
            effect=Effect.ALLOW,
            actions=[
                "s3:PutObject",
            ],
            resources=[
                f'{self.bucket.bucket_arn}/*',
            ]
        )
        return ManagedPolicy(
            self,
            'Managed Policy',
            managed_policy_name='sfn_lambda_policy',
            statements=[statement],
        )

    def create_role(self):
        # Execution role assumed by the Lambda service, carrying the policy.
        service_principal = ServicePrincipal('lambda.amazonaws.com')
        return Role(
            self,
            'Role',
            assumed_by=service_principal,
            role_name='sfn_lambda_role',
            managed_policies=[self.managed_policy],
        )

    def create_first_lambda(self):
        """First Lambda: gets the SciPy layer, the S3 role, and the bucket name."""
        function_path = str(self.lambda_path_base.joinpath('first'))
        code = AssetCode(function_path)
        scipy_layer = LayerVersion.from_layer_version_arn(
            self, f'sfn_scipy_layer_for_first', AWS_SCIPY_ARN)
        return Function(
            self,
            f'id_first',
            # Directory containing the Lambda source code
            code=code,
            # Handler name inside that source
            handler='lambda_function.lambda_handler',
            # Runtime version
            runtime=Runtime.PYTHON_3_7,
            # Environment variables
            environment={'BUCKET_NAME': self.bucket.bucket_name},
            function_name='sfn_first_lambda',
            layers=[scipy_layer],
            memory_size=128,
            role=self.role,
            timeout=core.Duration.seconds(10),
        )

    def create_other_lambda(self, function_name):
        """Generic Lambda factory for the second/third/error functions."""
        function_path = str(self.lambda_path_base.joinpath(function_name))
        return Function(
            self,
            f'id_{function_name}',
            code=AssetCode(function_path),
            handler='lambda_function.lambda_handler',
            runtime=Runtime.PYTHON_3_7,
            function_name=f'sfn_{function_name}_lambda',
            memory_size=128,
            timeout=core.Duration.seconds(10),
        )

    def create_sub_state_machine(self):
        """Sub state machine: second task -> third task, errors -> error task."""
        error_task = Task(
            self,
            'Error Task',
            task=InvokeFunction(self.error_lambda),
        )
        # Second task
        second_task = Task(
            self,
            'Second Task',
            task=InvokeFunction(self.second_lambda),
            # Narrow the incoming items handed to the Lambda
            input_path="$['first_result', 'parallel_no', 'message', 'context_name', 'const_value']",
            # Store the result under the second_result key
            result_path='$.second_result',
            # Narrow the items forwarded to the next task
            output_path="$['second_result', 'parallel_no']"
        )
        # Route failures to the error task
        second_task.add_catch(error_task, errors=['States.ALL'])
        # Third task
        third_task = Task(
            self,
            'Third Task',
            task=InvokeFunction(self.third_lambda),
            # Replace the state with third_lambda's result only
            result_path='$',
        )
        # Same error handling here
        third_task.add_catch(error_task, errors=['States.ALL'])
        # Run the third task after the second
        definition = second_task.next(third_task)
        return StateMachine(
            self,
            'Sub StateMachine',
            definition=definition,
            state_machine_name='sfn_sub_state_machine',
        )

    def create_main_state_machine(self):
        """Main state machine: first task, then the parallel fan-out."""
        first_task = Task(
            self,
            'S3 Lambda Task',
            task=InvokeFunction(self.first_lambda, payload={'message': 'Hello world'}),
            comment='Main StateMachine',
        )
        parallel_task = self.create_parallel_task()
        # After the first task, run the parallel task (sub state machines)
        definition = first_task.next(parallel_task)
        return StateMachine(
            self,
            'Main StateMachine',
            definition=definition,
            state_machine_name='sfn_main_state_machine',
        )

    def create_parallel_task(self):
        """Fan out three executions of the sub state machine in parallel."""
        parallel_task = Parallel(
            self,
            'Parallel Task',
        )
        for i in range(1, 4):
            sub_task = StartExecution(
                self.sub_state_machine,
                input={
                    'parallel_no': i,
                    'first_result.$': '$',
                    # Forward the message from first_task's response
                    'message.$': '$.message',
                    # Name of the current state from the context object
                    'context_name.$': '$$.State.Name',
                    # Two constants (ignore_value is dropped by the task input filter)
                    'const_value': 'ham',
                    'ignore_value': 'ignore',
                },
            )
            invoke_sub_task = Task(
                self,
                f'Sub Task {i}',
                task=sub_task,
            )
            parallel_task.branch(invoke_sub_task)
        return parallel_task
|
Python
| 203
| 29.906404
| 100
|
/step_functions/step_functions/step_functions_stack.py
| 0.545904
| 0.541281
|
thinkAmi-sandbox/AWS_CDK-sample
|
refs/heads/master
|
def lambda_handler(event, context):
    """Third step: fail deliberately for parallel branch 1, otherwise return
    a fixed message (the error message translates to "forcing an error")."""
    branch = event['parallel_no']
    if branch == 1:
        raise Exception('強制的にエラーとします')
    return 'only 3rd message.'
|
Python
| 5
| 27.200001
| 38
|
/step_functions/step_functions/lambda_function/third/lambda_function.py
| 0.64539
| 0.631206
|
thinkAmi-sandbox/AWS_CDK-sample
|
refs/heads/master
|
def lambda_handler(event, context):
    """Second step: fail for even parallel_no (message translates to "even
    number"), otherwise pass message and const_value through."""
    if event['parallel_no'] % 2 == 0:
        raise Exception('偶数です')
    return {
        'message': event['message'],
        'const_value': event['const_value'],
    }
|
Python
| 8
| 24.875
| 43
|
/step_functions/step_functions/lambda_function/second/lambda_function.py
| 0.555556
| 0.545894
|
thinkAmi-sandbox/AWS_CDK-sample
|
refs/heads/master
|
import json


def lambda_handler(event, context):
    """Error step: decode the JSON-encoded 'Cause' of a caught Step Functions
    failure so its \\uXXXX escapes become readable characters again.

    The event looks like:
        {"Error": "...", "Cause": "<json string with errorMessage/errorType/stackTrace>"}
    """
    cause = json.loads(event['Cause'])
    return {'error_message': cause}
|
Python
| 21
| 31.952381
| 95
|
/step_functions/step_functions/lambda_function/error/lambda_function.py
| 0.49422
| 0.446532
|
thinkAmi-sandbox/AWS_CDK-sample
|
refs/heads/master
|
#!/usr/bin/env python3
from aws_cdk import core
from step_functions.step_functions_stack import StepFunctionsStack
app = core.App()
# The second argument becomes the CloudFormation stack name.
StepFunctionsStack(app, 'step-functions')
app.synth()
|
Python
| 13
| 15.384615
| 66
|
/step_functions/app.py
| 0.774648
| 0.765258
|
thinkAmi-sandbox/AWS_CDK-sample
|
refs/heads/master
|
# ARN of the AWS-provided SciPy layer for Python 3.7 Lambdas.
# `region` and `account_id` are placeholders — replace with real values.
AWS_SCIPY_ARN = 'arn:aws:lambda:region:account_id:layer:AWSLambda-Python37-SciPy1x:2'
|
Python
| 1
| 85
| 85
|
/step_functions/step_functions/settings.example.py
| 0.8
| 0.752941
|
yywang0514/dsnre
|
refs/heads/master
|
import sys
import os
import time
import numpy as np
import torch
import torch.nn.functional as F
import argparse
import logging
from lib import *
from model import *
def train(options):
    """Train the distantly-supervised relation-extraction model.

    Loads the vocabulary and train/test bags, builds the model on GPU, then
    runs mini-batch training with periodic progress display, evaluation on
    the test set, and checkpointing under ``options.folder/checkpoints``.
    """
    if not os.path.exists(options.folder):
        os.mkdir(options.folder)
    # Log to a file inside the output folder as well as via display().
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s: %(name)s: %(levelname)s: %(message)s")
    hdlr = logging.FileHandler(os.path.join(options.folder, options.file_log), mode = "w")
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.info("python %s" %(" ".join(sys.argv)))
    #################################################################################
    start_time = time.time()
    msg = "Loading dicts from %s..." %(options.file_dic)
    display(msg)
    vocab = dicfold(options.file_dic)
    word2idx, pre_train_emb, part_point = build_word2idx(vocab, options.file_emb)
    msg = "Loading data from %s..." %(options.file_train)
    display(msg)
    # Renamed from `train`/`test`: the originals shadowed this function's name.
    train_data = datafold(options.file_train)
    msg = "Loading data from %s..." %(options.file_test)
    display(msg)
    test_data = datafold(options.file_test)
    end_time = time.time()
    msg = "Loading data time: %f seconds" %(end_time - start_time)
    display(msg)
    options.size_vocab = len(word2idx)
    if options.devFreq == -1:
        # Default: evaluate once per epoch.
        options.devFreq = (len(train_data) + options.batch_size - 1) // options.batch_size
    msg = "#inst in train: %d" %(len(train_data))
    display(msg)
    msg = "#inst in test %d" %(len(test_data))
    display(msg)
    msg = "#word vocab: %d" %(options.size_vocab)
    display(msg)
    msg = "=" * 30 + "Hyperparameter:" + "=" * 30
    display(msg)
    for attr, value in sorted(vars(options).items(), key = lambda x: x[0]):
        msg = "{}={}".format(attr.upper(), value)
        display(msg)
    #################################################################################
    msg = "=" * 30 + "model:" + "=" * 30
    display(msg)
    os.environ["CUDA_VISIBLE_DEVICES"] = options.gpus
    if options.seed is not None:
        torch.manual_seed(options.seed)
        np.random.seed(options.seed)
    model = Model(options.fine_tune,
                  pre_train_emb,
                  part_point,
                  options.size_vocab,
                  options.dim_emb,
                  options.dim_proj,
                  options.head_count,
                  options.dim_FNN,
                  options.act_str,
                  options.num_layer,
                  options.num_class,
                  options.dropout_rate).cuda()
    if os.path.exists("{}.pt".format(options.reload_model)):
        model.load_state_dict(torch.load("{}.pt".format(options.reload_model)))
    # Bug fix: this was `filter(...)`, a one-shot iterator. After the optimizer
    # consumed it, clip_grad_norm_ below received an exhausted iterator and
    # gradient clipping silently did nothing. A list can be iterated repeatedly.
    parameters = [param for param in model.parameters() if param.requires_grad]
    optimizer = optimizer_wrapper(options.optimizer, options.lr, parameters)
    msg = "\n{}".format(model)
    display(msg)
    #################################################################################
    checkpoint_dir = os.path.join(options.folder, "checkpoints")
    if not os.path.exists(checkpoint_dir):
        os.mkdir(checkpoint_dir)
    best_path = os.path.join(checkpoint_dir, options.saveto)
    #################################################################################
    msg = "=" * 30 + "Optimizing:" + "=" * 30
    display(msg)
    [train_rels, train_nums, train_sents, train_poss, train_eposs] = bags_decompose(train_data)
    [test_rels, test_nums, test_sents, test_poss, test_eposs] = bags_decompose(test_data)
    train_idx_list = np.arange(len(train_data))
    steps_per_epoch = (len(train_data) + options.batch_size - 1) // options.batch_size
    n_updates = 0
    for e in range(options.nepochs):
        np.random.shuffle(train_idx_list)
        for step in range(steps_per_epoch):
            batch_index = train_idx_list[step * options.batch_size: (step + 1) * options.batch_size]
            batch_rels = [train_rels[m][0] for m in batch_index]
            batch_nums = [train_nums[m] for m in batch_index]
            batch_sents = [train_sents[m] for m in batch_index]
            batch_poss = [train_poss[m] for m in batch_index]
            batch_eposs = [train_eposs[m] for m in batch_index]
            # Pick one instance per bag for this update (at-least-one strategy).
            batch_data = select_instance(batch_rels,
                                         batch_nums,
                                         batch_sents,
                                         batch_poss,
                                         batch_eposs,
                                         model)
            disp_start = time.time()
            model.train()
            n_updates += 1
            optimizer.zero_grad()
            logit = model(batch_data[0], batch_data[1], batch_data[2])
            loss = F.cross_entropy(logit, batch_data[3])
            loss.backward()
            if options.clip_c != 0:
                torch.nn.utils.clip_grad_norm_(parameters, options.clip_c)
            optimizer.step()
            disp_end = time.time()
            if np.mod(n_updates, options.dispFreq) == 0:
                msg = "Epoch: %d, Step: %d, Loss: %f, Time: %.2f sec" %(e, n_updates, loss.cpu().item(), disp_end - disp_start)
                display(msg)
            if np.mod(n_updates, options.devFreq) == 0:
                msg = "=" * 30 + "Evaluating" + "=" * 30
                display(msg)
                model.eval()
                test_predict = predict(test_rels, test_nums, test_sents, test_poss, test_eposs, model)
                test_pr = positive_evaluation(test_predict)
                msg = 'test set PR = [' + str(test_pr[0][-1]) + ' ' + str(test_pr[1][-1]) + ']'
                display(msg)
                msg = "Saving model..."
                display(msg)
                torch.save(model.state_dict(), "{}_step_{}.pt".format(best_path, n_updates))
                msg = "Model checkpoint has been saved to {}_step_{}.pt".format(best_path, n_updates)
                display(msg)
    end_time = time.time()
    msg = "Optimizing time: %f seconds" %(end_time - start_time)
    display(msg)
def predict(rels, nums, sents, poss, eposs, model):
    """Predict one relation label per bag of sentences.

    For each bag, every instance is scored; if any instance predicts a
    positive (non-zero) relation, the bag takes the highest-scoring positive
    prediction, otherwise it stays negative (label 0) with the best negative
    score. Returns [predicted labels, their scores, gold labels].
    """
    numBags = len(rels)
    predict_y = np.zeros((numBags), dtype=np.int32)
    predict_y_prob = np.zeros((numBags), dtype=np.float32)
    y = np.asarray(rels, dtype='int32')
    for bagIndex, insRel in enumerate(rels):
        insNum = nums[bagIndex]
        maxP = -1              # best score among negative (label 0) instances
        pred_rel_type = 0      # bag label; stays 0 unless a positive is found
        max_pos_p = -1         # best score among positive instances
        positive_flag = False
        for m in range(insNum):
            insX = sents[bagIndex][m]
            epos = eposs[bagIndex][m]
            sel_x, sel_len, sel_epos = prepare_data([insX], [epos])
            results = model(sel_x, sel_len, sel_epos)
            rel_type = results.argmax()
            # Once a positive has been seen, negative instances can't change
            # the outcome, so skip them.
            if positive_flag and rel_type == 0:
                continue
            else:
                # at least one instance is positive
                tmpMax = results.max()
                if rel_type > 0:
                    positive_flag = True
                    if tmpMax > max_pos_p:
                        max_pos_p = tmpMax
                        pred_rel_type = rel_type
                else:
                    if tmpMax > maxP:
                        maxP = tmpMax
        if positive_flag:
            predict_y_prob[bagIndex] = max_pos_p
        else:
            predict_y_prob[bagIndex] = maxP
        predict_y[bagIndex] = pred_rel_type
    return [predict_y, predict_y_prob, y]
def main(argv):
    """Parse command-line options and launch training."""
    parser = argparse.ArgumentParser()
    # Paths and files.
    parser.add_argument("--folder", help = "the dir of model", default = "workshop")
    parser.add_argument("--file_dic", help = "the file of vocabulary", default = "./data/50/dict.txt")
    parser.add_argument("--file_train", help = "the file of training data", default = "./data/gap_40_len_80/train_filtered.data")
    parser.add_argument("--file_test", help = "the file of testing data", default = "./data/gap_40_len_80/test_filtered.data")
    # parser.add_argument("--file_emb", help = "the file of embedding", default = "./data/50/dict_emb.txt")
    parser.add_argument("--file_emb", help = "the file of embedding", default = "")
    parser.add_argument("--file_log", help = "the log file", default = "train.log")
    parser.add_argument("--reload_model", help = "the pretrained model", default = "")
    parser.add_argument("--saveto", help = "the file to save the parameter", default = "model")
    # Model hyperparameters.
    parser.add_argument("--seed", help = "the random seed", default = 1234, type = int)
    parser.add_argument("--size_vocab", help = "the size of vocabulary", default = 10000, type = int)
    parser.add_argument("--dim_emb", help = "the dimension of the word embedding", default = 256, type = int)
    parser.add_argument("--dim_proj", help = "the dimension of the hidden state", default = 256, type = int)
    parser.add_argument("--head_count", help = "the num of head in multi head attention", default = 8, type = int)
    parser.add_argument("--dim_FNN", help = "the dimension of the positionwise FNN", default = 256, type = int)
    parser.add_argument("--act_str", help = "the activation function of the positionwise FNN", default = "relu")
    parser.add_argument("--num_layer", help = "the num of layers", default = 6, type = int)
    parser.add_argument("--num_class", help = "the number of labels", default = 27, type = int)
    parser.add_argument("--position_emb", help = "if true, the position embedding will be used", default = False, action = "store_true")
    parser.add_argument("--fine_tune", help = "if true, the pretrained embedding will be fine tuned", default = False, action = "store_true")
    # Optimization settings.
    parser.add_argument("--optimizer", help = "optimization algorithm", default = "adam")
    parser.add_argument("--lr", help = "learning rate", default = 0.0004, type = float)
    parser.add_argument("--dropout_rate", help = "dropout rate", default = 0.5, type = float)
    parser.add_argument("--clip_c", help = "grad clip", default = 10.0, type = float)
    parser.add_argument("--nepochs", help = "the max epoch", default = 30, type = int)
    parser.add_argument("--batch_size", help = "batch size", default = 32, type = int)
    parser.add_argument("--dispFreq", help = "the frequence of display", default = 100, type = int)
    parser.add_argument("--devFreq", help = "the frequence of evaluation", default = -1, type = int)
    parser.add_argument("--wait_N", help = "use to early stop", default = 1, type = int)
    parser.add_argument("--patience", help = "use to early stop", default = 7, type = int)
    parser.add_argument("--maxlen", help = "max length of sentence", default = 100, type = int)
    parser.add_argument("--gpus", help = "specify the GPU IDs", default = "0")
    options = parser.parse_args(argv)
    train(options)

if "__main__" == __name__:
    main(sys.argv[1:])
|
Python
| 275
| 35.825455
| 138
|
/train.py
| 0.633356
| 0.621803
|
yywang0514/dsnre
|
refs/heads/master
|
import torch
import torch.nn as nn
import math
class LayerNorm(nn.Module):
"""Layer Normalization class"""
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class MLP(nn.Module):
    """A single linear projection (thin wrapper around ``nn.Linear``)."""

    def __init__(self, dim_in, dim_out):
        super(MLP, self).__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self._init_params()

    def _init_params(self):
        # One affine map: dim_in -> dim_out.
        self.mlp = nn.Linear(in_features=self.dim_in, out_features=self.dim_out)

    def forward(self, inp):
        return self.mlp(inp)
class BiLstm(nn.Module):
    """Bidirectional LSTM encoder for length-padded batches.

    Input/output are time-major: (seq_len, batch, features). Outputs have
    2 * dim_out features (one dim_out per direction). Requires CUDA — the
    permutation index tensors are created with .cuda().
    """
    def __init__(self, dim_in, dim_out):
        super(BiLstm, self).__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self._init_params()
    def _init_params(self):
        # hidden_size is per direction; bidirectional doubles the output width.
        self.bilstm = nn.LSTM(input_size = self.dim_in,
                              hidden_size = self.dim_out,
                              bidirectional = True)
    def forward(self, inp, inp_len):
        # pack_padded_sequence needs sequences sorted by decreasing length, so:
        # sort the batch dimension (dim 1), run the LSTM on the packed batch,
        # then restore the original order below.
        sorted_inp_len, sorted_idx = torch.sort(inp_len, dim = 0, descending=True)
        sorted_inp = torch.index_select(inp, dim = 1, index = sorted_idx)
        pack_inp = torch.nn.utils.rnn.pack_padded_sequence(sorted_inp, sorted_inp_len)
        proj_inp, _ = self.bilstm(pack_inp)
        # pad_packed_sequence returns (padded_output, lengths); [0] is used below.
        proj_inp = torch.nn.utils.rnn.pad_packed_sequence(proj_inp)
        # Build the inverse permutation of sorted_idx via scatter_, then undo the sort.
        unsorted_idx = torch.zeros(sorted_idx.size()).long().cuda().scatter_(0, sorted_idx, torch.arange(inp.size()[1]).long().cuda())
        unsorted_proj_inp = torch.index_select(proj_inp[0], dim = 1, index = unsorted_idx)
        return unsorted_proj_inp
class Word_Emb(nn.Module):
    """Token embedding layer with optional pre-trained vectors.

    The vocabulary is partitioned at ``part_point``: ids below it are
    randomly initialized, ids at/above it map to rows of ``pre_train_emb``.

    Modes:
      * no pre-trained vectors          -> one trainable embedding table
      * pre-trained, fine_tune == False -> two tables; pre-trained part frozen
      * pre-trained, fine_tune == True  -> one trainable table seeded with the
                                           pre-trained vectors
    """

    def __init__(self,
                 fine_tune,
                 pre_train_emb,
                 part_point,
                 size_vocab,
                 dim_emb):
        super(Word_Emb, self).__init__()
        self.fine_tune = fine_tune
        self.pre_train_emb = pre_train_emb
        self.part_point = part_point
        self.size_vocab = size_vocab
        self.dim_emb = dim_emb
        self._init_params()

    def _init_params(self):
        self.embedding = torch.nn.ModuleList()
        if (not self.fine_tune) and self.pre_train_emb:
            # Trainable table for ids < part_point, frozen table for the rest.
            self.embedding.append(nn.Embedding(self.part_point, self.dim_emb))
            self.embedding.append(nn.Embedding.from_pretrained(torch.Tensor(self.pre_train_emb), freeze = True))
        elif self.fine_tune and self.pre_train_emb:
            # Bug fix: the original built the seed matrix with np.random.randn,
            # but numpy is never imported in this module (NameError at runtime).
            # Build the equivalent 0.01-scaled Gaussian seed with torch.
            init_embedding = 0.01 * torch.randn(self.size_vocab, self.dim_emb)
            init_embedding[self.part_point:] = torch.Tensor(self.pre_train_emb)
            self.embedding.append(nn.Embedding.from_pretrained(init_embedding, freeze = False))
        else:
            self.embedding.append(nn.Embedding(self.size_vocab, self.dim_emb))

    def forward(self, inp):
        """Look up embeddings for a LongTensor of token ids (batch x seq)."""
        if (not self.fine_tune) and self.pre_train_emb:
            def get_emb(inp):
                # Route each id to its table: mask == 1 selects the trainable
                # (id < part_point) partition, mask == 0 the frozen one.
                mask = self.inp2mask(inp)
                inp_1 = inp * mask
                emb_1 = self.embedding[0](inp_1) * mask[:, :, None].float()
                inp_2 = (inp - self.part_point) * (1 - mask)
                emb_2 = self.embedding[1](inp_2) * (1 - mask)[:, :, None].float()
                emb = emb_1 + emb_2
                return emb
            emb_inp = get_emb(inp)
        else:
            emb_inp = self.embedding[0](inp)
        return emb_inp

    def inp2mask(self, inp):
        # 1 where the id belongs to the randomly-initialized partition.
        mask = (inp < self.part_point).long()
        return mask
class Position_Emb(nn.Module):
    """Positional embedding layer.

    NOTE(review): unimplemented stub — both _init_params and forward are
    no-ops, and forward returns None. Wemb only instantiates this when its
    position_emb flag is set, so enabling that flag would currently break
    the embedding summation; confirm before use.
    """
    def __init__(self, dim_emb):
        super(Position_Emb, self).__init__()
        self.dim_emb = dim_emb
        self._init_params()
    def _init_params(self):
        pass
    def forward(self, inp):
        pass
class Wemb(nn.Module):
    """Combined word (+ optional positional) embedding, followed by
    LayerNorm and dropout."""

    def __init__(self,
                 fine_tune,
                 pre_train_emb,
                 part_point,
                 size_vocab,
                 dim_emb,
                 position_emb,
                 dropout_rate):
        super(Wemb, self).__init__()
        self.fine_tune = fine_tune
        self.pre_train_emb = pre_train_emb
        self.part_point = part_point
        self.size_vocab = size_vocab
        self.dim_emb = dim_emb
        self.position_emb = position_emb
        self.dropout_rate = dropout_rate
        self._init_params()

    def _init_params(self):
        # Word embeddings always; positional embeddings only when requested.
        self.wembs = torch.nn.ModuleList()
        self.wembs.append(Word_Emb(self.fine_tune, self.pre_train_emb, self.part_point, self.size_vocab, self.dim_emb))
        if self.position_emb:
            self.wembs.append(Position_Emb(self.dim_emb))
        self.layer_norm = LayerNorm(self.dim_emb)
        self.dropout = nn.Dropout(self.dropout_rate)

    def forward(self, inp):
        # Sum the contributions of every embedding sub-module, then
        # normalize and apply dropout.
        total = None
        for sub_emb in self.wembs:
            contribution = sub_emb(inp)
            total = contribution if total is None else total + contribution
        return self.dropout(self.layer_norm(total))
class Multi_Head_Attention(nn.Module):
    """Scaled dot-product multi-head attention.

    ``dim_proj`` must be divisible by ``head_count``; each head attends in a
    ``dim_proj // head_count``-dimensional subspace.
    """

    def __init__(self,
                 dim_proj,
                 head_count,
                 dropout_rate):
        super(Multi_Head_Attention, self).__init__()
        self.dim_proj = dim_proj
        self.head_count = head_count
        self.dim_per_head = self.dim_proj // self.head_count
        self.dropout_rate = dropout_rate
        self._init_params()

    def _init_params(self):
        self.linear_key = nn.Linear(self.dim_proj, self.head_count * self.dim_per_head)
        self.linear_value = nn.Linear(self.dim_proj, self.head_count * self.dim_per_head)
        self.linear_query = nn.Linear(self.dim_proj, self.head_count * self.dim_per_head)
        self.dropout = nn.Dropout(self.dropout_rate)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, key, value, query, mask = None):
        # key:   batch X key_len X hidden
        # value: batch X value_len X hidden
        # query: batch X query_len X hidden
        # mask:  batch X query_len X key_len, nonzero marks VALID positions
        batch_size = key.size()[0]
        key_ = self.linear_key(key)
        value_ = self.linear_value(value)
        query_ = self.linear_query(query)
        # Split into heads: batch X head X len X dim_per_head.
        key_ = key_.reshape(batch_size, -1, self.head_count, self.dim_per_head).transpose(1, 2)
        value_ = value_.reshape(batch_size, -1, self.head_count, self.dim_per_head).transpose(1, 2)
        query_ = query_.reshape(batch_size, -1, self.head_count, self.dim_per_head).transpose(1, 2)
        attention_scores = torch.matmul(query_, key_.transpose(2, 3))
        attention_scores = attention_scores / math.sqrt(float(self.dim_per_head))
        if mask is not None:
            mask = mask.unsqueeze(1).expand_as(attention_scores)
            # Bug fix: the original inverted the mask with `1 - mask`, which is
            # deprecated/invalid for bool masks in modern PyTorch; `mask == 0`
            # inverts byte and bool masks alike.
            attention_scores = attention_scores.masked_fill(mask == 0, -1e18)
        attention_probs = self.softmax(attention_scores)
        attention_probs = self.dropout(attention_probs)
        context = torch.matmul(attention_probs, value_)
        # Merge heads back: batch X query_len X dim_proj.
        context = context.transpose(1, 2).reshape(batch_size, -1, self.head_count * self.dim_per_head)
        return context
class TransformerEncoderBlock(nn.Module):
    """One Transformer encoder layer: self-attention then a position-wise
    feed-forward network, each followed by dropout and a residual +
    LayerNorm ("post-norm") step."""

    def __init__(self,
                 dim_proj,
                 head_count,
                 dim_FNN,
                 act_fn,
                 dropout_rate):
        super(TransformerEncoderBlock, self).__init__()
        self.dim_proj = dim_proj
        self.head_count = head_count
        self.dim_FNN = dim_FNN
        self.act_fn = act_fn
        self.dropout_rate = dropout_rate
        self._init_params()

    def _init_params(self):
        self.multi_head_attention = Multi_Head_Attention(self.dim_proj, self.head_count, self.dropout_rate)
        self.linear_proj_context = MLP(self.dim_proj, self.dim_proj)
        self.layer_norm_context = LayerNorm(self.dim_proj)
        self.position_wise_fnn = MLP(self.dim_proj, self.dim_FNN)
        self.linear_proj_intermediate = MLP(self.dim_FNN, self.dim_proj)
        self.layer_norm_intermediate = LayerNorm(self.dim_proj)
        self.dropout = nn.Dropout(self.dropout_rate)

    def forward(self, inp, mask):
        # Self-attention sub-layer with residual connection.
        attended = self.multi_head_attention(inp, inp, inp, mask = mask)
        attended = self.dropout(self.linear_proj_context(attended))
        normed = self.layer_norm_context(inp + attended)
        # Position-wise feed-forward sub-layer with residual connection.
        expanded = self.act_fn(self.position_wise_fnn(normed))
        projected = self.dropout(self.linear_proj_intermediate(expanded))
        return self.layer_norm_intermediate(projected + normed)
def get_activation(act_str):
    """Map an activation name ("relu"/"tanh"/"sigmoid") to a fresh torch
    activation module; unknown names yield None."""
    factories = {
        "relu": torch.nn.ReLU,
        "tanh": torch.nn.Tanh,
        "sigmoid": torch.nn.Sigmoid,
    }
    factory = factories.get(act_str)
    return factory() if factory is not None else None
class TransformerEncoder(nn.Module):
    """Stack of ``num_layers`` TransformerEncoderBlock layers.

    forward returns (last_layer_output, [output_of_every_layer]).
    """

    def __init__(self,
                 dim_proj,
                 head_count,
                 dim_FNN,
                 act_str,
                 num_layers,
                 dropout_rate):
        super(TransformerEncoder, self).__init__()
        self.dim_proj = dim_proj
        self.head_count = head_count
        self.dim_FNN = dim_FNN
        self.act_fn = get_activation(act_str)
        self.num_layers = num_layers
        self.dropout_rate = dropout_rate
        self._init_params()

    def _init_params(self):
        blocks = [
            TransformerEncoderBlock(self.dim_proj, self.head_count,
                                    self.dim_FNN, self.act_fn, self.dropout_rate)
            for _ in range(self.num_layers)
        ]
        self.transformer = torch.nn.ModuleList(blocks)

    def forward(self, inp, mask = None):
        layer_outputs = []
        hidden = inp
        for block in self.transformer:
            hidden = block(hidden, mask)
            layer_outputs.append(hidden)
        return hidden, layer_outputs
def optimizer_wrapper(optimizer, lr, parameters):
    """Build a torch optimizer over ``parameters``.

    Args:
        optimizer: name of the algorithm; only "adam" is supported.
        lr: learning rate.
        parameters: iterable of parameters to optimize.

    Raises:
        ValueError: for an unsupported optimizer name. (The original fell
        through to ``return opt`` with ``opt`` unbound, raising an opaque
        UnboundLocalError instead.)
    """
    if optimizer == "adam":
        return torch.optim.Adam(params = parameters, lr = lr)
    raise ValueError("unsupported optimizer: %s" % optimizer)
|
Python
| 311
| 27.713827
| 177
|
/lib/module.py
| 0.672564
| 0.666181
|
yywang0514/dsnre
|
refs/heads/master
|
import torch
import torch.nn as nn
from lib import *
class Model(nn.Module):
    """Transformer-based relation classifier over entity-tagged sentences.

    Pipeline: word embedding -> dropout -> Transformer encoder ->
    piecewise mean pooling over three segments split at the two entity
    positions -> dense + ReLU -> linear classifier logits.

    NOTE(review): mask tensors are created with .cuda(), so a GPU is
    required as written.
    """
    def __init__(self,
                 fine_tune,
                 pre_train_emb,
                 part_point,
                 size_vocab,
                 dim_emb,
                 dim_proj,
                 head_count,
                 dim_FNN,
                 act_str,
                 num_layer,
                 num_class,
                 dropout_rate):
        super(Model, self).__init__()
        self.fine_tune = fine_tune
        self.pre_train_emb = pre_train_emb
        self.part_point = part_point
        self.size_vocab = size_vocab
        self.dim_emb = dim_emb
        self.dim_proj = dim_proj
        self.head_count = head_count
        self.dim_FNN = dim_FNN
        self.act_str = act_str
        self.num_layer = num_layer
        self.num_class = num_class
        self.dropout_rate = dropout_rate
        self._init_params()
    def _init_params(self):
        # Sub-modules come from lib (imported via `from lib import *`).
        self.wemb = Word_Emb(self.fine_tune,
                             self.pre_train_emb,
                             self.part_point,
                             self.size_vocab,
                             self.dim_emb)
        self.encoder = TransformerEncoder(self.dim_proj,
                                          self.head_count,
                                          self.dim_FNN,
                                          self.act_str,
                                          self.num_layer,
                                          self.dropout_rate)
        # Three pooled segments are concatenated, hence dim_proj * 3 inputs.
        self.dense = MLP(self.dim_proj * 3, self.dim_proj)
        self.relu = torch.nn.ReLU()
        self.classifier = MLP(self.dim_proj, self.num_class)
        self.dropout = nn.Dropout(self.dropout_rate)
    def forward(self, inp, lengths, epos):
        # inp: (batch, max_len) token ids; lengths: (batch,); epos: (batch, 2)
        # entity positions — assumed shapes, confirm against callers.
        mask, mask_l, mask_m, mask_r = self.pos2mask(epos, lengths)
        emb_inp = self.wemb(inp)
        emb_inp = self.dropout(emb_inp)
        proj_inp, _ = self.encoder(emb_inp, self.create_attention_mask(mask, mask))
        # Zero out padded positions before pooling.
        proj_inp = proj_inp * mask[:, :, None]
        # Mean-pool each of the three entity-delimited segments.
        pool_inp_l = torch.sum(proj_inp * mask_l[:, :, None], dim = 1) / torch.sum(mask_l, dim = 1)[:, None]
        pool_inp_m = torch.sum(proj_inp * mask_m[:, :, None], dim = 1) / torch.sum(mask_m, dim = 1)[:, None]
        pool_inp_r = torch.sum(proj_inp * mask_r[:, :, None], dim = 1) / torch.sum(mask_r, dim = 1)[:, None]
        pool_inp = torch.cat([pool_inp_l, pool_inp_m, pool_inp_r], dim = 1)
        pool_inp = self.dropout(pool_inp)
        logit = self.relu(self.dense(pool_inp))
        logit = self.dropout(logit)
        logit = self.classifier(logit)
        return logit
    def pos2mask(self, epos, lengths):
        # Float masks over (batch, max_len):
        #   mask   — 1 at valid (non-padding) positions
        #   mask_l — positions strictly before the first entity
        #   mask_r — valid positions at/after the second entity
        #   mask_m — the remainder (between the two entities)
        # NOTE(review): mask_m = ones - mask_l - mask_r is also 1 at padded
        # positions past the sentence end, inflating the mask_m normalizer in
        # forward; padded features are zeroed beforehand, so only the divisor
        # is affected — confirm whether intended.
        mask = self.len2mask(lengths)
        nsample = lengths.size()[0]
        max_len = torch.max(lengths)
        idxes = torch.arange(0, max_len).cuda()
        mask_l = (idxes < epos[:, 0].unsqueeze(1)).float()
        mask_r = mask - (idxes < epos[:, 1].unsqueeze(1)).float()
        mask_m = torch.ones([nsample, max_len]).float().cuda() - mask_l - mask_r
        return mask, mask_l, mask_m, mask_r
    def len2mask(self, lengths):
        # (batch, max_len) float mask: 1 where position index < sentence length.
        max_len = torch.max(lengths)
        idxes = torch.arange(0, max_len).cuda()
        mask = (idxes < lengths.unsqueeze(1)).float()
        return mask
    def create_attention_mask(self, query_mask, key_mask):
        # Outer product of validity masks -> (batch, query_len, key_len) byte mask.
        return torch.matmul(query_mask[:, :, None], key_mask[:, None, :]).byte()
|
Python
| 99
| 29.747475
| 192
|
/model.py
| 0.567861
| 0.561288
|
yywang0514/dsnre
|
refs/heads/master
|
from module import *
from util import *
from data_iterator import *
|
Python
| 3
| 21.666666
| 27
|
/lib/__init__.py
| 0.764706
| 0.764706
|
yywang0514/dsnre
|
refs/heads/master
|
import sys
import codecs
class InstanceBag(object):
    """Bag of sentences sharing one entity pair (distant-supervision unit)."""

    def __init__(self, entities, rel, num, sentences, positions, entitiesPos):
        # Plain data holder: store each field under its constructor name.
        self.entities, self.rel, self.num = entities, rel, num
        self.sentences, self.positions, self.entitiesPos = sentences, positions, entitiesPos
def bags_decompose(data_bags):
    """Split a list of InstanceBag objects into parallel attribute lists.

    Returns [rels, nums, sentences, positions, entity_positions], in that
    order, each aligned by bag index.
    """
    rels, nums, sents, poss, eposs = [], [], [], [], []
    for bag in data_bags:
        rels.append(bag.rel)
        nums.append(bag.num)
        sents.append(bag.sentences)
        poss.append(bag.positions)
        eposs.append(bag.entitiesPos)
    return [rels, nums, sents, poss, eposs]
def datafold(filename):
    """Parse a bag-formatted data file into a list of InstanceBag objects.

    File layout per bag:
        line 1: entity ids, space separated
        line 2: relation ids followed by the sentence count ``num``
        next ``num`` lines: two entity positions, then the token ids
    """
    data = []
    # Bug fix: the original relied on Python 2 map() returning a list
    # (calling .sort() on it) and never closed the file; list(...) keeps it
    # working on both Python 2 and 3, and `with` guarantees closure.
    with open(filename, 'r') as f:
        while 1:
            line = f.readline()
            if not line:
                break
            entities = list(map(int, line.split(' ')))
            line = f.readline()
            bagLabel = line.split(' ')
            rel = list(map(int, bagLabel[0:-1]))
            num = int(bagLabel[-1])
            positions = []
            sentences = []
            entitiesPos = []
            for i in range(0, num):
                sent = f.readline().split(' ')
                positions.append(list(map(int, sent[0:2])))
                epos = sorted(map(int, sent[0:2]))
                entitiesPos.append(epos)
                # sent[-1] is the trailing newline token; it is dropped.
                sentences.append(list(map(int, sent[2:-1])))
            ins = InstanceBag(entities, rel, num, sentences, positions, entitiesPos)
            data.append(ins)
    return data
def change_word_idx(data):
    """Remap token ids: 160696 -> 1, 0 stays 0, every other id shifts up by 1.

    Returns a list of new InstanceBag objects; the input bags are untouched.
    """
    def remap(word):
        # 160696 collapses to id 1; padding id 0 is preserved.
        if word == 160696:
            return 1
        return 0 if word == 0 else word + 1

    new_data = []
    for inst in data:
        remapped_sentences = [[remap(word) for word in sent] for sent in inst.sentences]
        new_data.append(InstanceBag(inst.entities, inst.rel, inst.num,
                                    remapped_sentences, inst.positions, inst.entitiesPos))
    return new_data
def save_data(data, textfile):
    """Serialize bags back to the on-disk text format read by datafold."""
    with codecs.open(textfile, "w", encoding = "utf8") as f:
        for inst in data:
            entity_line = " ".join(map(str, inst.entities))
            f.write("%s\n" %(entity_line))
            label_line = " ".join(map(str, inst.rel))
            f.write("%s %s\n" %(label_line, str(inst.num)))
            # One line per sentence: the two positions, then the token ids.
            for pos, sent in zip(inst.positions, inst.sentences):
                f.write("%s %s\n" %(" ".join(map(str, pos)), " ".join(map(str, sent))))
def main(argv):
    """CLI entry: read bags from argv[0], remap word ids, write to argv[1]."""
    save_data(change_word_idx(datafold(argv[0])), argv[1])

if __name__ == "__main__":
    main(sys.argv[1:])
|
Python
| 87
| 27
| 83
|
/format.py
| 0.645996
| 0.635729
|
yywang0514/dsnre
|
refs/heads/master
|
import sys
import re
import numpy as np
import cPickle as pkl
import codecs
import logging
from data_iterator import *
# Module-level logger (root logger; handlers are configured by the caller).
logger = logging.getLogger()
# Reserved vocabulary entries: index 0 is padding, index 1 is unknown.
extra_token = ["<PAD>", "<UNK>"]
def display(msg):
    """Echo `msg` to stdout and record it with the module logger."""
    print(msg)
    logger.info(msg)
def datafold(filename):
    """Parse a bag-formatted data file into a list of InstanceBag objects.

    File layout per bag:
        line 1: entity ids, space separated
        line 2: relation ids followed by the sentence count ``num``
        next ``num`` lines: two entity positions, then the token ids
    """
    data = []
    # Bug fix: the original relied on Python 2 map() returning a list
    # (calling .sort() on it) and never closed the file; list(...) keeps it
    # working on both Python 2 and 3, and `with` guarantees closure.
    with open(filename, 'r') as f:
        while 1:
            line = f.readline()
            if not line:
                break
            entities = list(map(int, line.split(' ')))
            line = f.readline()
            bagLabel = line.split(' ')
            rel = list(map(int, bagLabel[0:-1]))
            num = int(bagLabel[-1])
            positions = []
            sentences = []
            entitiesPos = []
            for i in range(0, num):
                sent = f.readline().split(' ')
                positions.append(list(map(int, sent[0:2])))
                epos = sorted(map(int, sent[0:2]))
                entitiesPos.append(epos)
                # sent[-1] is the trailing newline token; it is dropped.
                sentences.append(list(map(int, sent[2:-1])))
            ins = InstanceBag(entities, rel, num, sentences, positions, entitiesPos)
            data.append(ins)
    return data
def dicfold(textfile):
    """Read a vocabulary file: one token per line; blank lines are skipped."""
    with codecs.open(textfile, "r", encoding = "utf8") as f:
        stripped_lines = (line.strip() for line in f)
        vocab = [token for token in stripped_lines if token]
    return vocab
def build_word2idx(vocab, textFile):
    """Build the token -> index mapping, reordering `vocab` so that words
    without a pre-trained vector come first.

    Returns (word2idx, pre_train_emb, part_point): indices below part_point
    (extra tokens plus words missing from the embedding file) are meant to be
    randomly initialized; indices at/above it align row-for-row with
    pre_train_emb. With no textFile, pre_train_emb is empty and part_point
    covers the whole vocabulary.
    """
    msg = "Building word2idx..."
    display(msg)
    pre_train_emb = []
    part_point = len(vocab)
    if textFile:
        word2emb = load_emb(vocab, textFile)
        # Partition the vocabulary: words with a pre-trained vector go last,
        # in the same order as their rows in pre_train_emb.
        pre_train_vocab = []
        un_pre_train_vocab = []
        for word in vocab:
            if word in word2emb:
                pre_train_vocab.append(word)
                pre_train_emb.append(word2emb[word])
            else:
                un_pre_train_vocab.append(word)
        part_point = len(un_pre_train_vocab)
        un_pre_train_vocab.extend(pre_train_vocab)
        vocab = un_pre_train_vocab
    word2idx = {}
    # Indices 0 and 1 are reserved for the extra tokens (<PAD>, <UNK>).
    for v, k in enumerate(extra_token):
        word2idx[k] = v
    for v, k in enumerate(vocab):
        word2idx[k] = v + 2
    # Shift the partition boundary past the two reserved indices.
    part_point += 2
    return word2idx, pre_train_emb, part_point
def load_emb(vocab, textFile):
    """Load pre-trained vectors from `textFile` for the words in `vocab`.

    Each line holds a token followed by its vector components, separated by
    spaces or tabs. Returns a dict mapping word -> list of floats.
    """
    display('load emb from ' + textFile)
    wanted = set(vocab)  # O(1) membership tests while scanning the file
    word2emb = {}
    splitter = re.compile(r" |\t")
    count = 0
    with codecs.open(textFile, "r", "utf8") as filein:
        for line in filein:
            count += 1
            fields = splitter.split(line.strip())
            token = fields[0]
            if token in wanted:
                word2emb[token] = [float(component) for component in fields[1:]]
    del wanted
    display("find %d words in %s" %(count, textFile))
    display("Summary: %d words in the vocabulary and %d of them appear in the %s" %(len(vocab), len(word2emb), textFile))
    return word2emb
def positive_evaluation(predict_results):
    """Build a precision/recall curve over positive (non-NA) bags.

    predict_results: [predicted_label, predicted_prob, given_labels], where
    given_labels is 2-D with one row of candidate labels per bag and a
    leading 0 meaning the NA (no-relation) class. Bags are visited in order
    of decreasing prediction confidence; one (precision, recall) point is
    appended whenever the values change. Returns [precisions, recalls].
    """
    predict_y = predict_results[0]
    predict_y_prob = predict_results[1]
    y_given = predict_results[2]
    positive_num = 0
    #find the number of positive examples
    for yi in range(y_given.shape[0]):
        if y_given[yi, 0] > 0:
            positive_num += 1
    # if positive_num == 0:
    #     positive_num = 1
    # sort prob
    index = np.argsort(predict_y_prob)[::-1]
    all_pre = [0]
    all_rec = [0]
    p_n = 0  # positive bags predicted NA (counted but unused below)
    p_p = 0  # positive bags predicted with a correct positive label
    n_p = 0  # NA bags predicted positive (false positives)
    # print y_given.shape[0]
    for i in range(y_given.shape[0]):
        labels = y_given[index[i],:] # key given labels
        py = predict_y[index[i]] # answer
        if labels[0] == 0:
            # NA bag
            if py > 0:
                n_p += 1
        else:
            # positive bag
            if py == 0:
                p_n += 1
            else:
                flag = False
                for j in range(y_given.shape[1]):
                    # NOTE(review): j is a range index and can never equal -1,
                    # so this break is dead code — possibly labels[j] == -1
                    # (an end-of-labels sentinel) was intended; confirm.
                    if j == -1:
                        break
                    if py == labels[j]:
                        flag = True # true positive
                        break
                if flag:
                    p_p += 1
        # Emit a new curve point after each bag when precision/recall moved.
        if (p_p+n_p) == 0:
            precision = 1
        else:
            precision = float(p_p)/(p_p+n_p)
        recall = float(p_p)/positive_num
        if precision != all_pre[-1] or recall != all_rec[-1]:
            all_pre.append(precision)
            all_rec.append(recall)
    # Drop the (0, 0) sentinel used for change detection.
    return [all_pre[1:], all_rec[1:]]
|
Python
| 169
| 20.177515
| 115
|
/lib/util.py
| 0.622974
| 0.604807
|
yywang0514/dsnre
|
refs/heads/master
|
import time
import cPickle
import numpy as np
import torch
class InstanceBag(object):
    """Bag of sentences for one entity pair (distant-supervision unit)."""

    def __init__(self, entities, rel, num, sentences, positions, entitiesPos):
        # Simple record type: every constructor argument becomes an attribute.
        self.entities, self.rel, self.num = entities, rel, num
        self.sentences, self.positions, self.entitiesPos = sentences, positions, entitiesPos
def bags_decompose(data_bags):
    """Turn a list of InstanceBag objects into five parallel lists.

    Returns [rels, nums, sentences, positions, entity_positions].
    """
    columns = {"rel": [], "num": [], "sentences": [], "positions": [], "entitiesPos": []}
    for bag in data_bags:
        for attr, bucket in columns.items():
            bucket.append(getattr(bag, attr))
    return [columns["rel"], columns["num"], columns["sentences"],
            columns["positions"], columns["entitiesPos"]]
def select_instance(rels, nums, sents, poss, eposs, model):
    """At-least-one instance selection: for every bag keep the single
    sentence the current model scores highest, and batch those winners.

    Arguments mirror the output of bags_decompose; `model` is invoked to
    score each candidate sentence. Returns [batch_x, batch_len, batch_epos,
    batch_y] as CUDA LongTensors (GPU required).
    """
    batch_x = []
    batch_len = []
    batch_epos = []
    batch_y = []
    for bagIndex, insNum in enumerate(nums):
        maxIns = 0
        maxP = -1
        if insNum > 1:
            # Score every sentence in the bag; remember the argmax.
            # Single-sentence bags skip scoring and keep index 0.
            for m in range(insNum):
                insX = sents[bagIndex][m]
                epos = eposs[bagIndex][m]
                sel_x, sel_len, sel_epos = prepare_data([insX], [epos])
                results = model(sel_x, sel_len, sel_epos)
                tmpMax = results.max()
                if tmpMax > maxP:
                    maxIns = m
                    maxP=tmpMax
        batch_x.append(sents[bagIndex][maxIns])
        batch_epos.append(eposs[bagIndex][maxIns])
        batch_y.append(rels[bagIndex])
    # Pad the selected sentences into one dense batch.
    batch_x, batch_len, batch_epos = prepare_data(batch_x, batch_epos)
    batch_y = torch.LongTensor(np.array(batch_y).astype("int32")).cuda()
    return [batch_x, batch_len, batch_epos, batch_y]
def prepare_data(sents, epos):
    """Pad token sequences into a dense batch and move everything to the GPU.

    Returns (tokens, lengths, entity_positions) as CUDA LongTensors; tokens
    has shape (n_samples, max_len) and is zero-padded on the right.
    """
    lens = [len(sent) for sent in sents]
    padded = np.zeros((len(lens), max(lens))).astype("int32")
    for row, sent in enumerate(sents):
        padded[row, :lens[row]] = sent
    lengths = np.array(lens).astype("int32")
    positions = np.array(epos).astype("int32")
    return (torch.LongTensor(padded).cuda(),
            torch.LongTensor(lengths).cuda(),
            torch.LongTensor(positions).cuda())
|
Python
| 64
| 32.4375
| 116
|
/lib/data_iterator.py
| 0.612617
| 0.607477
|
porterjamesj/bcbio-nextgen
|
refs/heads/master
|
"""Calculate potential effects of variations using external programs.
Supported:
snpEff: http://sourceforge.net/projects/snpeff/
"""
import os
import csv
import glob
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils, tools
from bcbio.provenance import do
from bcbio.variation import vcfutils
# ## snpEff variant effects
def snpeff_effects(data):
    """Annotate input VCF file with effects calculated by snpEff.

    Returns the annotated VCF path, or None when the input VCF contains no
    variants. (The original fell through to `return vcf_file` with the name
    unbound in the no-variant case, raising NameError.)
    """
    vcf_in = data["vrn_file"]
    interval_file = data["config"]["algorithm"].get("variant_regions", None)
    if not vcfutils.vcf_has_variants(vcf_in):
        return None
    se_interval = (_convert_to_snpeff_interval(interval_file, vcf_in)
                   if interval_file else None)
    try:
        vcf_file = _run_snpeff(vcf_in, se_interval, "vcf", data)
    finally:
        # The converted interval file is temporary; always clean it up.
        for fname in [se_interval]:
            if fname and os.path.exists(fname):
                os.remove(fname)
    return vcf_file
def _snpeff_args_from_config(data):
    """Retrieve snpEff arguments supplied through input configuration."""
    config = data["config"]
    args = []
    # General supplied arguments.
    resources = config_utils.get_resources("snpeff", config)
    extra_opts = resources.get("options")
    if extra_opts:
        args.extend(str(x) for x in extra_opts)
    # Cancer specific calling arguments.
    phenotype = data.get("metadata", {}).get("phenotype")
    if phenotype in ["tumor", "normal"]:
        args.append("-cancer")
    # Provide options tuned to reporting variants in clinical environments.
    if config["algorithm"].get("clinical_reporting"):
        args.extend(["-canon", "-hgvs"])
    return args
def get_db(ref_file, resources, config=None):
    """Retrieve a snpEff database name and location relative to reference file."""
    snpeff_db = resources.get("aliases", {}).get("snpeff")
    if not snpeff_db:
        # No snpEff alias configured for this genome.
        return snpeff_db, None
    genome_dir = os.path.dirname(os.path.dirname(ref_file))
    snpeff_base_dir = utils.safe_makedir(os.path.normpath(os.path.join(genome_dir, "snpeff")))
    # Back compatible retrieval of genome from installation directory.
    if config and not os.path.exists(os.path.join(snpeff_base_dir, snpeff_db)):
        snpeff_base_dir, snpeff_db = _installed_snpeff_genome(snpeff_db, config)
    return snpeff_db, snpeff_base_dir
def get_cmd(cmd_name, datadir, config):
    """Retrieve snpEff base command line, handling command line and jar based installs.
    """
    resources = config_utils.get_resources("snpeff", config)
    # JVM memory options; default to 750 MB initial / 5 GB max heap.
    memory = " ".join(resources.get("jvm_opts", ["-Xms750m", "-Xmx5g"]))
    try:
        # Preferred: a `snpeff` wrapper script available on the PATH.
        snpeff = config_utils.get_program("snpeff", config)
        cmd = "{snpeff} {memory} {cmd_name} -dataDir {datadir}"
    except config_utils.CmdNotFound:
        # Fallback: invoke the jar directly with its adjacent .config file.
        snpeff_jar = config_utils.get_jar("snpEff",
                                          config_utils.get_program("snpeff", config, "dir"))
        config_file = "%s.config" % os.path.splitext(snpeff_jar)[0]
        cmd = "java {memory} -jar {snpeff_jar} {cmd_name} -c {config_file} -dataDir {datadir}"
    # Templates above are filled from this function's local variables; keep
    # their names in sync with the placeholders.
    return cmd.format(**locals())
def _run_snpeff(snp_in, se_interval, out_format, data):
    """Run snpEff `eff` on snp_in, producing <input>-effects.<ext>.

    se_interval optionally restricts annotation to an interval file;
    out_format selects VCF vs tab-separated output. Re-uses an existing
    output file; bgzips and indexes the result when the input was .gz.
    """
    snpeff_db, datadir = get_db(data["sam_ref"], data["genome_resources"], data["config"])
    assert datadir is not None, \
        "Did not find snpEff resources in genome configuration: %s" % data["genome_resources"]
    assert os.path.exists(os.path.join(datadir, snpeff_db)), \
        "Did not find %s snpEff genome data in %s" % (snpeff_db, datadir)
    snpeff_cmd = get_cmd("eff", datadir, data["config"])
    # Match the input extension (possibly .vcf.gz) for VCF output.
    ext = utils.splitext_plus(snp_in)[1] if out_format == "vcf" else ".tsv"
    out_file = "%s-effects%s" % (utils.splitext_plus(snp_in)[0], ext)
    if not utils.file_exists(out_file):
        interval = "-filterinterval %s" % (se_interval) if se_interval else ""
        config_args = " ".join(_snpeff_args_from_config(data))
        # Pipe through bgzip when producing compressed output.
        if ext.endswith(".gz"):
            bgzip_cmd = "| %s -c" % tools.get_bgzip_cmd(data["config"])
        else:
            bgzip_cmd = ""
        # file_transaction writes to a temp path and moves it on success.
        with file_transaction(out_file) as tx_out_file:
            cmd = ("{snpeff_cmd} {interval} {config_args} -noLog -1 -i vcf -o {out_format} "
                   "{snpeff_db} {snp_in} {bgzip_cmd} > {tx_out_file}")
            do.run(cmd.format(**locals()), "snpEff effects", data)
    if ext.endswith(".gz"):
        out_file = vcfutils.bgzip_and_index(out_file, data["config"])
    return out_file
def _convert_to_snpeff_interval(in_file, base_file):
    """Handle wide variety of BED-like inputs, converting to BED-3."""
    out_file = "%s-snpeff-intervals.bed" % utils.splitext_plus(base_file)[0]
    # Only generate once; later calls reuse the existing file.
    if not os.path.exists(out_file):
        with open(out_file, "w") as out_handle:
            writer = csv.writer(out_handle, dialect="excel-tab")
            with open(in_file) as in_handle:
                data_lines = (l for l in in_handle if not l.startswith(("@", "#")))
                for line in data_lines:
                    # Keep only chrom/start/end, dropping any extra columns.
                    writer.writerow(line.split()[:3])
    return out_file
# ## back-compatibility
def _find_snpeff_datadir(config_file):
    """Extract the data_dir setting from a snpEff config file."""
    with open(config_file) as in_handle:
        for line in in_handle:
            if not line.startswith("data_dir"):
                continue
            data_dir = config_utils.expand_path(line.split("=")[-1].strip())
            # Relative paths resolve against the config file's directory.
            if not data_dir.startswith("/"):
                data_dir = os.path.join(os.path.dirname(config_file), data_dir)
            return data_dir
    raise ValueError("Did not find data directory in snpEff config file: %s" % config_file)
def _installed_snpeff_genome(base_name, config):
    """Find the most recent installed genome for snpEff with the given name.
    """
    snpeff_config_file = os.path.join(config_utils.get_program("snpEff", config, "dir"),
                                      "snpEff.config")
    data_dir = _find_snpeff_datadir(snpeff_config_file)
    # Reverse-sorted glob puts the newest versioned database first.
    candidates = sorted(glob.glob(os.path.join(data_dir, "%s*" % base_name)), reverse=True)
    dbs = [d for d in candidates if os.path.isdir(d)]
    if not dbs:
        raise ValueError("No database found in %s for %s" % (data_dir, base_name))
    return data_dir, os.path.split(dbs[0])[-1]
|
Python
| 140
| 43.628571
| 96
|
/bcbio/variation/effects.py
| 0.617958
| 0.615557
|
porterjamesj/bcbio-nextgen
|
refs/heads/master
|
"""Run distributed functions provided a name and json/YAML file with arguments.
Enables command line access and alternative interfaces to run specific
functionality within bcbio-nextgen.
"""
import yaml
from bcbio.distributed import multitasks
def process(args):
    """Run the function in args.name given arguments in args.argfile.
    """
    try:
        target = getattr(multitasks, args.name)
    except AttributeError:
        raise AttributeError("Did not find exposed function in bcbio.distributed.multitasks named '%s'" % args.name)
    # Arguments are serialized as JSON/YAML; yaml handles both.
    with open(args.argfile) as handle:
        parsed_args = yaml.safe_load(handle)
    target(parsed_args)
def add_subparser(subparsers):
    """Attach the `runfn` subcommand to an argparse subparsers object."""
    parser = subparsers.add_parser("runfn", help=("Run a specific bcbio-nextgen function."
                                                  "Intended for distributed use."))
    # Two positional arguments: the function name and its argument file.
    for arg_name, arg_help in (("name", "Name of the function to run"),
                               ("argfile", "JSON file with arguments to the function")):
        parser.add_argument(arg_name, help=arg_help)
Python
| 25
| 39
| 116
|
/bcbio/distributed/runfn.py
| 0.691
| 0.691
|
porterjamesj/bcbio-nextgen
|
refs/heads/master
|
"""Run distributed tasks in parallel using IPython or joblib on multiple cores.
"""
import functools
try:
import joblib
except ImportError:
joblib = False
from bcbio.distributed import ipython
from bcbio.log import logger, setup_local_logging
from bcbio.provenance import diagnostics, system
def parallel_runner(parallel, dirs, config):
    """Process a supplied function: single, multi-processor or distributed.

    Returns a closure run_parallel(fn_name, items, metadata=None) that
    dispatches by parallel["type"]: "ipython" goes to the distributed
    runner, anything else resolves fn_name in the configured multitasks
    module and fans out across local cores.
    """
    def run_parallel(fn_name, items, metadata=None):
        # `metadata` is accepted for interface compatibility but unused here.
        # Drop empty work items up front.
        items = [x for x in items if x is not None]
        if len(items) == 0:
            return []
        items = diagnostics.track_parallel(items, fn_name)
        sysinfo = system.get_info(dirs, parallel)
        if parallel["type"] == "ipython":
            # Distributed execution via IPython parallel.
            return ipython.runner(parallel, fn_name, items, dirs["work"], sysinfo, config)
        else:
            # Local execution: import <module>.multitasks and look the
            # function up by name (default module: bcbio.distributed).
            imodule = parallel.get("module", "bcbio.distributed")
            logger.info("multiprocessing: %s" % fn_name)
            fn = getattr(__import__("{base}.multitasks".format(base=imodule),
                                    fromlist=["multitasks"]),
                         fn_name)
            return run_multicore(fn, items, config, parallel["cores"])
    return run_parallel
def zeromq_aware_logging(f):
    """Ensure multiprocessing logging uses ZeroMQ queues.
    ZeroMQ and local stdout/stderr do not behave nicely when intertwined. This
    ensures the local logging uses existing ZeroMQ logging queues.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # Locate the pipeline config dict among the positional arguments; it
        # may be passed directly or nested under a "config" key.
        config = None
        for arg in args:
            if ipython.is_std_config_arg(arg):
                config = arg
                break
            elif ipython.is_nested_config_arg(arg):
                config = arg["config"]
                break
        assert config, "Could not find config dictionary in function arguments."
        if config.get("parallel", {}).get("log_queue"):
            # Route this process's log records through the ZeroMQ queue.
            handler = setup_local_logging(config, config["parallel"])
        else:
            handler = None
        try:
            out = f(*args, **kwargs)
        finally:
            # Always detach the handler, even when the wrapped call raises.
            if handler and hasattr(handler, "close"):
                handler.close()
        return out
    return wrapper
def run_multicore(fn, items, config, cores=None):
    """Run the function using multiple cores on the given items to process.

    Args:
        fn: the work function, called once per item via joblib.
        items: list of argument tuples/dicts, one per job.
        config: pipeline configuration (algorithm.num_cores used as default).
        cores: override for the number of cores.

    Returns the concatenation of all truthy per-item results.

    Raises ImportError when joblib is unavailable.
    """
    if cores is None:
        cores = config["algorithm"].get("num_cores", 1)
    parallel = {"type": "local", "cores": cores}
    sysinfo = system.get_info({}, parallel)
    jobr = ipython.find_job_resources([fn], parallel, items, sysinfo, config,
                                      parallel.get("multiplier", 1),
                                      max_multicore=int(sysinfo["cores"]))
    items = [ipython.add_cores_to_config(x, jobr.cores_per_job) for x in items]
    # Bug fix: the import fallback at the top of this module sets joblib to
    # False (not None), so the original `if joblib is None` guard could never
    # fire; test truthiness instead so the ImportError is actually raised.
    if not joblib:
        raise ImportError("Need joblib for multiprocessing parallelization")
    out = []
    for data in joblib.Parallel(jobr.num_jobs)(joblib.delayed(fn)(x) for x in items):
        if data:
            out.extend(data)
    return out
|
Python
| 80
| 37.762501
| 90
|
/bcbio/distributed/messaging.py
| 0.598194
| 0.597227
|
porterjamesj/bcbio-nextgen
|
refs/heads/master
|
from os import path
from bcbio.pipeline import config_utils
from bcbio.utils import safe_makedir, file_exists, get_in
from bcbio.provenance import do
CLEANUP_FILES = ["Aligned.out.sam", "Log.out", "Log.progress.out"]
def align(fastq_file, pair_file, ref_file, names, align_dir, data):
    """Align single or paired fastq input with the STAR aligner.

    Returns the path to the aligned SAM file, re-using an existing output
    when present. `pair_file` may be None/falsy for single-end reads.
    """
    config = data["config"]
    out_prefix = path.join(align_dir, names["lane"])
    # STAR names its output <prefix>Aligned.out.sam.
    out_file = out_prefix + "Aligned.out.sam"
    if file_exists(out_file):
        return out_file
    star_path = config_utils.get_program("STAR", config)
    fastq = " ".join([fastq_file, pair_file]) if pair_file else fastq_file
    num_cores = config["algorithm"].get("num_cores", 1)
    safe_makedir(align_dir)
    cmd = ("{star_path} --genomeDir {ref_file} --readFilesIn {fastq} "
           "--runThreadN {num_cores} --outFileNamePrefix {out_prefix} "
           "--outReadsUnmapped Fastx --outFilterMultimapNmax 10")
    fusion_mode = get_in(data, ("config", "algorithm", "fusion_mode"), False)
    if fusion_mode:
        # Keep chimeric alignments for downstream fusion detection.
        cmd += " --chimSegmentMin 15 --chimJunctionOverhangMin 15"
    strandedness = get_in(data, ("config", "algorithm", "strandedness"),
                          "unstranded").lower()
    if strandedness == "unstranded":
        # Unstranded data needs the XS strand attribute in the output SAM.
        cmd += " --outSAMstrandField intronMotif"
    run_message = "Running STAR aligner on %s and %s." % (pair_file, ref_file)
    # The command template is filled from this function's local variables.
    do.run(cmd.format(**locals()), run_message, None)
    return out_file
def _get_quality_format(config):
qual_format = config["algorithm"].get("quality_format", None)
if qual_format.lower() == "illumina":
return "fastq-illumina"
elif qual_format.lower() == "solexa":
return "fastq-solexa"
else:
return "fastq-sanger"
def remap_index_fn(ref_file):
    """Map sequence references to equivalent star indexes
    """
    # The star index lives alongside the seq/ directory: <genome>/star.
    genome_root = path.dirname(path.dirname(ref_file))
    return path.join(genome_root, "star")
def job_requirements(cores, memory):
    """Ensure STAR receives at least 30 GB of total memory across its cores."""
    MIN_STAR_MEMORY = 30.0
    total = cores * memory if memory else 0
    if total < MIN_STAR_MEMORY:
        # Spread the minimum requirement evenly over the requested cores.
        memory = MIN_STAR_MEMORY / cores
    return cores, memory

# Expose the requirement hook on the align function for the scheduler.
align.job_requirements = job_requirements
|
Python
| 54
| 37.888889
| 78
|
/bcbio/ngsalign/star.py
| 0.651905
| 0.647143
|
MagnusPoppe/hidden-markov-models
|
refs/heads/master
|
import random
import sys
actions = ["LEFT", "RIGHT", "UP", "DOWN"]
def perform_action(x, y, action):
    """Apply `action` on the 4x3 grid; a move that would cross the edge
    leaves (x, y) unchanged, as does an unrecognized action."""
    if action == "LEFT":
        return (x - 1, y) if x != 0 else (x, y)
    if action == "RIGHT":
        return (x + 1, y) if x != 3 else (x, y)
    if action == "UP":
        return (x, y - 1) if y != 0 else (x, y)
    if action == "DOWN":
        return (x, y + 1) if y != 2 else (x, y)
    return x, y
def transition_model(x, y, action):
    """Deterministic transition prior: 1 when `action` is the preferred move
    at cell (x, y), otherwise 0.0."""
    preferred_grid = [
        ["RIGHT", "RIGHT", "RIGHT", "LEFT"],
        ["UP", "DOWN", "UP", "UP"],
        ["UP", "LEFT", "UP", "LEFT"],
    ]
    return 1 if preferred_grid[y][x] == action else 0.0
def policy_evaluation(policy, utilities, states, discount):
    """One Bellman sweep of policy evaluation; `utilities` is updated in
    place (later states see earlier updates) and returned."""
    for x, y in states:
        chosen = policy[y][x]
        expected = sum(transition_model(x, y, chosen) * utilities[yy][xx]
                       for xx, yy in all_possibles(x, y))
        utilities[y][x] = reward[y][x] + discount * expected
    return utilities
def best_action(state, u):
    """Return (action, score) with the highest expected utility from `state`.

    Ties keep the earliest action in the module-level `actions` ordering.
    """
    winner, winner_score = None, -sys.maxsize
    for candidate in actions:
        candidate_score = aciton_score(state, candidate, u)
        if candidate_score > winner_score:
            winner, winner_score = candidate, candidate_score
    return winner, winner_score
# All four successor cells reachable from (x, y); bumping a wall stays put.
all_possibles = lambda x, y: [perform_action(x, y, action) for action in actions]
# Expected utility of taking action a in state s under utility grid u.
aciton_score = lambda s, a, u: sum([transition_model(x, y, a) * u[y][x] for x, y in all_possibles(*s)])
# Reward grid (y-major): +1 / -1 terminals in the right column, -100 pit at (1, 1).
reward = [
    [-0.04, -0.04, -0.04, +1],
    [-0.04, -100, -0.04, -1],
    [-0.04, -0.04, -0.04, -0.04],
]
states = [(x, y) for x in range(4) for y in range(3)]
# NOTE(review): the `* 3` repeats ONE row object, so all three policy rows
# alias the same list — a write through any row shows up in all of them;
# confirm whether this sharing is intended.
random_initial_policy = [random.sample(actions, 4)]*3
def policy_iteration(mdp, policy, discount):
    """Policy iteration over the grid MDP.

    Args:
        mdp: iterable of (x, y) states.
        policy: 3x4 (y-major) grid of actions; the caller's grid is not mutated.
        discount: Bellman discount factor.

    Returns the converged (or 100-sweep-capped) policy grid.

    Bug fix: the utility grid was built as [[0]*4]*3, which repeats ONE row
    list three times so every u[y][x] write aliased across all rows. The
    incoming policy rows (built the same way at module level) are copied for
    the same reason before being mutated.
    """
    u = [[0] * 4 for _ in range(3)]          # independent rows
    policy = [list(row) for row in policy]   # de-alias shared policy rows
    unchanged = False
    sweep = 0
    while not unchanged:
        # Evaluate policy using bellman equation
        u = policy_evaluation(policy, u, states, discount)
        unchanged = True
        for state in mdp:
            x, y = state
            # Compare with action in policy with all others to see if best:
            candidate, candidate_score = best_action(state, u)
            if candidate_score > aciton_score(state, policy[y][x], u):
                policy[y][x] = candidate
                # Mark as changed to loop one more time.
                unchanged = False
        if sweep == 100: break
        sweep += 1
    return policy

print(policy_iteration(states, random_initial_policy, 0.9))
|
Python
| 69
| 31.536232
| 114
|
/policy_iteration.py
| 0.55615
| 0.529412
|
MagnusPoppe/hidden-markov-models
|
refs/heads/master
|
import pandas as pd
from math import log2
_TRAINING_FILE = "/Users/magnus/Downloads/data/training.csv"
_TESTING_FILE = "/Users/magnus/Downloads/data/test.csv"
def entropy(V):
    """Shannon entropy of a probability list; zero entries contribute nothing."""
    total = 0.0
    for vk in V:
        if vk > 0:
            total += vk * log2(vk)
    return -total
def remainder(attribute, examples):
    """Expected entropy remaining after splitting *examples* on *attribute*.

    Weighted average of the class entropy in each attribute-value subset.
    """
    p = len(examples[examples['CLASS'] == 1])
    n = len(examples[examples['CLASS'] == 2])
    remain = 0
    for value in examples[attribute].unique():
        subset = examples[[attribute, 'CLASS']][examples[attribute] == value]
        pk = len(subset[subset['CLASS'] == 1])
        nk = len(subset[subset['CLASS'] == 2])
        weight = (pk + nk) / (p + n)
        remain += weight * entropy([pk / (pk + nk), nk / (pk + nk)])
    return remain
def importance(attribute, examples):
    """Information gain of *attribute*: prior class entropy minus remainder."""
    positives = len(examples[attribute][examples['CLASS'] == 1])
    negatives = len(examples[attribute][examples['CLASS'] == 2])
    total = positives + negatives
    prior = entropy([positives / total, negatives / total])
    return prior - remainder(attribute, examples)
def plurality(examples):
    """Majority label: 1 if class-1 rows are a strict majority, else 2."""
    ones = len(examples['CLASS'][examples['CLASS'] == 1])
    half = len(examples['CLASS']) / 2
    return 1 if ones > half else 2
def decision_tree(examples, attributes, parent_examples):
    """Recursively build a decision tree for binary CLASS data (labels 1/2).

    Leaves are the labels themselves; internal nodes are nested dicts of the
    form ``{attribute: {value: subtree, ...}}``.
    """
    # Base cases: empty split, pure node, or no attributes left.
    if examples.empty:
        return plurality(parent_examples)
    if (examples['CLASS'] == 1).all():
        return 1
    if (examples['CLASS'] == 2).all():
        return 2
    if attributes.empty:
        return plurality(examples)
    # Split on the attribute with the highest information gain.
    gains = [importance(a, examples) for a in attributes]
    best = attributes[gains.index(max(gains))]
    children = {}
    for value in examples[best].unique():
        children[value] = decision_tree(
            examples[examples[best] == value], attributes.drop(best), examples
        )
    return {best: children}
def classify(tree, example):
    """Descend *tree* using *example*'s attribute values until a leaf label."""
    attr = next(iter(tree.keys()))
    child = tree[attr][example[attr]]
    return classify(child, example) if isinstance(child, dict) else child
if __name__ == "__main__":
    # Load datasets (hard-coded local CSV paths defined at the top of file):
    training = pd.read_csv(_TRAINING_FILE, header=0)
    testing = pd.read_csv(_TESTING_FILE, header=0)
    # Build tree on all columns except the last (assumed to be 'CLASS'):
    tree = decision_tree(training, training.columns[:-1], None)
    # Test by classifying each dataset and reporting the accuracy:
    for name, data in {"train":training, "test": testing}.items():
        correct = 0
        for _, example in data.iterrows():
            classification = example['CLASS']
            result = classify(tree, example.drop('CLASS'))
            correct += 1 if result == classification else 0
        print("Accuracy on", name, "set:\t", correct / len(data))
|
Python
| 68
| 38.75
| 98
|
/boolean_decision_tree.py
| 0.61695
| 0.607698
|
MagnusPoppe/hidden-markov-models
|
refs/heads/master
|
# %%
import numpy as np
# Transition model for state_t (Answer to to PART A, 1)
# 2x2 state transition matrix (rows sum to 1).
Xt = np.array([[0.7, 0.3], [0.3, 0.7]])
# Sensor model for state_t (Answer to PART A, 2): diagonal observation
# likelihoods for the two evidence outcomes (O1 used when E[k] is True,
# O3 when False — see forward/backward below).
O1 = np.array([[0.9, .0], [.0, 0.2]])
O3 = np.array([[0.1, .0], [.0, 0.8]])
# Uniform prior over the two hidden states.
init = np.array([0.5, 0.5])
def forward(f, Xt, OT, OF, E, k):
    """One recursive filtering step: predict, weight by evidence E[k], normalize.

    Recurses with decreasing *k* and returns the normalized distribution
    produced at k == 0.
    """
    predicted = Xt.transpose().dot(f)            # transition
    sensor = OT if E[k] else OF                  # pick sensor model
    weighted = sensor.dot(predicted)             # evidence update
    normalized = weighted / np.sum(weighted)
    if k == 0:  # base case
        return normalized
    return forward(normalized, Xt, OT, OF, E, k - 1)
def backward(Xt, OT, OF, E, k):
    """Recursive backward message for evidence E[k:], normalized at each step."""
    sensor = OT if E[k] else OF
    if k == len(E) - 1:
        tail = np.array([1, 1])  # base case: all-ones message
    else:
        tail = backward(Xt, OT, OF, E, k + 1)
    message = Xt.dot(sensor).dot(tail)
    return message / np.sum(message)
# Two time steps of positive evidence:
E = [True, True]
rain_day_2 = forward(init, Xt, O1, O3, E, len(E)-1)
print("Probability of rain on day 2 using forward: ", rain_day_2)
# Five time steps of mixed evidence:
E = np.array([True, True, False, True, True])
print("Probability of rain on day 5 using forward: ", forward(init, Xt, O1, O3, E, len(E)-1))
print("Probability of rain on day 2 using backward: ", backward(Xt, O1, O3, E, 0))
|
Python
| 40
| 28
| 94
|
/hmm.py
| 0.554217
| 0.512909
|
MagnusPoppe/hidden-markov-models
|
refs/heads/master
|
import copy
# Setup:
# Toy two-state, two-action problem definition.
s = "s"
states = ["s", "!s"]
actions = ["N", "M"]
# NOTE(review): Xt's keys ("A1"/"A2") do not match ``actions`` — presumably
# transition weights per action; confirm intended semantics.
Xt = {"A1": 1.0, "A2": 1.0}
# Per-state rewards and the discount factor.
R = {"s": 2.0, "!s": 3.0}
y = 0.5
def E(c, R):
    """Scale the largest reward in *R* by *c* (convergence-threshold helper)."""
    return c * max(R.values())
def max_key(dictionary):
    """Return the key of *dictionary* with the largest value (first on ties).

    Bug fix: the original ignored its parameter and always read the global
    ``Xt``; it now operates on the dictionary it is given. For the existing
    call site (``max_key(Xt)``) the result is unchanged.
    """
    return max(dictionary, key=dictionary.get)
def value_iteration(states, Xt, y):
    """Iteratively update per-state utilities U_ until the loop condition
    fails or 1000 iterations pass, then print the (never-filled) policy.

    NOTE(review): several things here look unfinished — see inline notes.
    """
    iterations = 0
    best = 0
    U = [0] * len(states)    # previous utilities
    U_ = [0] * len(states)   # utilities being updated
    A = [""] * len(states)   # NOTE(review): never assigned; printed empty below
    # NOTE(review): ``best < threshold`` keeps looping while the largest
    # utility change is SMALL — the usual convergence test is inverted; confirm.
    while (best < E((1 - y), R) / y and iterations < 1000):
        U = copy.deepcopy(U_)
        best = 0
        for i, state in enumerate(states):
            # Always picks the most likely transition anyway... that is not
            # a good policy... (translated from the original Norwegian note)
            best_action = max_key(Xt)  # NOTE(review): computed but unused
            U_[i] = R[state] + y * max([a * U[i] for a in Xt.values()])
            if abs(U_[i] - U[i]) > best:
                best = abs(U_[i] - U[i])
        iterations += 1
        # y = y * 0.99
    print("Found optimal policy after %d iteration(s)" % iterations)
    print("Best policy: ", str(A))
value_iteration(states, Xt, y)
|
Python
| 45
| 22.911112
| 96
|
/value_iteration_world.py
| 0.496283
| 0.472119
|
SGuo1995/Mushroom-poisonous-prediction
|
refs/heads/master
|
#!/usr/bin/env python
# coding: utf-8
# In[433]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn import tree
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn import linear_model
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import sklearn.metrics as metrics
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score,cross_val_predict
from sklearn.linear_model import RandomizedLogisticRegression
from sklearn.ensemble import ExtraTreesClassifier
import pydotplus
from IPython.display import Image
import os
# os.environ["PATH"] += os.pathsep + 'D:/Anaconda/Graphviz/bin/'
# Load the mushroom dataset (hard-coded local path from the original notebook).
df=pd.read_csv("D:/Class Files/5243/mushrooms.csv",header=0)
df.head(10)
# In[434]:
# Dataset overview: size, dtypes, and class balance.
print('Number of instances = %d' %(df.shape[0]))
print('Number of attributes = %d\n' %(df.shape[1]))
print(df.dtypes)
df['class'].value_counts()
# In[435]:
# Treat '?' as missing and count NaNs per column.
df = df.replace('?',np.NaN)
print('Number of instances = %d' % (df.shape[0]))
print('Number of attributes = %d' % (df.shape[1]))
print('Number of missing values:')
for col in df.columns:
    print('\t%s: %d' % (col,df[col].isna().sum()))
# In[436]:
##### Class poisonous=1 #####
# In[437]:
df.shape
spore_print_color=df['spore-print-color'].value_counts()
print(spore_print_color)
# In[438]:
# Bar chart of spore-print-color frequencies.
m_height = spore_print_color.values.tolist() #Provides numerical values
spore_print_color.axes #Provides row labels
spore_print_color_labels = spore_print_color.axes[0].tolist()
ind = np.arange(9)  # the x locations for the groups
width = 0.8         # the width of the bars
colors = ['#f8f8ff','brown','black','chocolate','red','yellow','orange','blue','purple']
fig, ax = plt.subplots(figsize=(10,5))
mushroom_bars = ax.bar(ind, m_height , width, color=colors)
ax.set_xlabel("spore print color",fontsize=20)
ax.set_ylabel('Quantity',fontsize=20)
ax.set_title('Mushrooms spore print color',fontsize=22)
ax.set_xticks(ind) #Positioning on the x axis
ax.set_xticklabels(('white','brown','black','chocolate','red','yellow','orange','blue','purple')),
# Annotate each bar with its count.
for bars in mushroom_bars:
    height = bars.get_height()
    ax.text(bars.get_x() + bars.get_width()/2., 1*height,'%d' % int(height),
            ha='center', va='bottom',fontsize=10)
plt.show()
# In[439]:
# Grouped bars: edible vs poisonous counts per spore-print color.
poisonous_cc = []
edible_cc = []
for spore_print_color in spore_print_color_labels:
    size = len(df[df['spore-print-color'] == spore_print_color].index)
    edibles = len(df[(df['spore-print-color'] == spore_print_color) & (df['class'] == 'e')].index)
    edible_cc.append(edibles)
    poisonous_cc.append(size-edibles)
width=0.4
fig, ax = plt.subplots(figsize=(12,7))
edible_bars = ax.bar(ind, edible_cc , width, color='g')
poison_bars = ax.bar(ind+width, poisonous_cc , width, color='r')
ax.set_xticks(ind + width / 2) #Positioning on the x axis
ax.set_xticklabels(('white','brown','black','chocolate','red','yellow','orange','blue','purple'))
ax.set_xlabel("spore print color",fontsize=20)
ax.set_ylabel('Quantity',fontsize=20)
ax.set_title('Mushrooms spore print color',fontsize=22)
ax.legend((edible_bars,poison_bars),('edible','poisonous'),fontsize=17)
for bars in edible_bars:
    height = bars.get_height()
    ax.text(bars.get_x() + bars.get_width()/2., 1*height,'%d' % int(height),
            ha='center', va='bottom',fontsize=10)
for bars in poison_bars:
    height = bars.get_height()
    ax.text(bars.get_x() + bars.get_width()/2., 1*height,'%d' % int(height),
            ha='center', va='bottom',fontsize=10)
plt.show()
# In[440]:
# Pie chart of cap-shape proportions.
cap_shape = df['cap-shape'].value_counts()
cap_shapes_size = cap_shape.values.tolist()
cap_shapes_types = cap_shape.axes[0].tolist()
print(cap_shape)
# Data to plot
cap_shape_labels = ('convex','flat','knobbed','bell', 'sunken','conical')
colors = ['r','y','b','brown','g','orange']
explode = (0, 0.1, 0, 0, 0, 0)
fig = plt.figure(figsize=(15,8))
# Plot
plt.title('Mushroom cap shape Type Percentange', fontsize=22)
patches, texts, autotexts = plt.pie(cap_shapes_size, explode=explode, labels=cap_shape_labels, colors=colors,
                                    autopct='%1.1f%%', shadow=True, startangle=160)
for text,autotext in zip(texts,autotexts):
    text.set_fontsize(10)
    autotext.set_fontsize(10)
plt.axis('equal')
plt.show()
# In[441]:
# Encode every categorical column to integers (missing values become the
# literal string "NaN" first so LabelEncoder can handle them).
labelencoder=LabelEncoder()
df[pd.isna(df)]="NaN"
for col in df.columns:
    df[col] = labelencoder.fit_transform(df[col])
df.head(5)
# In[442]:
dups = df.duplicated()
print('Number of duplicate rows = %d' % (dups.sum()))
### No duplicated data #####
# In[443]:
# Boxplots of a few encoded features split by class.
ax = sns.boxplot(x='class', y='odor',
                 data=df)
plt.show()
ax = sns.boxplot(x='class', y='cap-shape',
                 data=df)
plt.show()
ax = sns.boxplot(x='class', y='cap-surface',
                 data=df)
plt.show()
ax = sns.boxplot(x='class', y='cap-color',
                 data=df)
plt.show()
ax = sns.boxplot(x='class', y='bruises',
                 data=df)
plt.show()
# In[444]:
# Cap-shape histograms for each class (NOTE(review): the second title says
# "poisonous" but df3 selects class == 0).
df2=df[df["class"]==1]
df2['cap-shape'].hist()
plt.title('cap shape distribution in poisonous mushrooms')
plt.grid(True)
plt.show()
df3=df[df["class"]==0]
df3['cap-shape'].hist()
plt.title('cap shape distribution in poisonous mushrooms')
plt.grid(True)
plt.show()
# In[445]:
X = df.iloc[:,1:23] # all rows, all the features and no labels
Y = df.iloc[:, 0] # all rows, label only
# In[446]:
X.corr()
# In[447]:
# Standardize features before tree/PCA work.
scaler = StandardScaler()
X=scaler.fit_transform(X)
X
# In[448]:
##### To estimate feature importance ####
model = ExtraTreesClassifier()
model.fit(X, Y)
importance=model.feature_importances_.tolist()
features=df.drop('class',axis=1).columns.values.tolist()
fig, ax = plt.subplots(figsize=(20,5))
ind = np.arange(22)
importance_bars = ax.bar(ind, importance , width=0.1, color=colors)
ax.set_xlabel("Features",fontsize=20)
ax.set_ylabel('Importance',fontsize=20)
ax.set_title('Feature importance',fontsize=22)
ax.set_xticks(ind) #Positioning on the x axis
ax.set_xticklabels(features,rotation='vertical')
index_=importance.index(max(importance))
most_important_features=features[index_]
print('Feature Importance: \n',model.feature_importances_)
print('The most important feature: \n',most_important_features)
# In[449]:
# Full PCA just to inspect the explained-variance spectrum.
pca = PCA()
pca.fit_transform(X)
covariance=pca.get_covariance()
explained_variance=pca.explained_variance_ratio_
with plt.style.context('classic'):
    plt.figure(figsize=(8, 6))
    plt.bar(range(22), explained_variance, alpha=0.5, align='center',
            label='individual explained variance')
    plt.ylabel('Explained variance ratio')
    plt.xlabel('Principal components')
    plt.legend(loc='best')
    plt.tight_layout()
    for a,b in zip(range(22),explained_variance):
        plt.text(a, b+0.005, '%.2f' % b, ha='center', va= 'bottom',fontsize=7)
# In[450]:
# Reduce to 15 components and build the modeling frame.
pca = PCA(n_components=15)
X=pca.fit_transform(X)
df_pca=pd.DataFrame(X)
df_pca['class']=Y
df_pca
####### Prepared to building models #######
X_train, X_test, Y_train, Y_test = train_test_split(df_pca.iloc[:,:-1],df_pca.iloc[:,-1],test_size=0.2,random_state=4)
# In[451]:
### Decision Tree #######
dt=tree.DecisionTreeClassifier(criterion='entropy',random_state=10,max_depth=20)
dt=dt.fit(X_train,Y_train)
print('Scores of the classfier:\n', dt.score(X_test, Y_test))
# In[452]:
# Observing Overfitting or Underfitting ######
# Sweep max_depth and record train/test accuracy at each setting.
maxdepths = [15,20,25,30,35,40,45,50,70,100,120,150,200]
trainAcc = np.zeros(len(maxdepths))
testAcc = np.zeros(len(maxdepths))
index = 0
for depth in maxdepths:
    dt = tree.DecisionTreeClassifier(max_depth=depth)
    dt = dt.fit(X_train, Y_train)
    Y_predTrain = dt.predict(X_train)
    Y_predTest = dt.predict(X_test)
    trainAcc[index] = accuracy_score(Y_train, Y_predTrain)
    testAcc[index] = accuracy_score(Y_test, Y_predTest)
    index += 1
# Plot of training and test accuracies
plt.plot(maxdepths,trainAcc,'ro-',maxdepths,testAcc,'bv--')
plt.legend(['Training Accuracy','Test Accuracy'])
plt.xlabel('Max depth')
plt.ylabel('Accuracy')
# In[453]:
# Final decision tree with the chosen depth.
dt=tree.DecisionTreeClassifier(criterion='entropy',random_state=10,max_depth=20)
dt = dt.fit(X_train, Y_train)
y_pred = dt.predict(X_test)
cfm = confusion_matrix(Y_test, y_pred)
print(cfm)
print(classification_report(Y_test,y_pred))
# In[454]:
### Random Forest ######
rdf = RandomForestClassifier(n_estimators = 30, criterion = 'entropy', random_state = 42)
rdf.fit(X_train, Y_train)
y_pred = rdf.predict(X_test)
cfm = confusion_matrix(Y_test, y_pred)
print(cfm)
print(classification_report(Y_test,y_pred))
# In[455]:
#### SVM ######
# Sweep the regularization strength C and record accuracies.
C = [0.01, 0.1, 0.2, 0.5, 0.8, 1, 5, 10, 20, 50]
SVMtrainAcc = []
SVMtestAcc = []
for param in C:
    svm = SVC(C=param,kernel='rbf',gamma='auto')
    svm.fit(X_train, Y_train)
    Y_predTrain = svm.predict(X_train)
    Y_predTest = svm.predict(X_test)
    SVMtrainAcc.append(accuracy_score(Y_train, Y_predTrain))
    SVMtestAcc.append(accuracy_score(Y_test, Y_predTest))
plt.plot(C, SVMtrainAcc, 'ro-', C, SVMtestAcc,'bv--')
plt.legend(['Training Accuracy','Test Accuracy'])
plt.xlabel('C')
plt.xscale('log')
plt.ylabel('Accuracy')
plt.show()
### Find the optimal hyperparameter C ######
svm = SVC(C=1,kernel='rbf',gamma='auto',probability=True)
svm.fit(X_train, Y_train)
print('Scores of the classfier:\n', svm.score(X_test, Y_test))
y_pred = svm.predict(X_test)
cfm = confusion_matrix(Y_test, y_pred)
print('Confusion matrix: \n',cfm)
print(classification_report(Y_test,y_pred))
# In[456]:
class LogisticRegression_Scratch:
    """Plain-numpy logistic regression trained with full-batch gradient descent."""

    def __init__(self, num_iter, learning_rate, fit_intercept=True):
        """Store hyperparameters; *fit_intercept* adds a bias column to X."""
        self.learning_rate = learning_rate
        self.num_iter = num_iter
        self.fit_intercept = fit_intercept

    def add_intercept(self, X):
        """Prepend a column of ones so theta[0] acts as the intercept."""
        ones = np.ones((X.shape[0], 1))
        return np.concatenate((ones, X), axis=1)

    def sigmoid(self, z):
        """Logistic link: map raw scores to (0, 1) probabilities."""
        return 1 / (1 + np.exp(-z))

    def loss_funtion(self, p, y):  # original spelling kept for compatibility
        """Mean binary cross-entropy between probabilities *p* and labels *y*."""
        return (-y * np.log(p) - (1 - y) * np.log(1 - p)).mean()

    def fit(self, X, y):
        """Learn self.theta via num_iter full-batch gradient-descent steps."""
        if self.fit_intercept:
            X = self.add_intercept(X)
        self.theta = np.zeros(X.shape[1])
        for _ in range(self.num_iter):
            probs = self.sigmoid(np.dot(X, self.theta))
            # Gradient of the mean cross-entropy loss w.r.t. theta.
            grad = np.dot(X.T, (probs - y)) / y.size
            self.theta -= self.learning_rate * grad
        print('Intercept and Coefficient of each attributes: \n', self.theta)

    def predict_prob(self, X):
        """Predicted probability of class 1 for each row of X."""
        if self.fit_intercept:
            X = self.add_intercept(X)
        return self.sigmoid(np.dot(X, self.theta))

    def predict(self, X, threshold):
        """Boolean class decisions: probability >= *threshold*."""
        return self.predict_prob(X) >= threshold
# In[457]:
### Using benchmark dataset which is wine quality dataset to test its performance #####
benchmark_df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv' ,sep=';',header=0)
benchmark_df.head()
# Binarize quality: 0 for quality <= 5, 1 otherwise.
benchmark_df['class'] = benchmark_df['quality'].apply(lambda x: 0 if x<=5 else 1)
#Create a binary class
benchmark_df=benchmark_df.drop(['quality'],axis=1)
benchmark_df.head(10)
benchmark_X=benchmark_df.drop(['class'],axis=1)
benchmark_Y=benchmark_df['class']
scaler = StandardScaler()
benchmark_X=scaler.fit_transform(benchmark_X)
benchmark_X_train,benchmark_X_test,benchmark_Y_train,benchmark_Y_test=train_test_split(benchmark_X,benchmark_Y,test_size=0.2,random_state=4)
# Sanity-check the scratch classifier on the benchmark data.
LR_scratch=LogisticRegression_Scratch(num_iter=30000,learning_rate=0.5)
LR_scratch.fit(benchmark_X_train,benchmark_Y_train)
y_pred_bm=LR_scratch.predict(benchmark_X_test,0.5)
cfm = confusion_matrix(benchmark_Y_test, y_pred_bm)
print('Confusion matrix: \n',cfm)
print(classification_report(benchmark_Y_test,y_pred_bm))
# In[477]:
# Scratch logistic regression on the mushroom PCA features.
LR_scratch=LogisticRegression_Scratch(num_iter=20000,learning_rate=0.05)
LR_scratch.fit(X_train,Y_train)
y_pred1=LR_scratch.predict(X_test,0.4)
cfm = confusion_matrix(Y_test, y_pred1)
print('Confusion matrix: \n',cfm)
print(classification_report(Y_test,y_pred1))
# In[474]:
# scikit-learn logistic regression for comparison.
LR = LogisticRegression(random_state=10, solver='sag').fit(X_train, Y_train)
print('Intercept and Coefficient of each attributes: \n',np.insert(LR.coef_[0],0,LR.intercept_))
y_pred2=LR.predict(X_test)
cfm = confusion_matrix(Y_test, y_pred2)
print('Confusion matrix: \n',cfm)
print(classification_report(Y_test,y_pred2))
# In[481]:
#### kNN ####
# LabelEncoder imposes an artificial ordering on categories, so for kNN the
# features are one-hot encoded (dummies), standardized, and PCA-reduced instead.
####### Preprocessing #####
scaler=StandardScaler()
df_dummies=pd.get_dummies(df,columns=df.columns)
X_dummies=df_dummies.drop(['class_0','class_1'],axis=1)
X_dummies=scaler.fit_transform(X_dummies)
pca = PCA(n_components=15)
X_dummies=pca.fit_transform(X_dummies)
Y=df['class']
X_train_dummies, X_test_dummies, Y_train, Y_test = train_test_split(X_dummies,Y,test_size=0.2,random_state=4)
########## Finding best value of k #######
numNeighbors=[2,5,7,10,15]
trainAcc = []
testAcc = []
for k in numNeighbors:
    knn = KNeighborsClassifier(n_neighbors=k, metric='minkowski', p=2)
    knn.fit(X_train_dummies, Y_train)
    Y_predTrain = knn.predict(X_train_dummies)
    Y_predTest = knn.predict(X_test_dummies)
    trainAcc.append(accuracy_score(Y_train, Y_predTrain))
    testAcc.append(accuracy_score(Y_test, Y_predTest))
plt.plot(numNeighbors, trainAcc, 'ro-', numNeighbors, testAcc,'bv--')
plt.legend(['Training Accuracy','Test Accuracy'])
plt.xlabel('Number of neighbors')
plt.ylabel('Accuracy')
plt.show()
#### Decided k = 5 ####
knn = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)
knn.fit(X_train_dummies, Y_train)
y_pred = knn.predict(X_test_dummies)
cfm = confusion_matrix(Y_test, y_pred)
print(cfm)
print(classification_report(Y_test,y_pred))
# In[480]:
###### Cross Validation to select model ######
### Decision Tree validation #####
# NOTE(review): Y_train was re-assigned by the kNN split above; whether it
# still aligns with X_train here depends on notebook cell execution order.
i=1
accuracy=0
kFold = KFold(n_splits=5, shuffle=True, random_state=None)
for train_index, validation_index in kFold.split(X_train):
    X_train2 = X_train.iloc[train_index]
    X_validation = X_train.iloc[validation_index]
    Y_train2 = Y_train.iloc[train_index]
    Y_validation = Y_train.iloc[validation_index]
    dt.fit(X_train2,Y_train2)
    y_pred=dt.predict(X_validation)
    print("{}'s Iteration\n".format(i))
    print('Scores: \n',dt.score(X_validation,Y_validation))
    print('\n',confusion_matrix(Y_validation,y_pred),'\n')
    print(classification_report(Y_validation,y_pred))
    ### ROC curve for each run ###
    probs = dt.predict_proba(X_validation)
    preds = probs[:,1]
    fpr, tpr, threshold = metrics.roc_curve(Y_validation, preds,pos_label=1)
    roc_auc = metrics.auc(fpr, tpr)
    plt.title('Receiver Operating Characteristic of Decision Tree')
    plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
    plt.legend(loc = 'lower right')
    plt.plot([0, 1], [0, 1],'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
    i=i+1
    score=dt.score(X_validation,Y_validation)
    accuracy=accuracy+score
print('Average accuracy of k-runs: \n',(accuracy/5))
# In[462]:
#### Cross Validation to evaluate Decision Tree using average scores #####
scores = cross_val_score(dt, X_train, Y_train, cv=5, scoring='accuracy')
print(scores)
print('Mean score:\n',scores.mean())
# In[463]:
#### Random Forest validation #####
# Same 5-fold loop as the decision tree, with per-fold ROC curves.
i=1
accuracy=0
kFold = KFold(n_splits=5, shuffle=True, random_state=None)
for train_index, validation_index in kFold.split(X_train):
    X_train2 = X_train.iloc[train_index]
    X_validation = X_train.iloc[validation_index]
    Y_train2 = Y_train.iloc[train_index]
    Y_validation = Y_train.iloc[validation_index]
    rdf.fit(X_train2,Y_train2)
    y_pred=rdf.predict(X_validation)
    print("{}'s Iteration\n".format(i))
    print('Scores: \n',rdf.score(X_validation,Y_validation))
    print('\n',confusion_matrix(Y_validation,y_pred),'\n')
    print(classification_report(Y_validation,y_pred))
    ### ROC curve for each run ###
    probs = rdf.predict_proba(X_validation)
    preds = probs[:,1]
    fpr, tpr, threshold = metrics.roc_curve(Y_validation, preds,pos_label=1)
    roc_auc = metrics.auc(fpr, tpr)
    plt.title('Receiver Operating Characteristic of Random Forest')
    plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
    plt.legend(loc = 'lower right')
    plt.plot([0, 1], [0, 1],'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
    i=i+1
    score=rdf.score(X_validation,Y_validation)
    accuracy=accuracy+score
print('Average accuracy of k-runs: \n',(accuracy/5))
# In[464]:
scores = cross_val_score(rdf, X_train, Y_train, cv=5, scoring='accuracy')
print(scores)
print('Mean score:\n',scores.mean())
# In[465]:
##### SVM validation ###
i=1
accuracy=0
kFold = KFold(n_splits=5, shuffle=True, random_state=None)
for train_index, validation_index in kFold.split(X_train):
X_train2 = X_train.iloc[train_index]
X_validation = X_train.iloc[validation_index]
Y_train2 = Y_train.iloc[train_index]
Y_validation = Y_train.iloc[validation_index]
svm.fit(X_train2,Y_train2)
y_pred=rdf.predict(X_validation)
print("{}'s Iteration\n".format(i))
print('Scores: \n',svm.score(X_validation,Y_validation))
print('\n',confusion_matrix(Y_validation,y_pred),'\n')
print(classification_report(Y_validation,y_pred))
### ROC curve for each run ###
probs = svm.predict_proba(X_validation)
preds = probs[:,1]
fpr, tpr, threshold = metrics.roc_curve(Y_validation, preds,pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.title('Receiver Operating Characteristic of SVM')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
i=i+1
score=svm.score(X_validation,Y_validation)
accuracy=accuracy+score
print('Average accuracy of k-runs: \n',(accuracy/5))
# In[466]:
scores = cross_val_score(svm, X_train, Y_train, cv=5, scoring='accuracy')
print(scores)
print('Mean score:\n',scores.mean())
# In[467]:
##### LogesticRegression_scratch #####
# 5-fold cross-validation of the from-scratch logistic regression.
i=1
accuracy=0
kFold = KFold(n_splits=5, shuffle=True, random_state=None)
for train_index, validation_index in kFold.split(X_train):
    X_train2 = X_train.iloc[train_index]
    X_validation = X_train.iloc[validation_index]
    Y_train2 = Y_train.iloc[train_index]
    Y_validation = Y_train.iloc[validation_index]
    LR_scratch.fit(X_train2,Y_train2)
    y_pred=LR_scratch.predict(X_validation,0.5)
    print("{}'s Iteration\n".format(i))
    print('Scores: \n',accuracy_score(Y_validation,y_pred))
    print('\n',confusion_matrix(Y_validation,y_pred),'\n')
    print(classification_report(Y_validation,y_pred))
    ### ROC curve for each run ###
    # predict_prob returns a 1-D probability vector, so no column slicing.
    probs = LR_scratch.predict_prob(X_validation)
    fpr, tpr, threshold = metrics.roc_curve(Y_validation, probs,pos_label=1)
    roc_auc = metrics.auc(fpr, tpr)
    plt.title('Receiver Operating Characteristic of Logistic Regression from scratch')
    plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
    plt.legend(loc = 'lower right')
    plt.plot([0, 1], [0, 1],'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
    i=i+1
    score=accuracy_score(Y_validation,y_pred)
    accuracy=accuracy+score
print('Average accuracy of k-runs: \n',(accuracy/5))
# In[468]:
##### LogisticRegression #####
# 5-fold cross-validation of the scikit-learn logistic regression.
i=1
accuracy=0
kFold = KFold(n_splits=5, shuffle=True, random_state=None)
for train_index, validation_index in kFold.split(X_train):
    X_train2 = X_train.iloc[train_index]
    X_validation = X_train.iloc[validation_index]
    Y_train2 = Y_train.iloc[train_index]
    Y_validation = Y_train.iloc[validation_index]
    LR.fit(X_train2,Y_train2)
    y_pred=LR.predict(X_validation)
    print("{}'s Iteration\n".format(i))
    print('Scores: \n',LR.score(X_validation,Y_validation))
    print('\n',confusion_matrix(Y_validation,y_pred),'\n')
    print(classification_report(Y_validation,y_pred))
    ### ROC curve for each run ###
    probs = LR.predict_proba(X_validation)
    preds = probs[:,1]
    fpr, tpr, threshold = metrics.roc_curve(Y_validation, preds,pos_label=1)
    roc_auc = metrics.auc(fpr, tpr)
    plt.title('Receiver Operating Characteristic of Logistic Regression')
    plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
    plt.legend(loc = 'lower right')
    plt.plot([0, 1], [0, 1],'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
    i=i+1
    score=LR.score(X_validation,Y_validation)
    accuracy=accuracy+score
print('Average accuracy of k-runs: \n',(accuracy/5))
# In[469]:
scores = cross_val_score(LR, X_train, Y_train, cv=5, scoring='accuracy')
print(scores)
print('Mean score:\n',scores.mean())
# In[470]:
### kNN ######
# 5-fold cross-validation of kNN on the one-hot/PCA features; the ndarray is
# wrapped in a DataFrame so .iloc indexing matches the other loops.
i=1
accuracy=0
kFold = KFold(n_splits=5, shuffle=True, random_state=None)
X_train_dummies=pd.DataFrame(X_train_dummies)
for train_index, validation_index in kFold.split(X_train_dummies):
    X_train2 = X_train_dummies.iloc[train_index]
    X_validation = X_train_dummies.iloc[validation_index]
    Y_train2 = Y_train.iloc[train_index]
    Y_validation = Y_train.iloc[validation_index]
    knn.fit(X_train2,Y_train2)
    y_pred=knn.predict(X_validation)
    print("{}'s Iteration\n".format(i))
    print('Scores: \n',knn.score(X_validation,Y_validation))
    print('\n',confusion_matrix(Y_validation,y_pred),'\n')
    print(classification_report(Y_validation,y_pred))
    ### ROC curve for each run ###
    probs = knn.predict_proba(X_validation)
    preds = probs[:,1]
    fpr, tpr, threshold = metrics.roc_curve(Y_validation, preds,pos_label=1)
    roc_auc = metrics.auc(fpr, tpr)
    plt.title('Receiver Operating Characteristic of kNN')
    plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
    plt.legend(loc = 'lower right')
    plt.plot([0, 1], [0, 1],'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
    i=i+1
    score=knn.score(X_validation,Y_validation)
    accuracy=accuracy+score
print('Average accuracy of k-runs: \n',(accuracy/5))
# In[471]:
scores = cross_val_score(knn, X_train, Y_train, cv=5, scoring='accuracy')
print(scores)
print('Mean score:\n',scores.mean())
# In[482]:
##### knn, SVM, Random Forest highest scores, Decision tree a little bit lower, the two Logistic Regression classifier loweset with about 0.90 ##
### knn might cause dimension sparse ####
### Choose kNN as my model ####
# Final fit and held-out evaluation of the chosen model.
knn = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)
knn.fit(X_train_dummies, Y_train)
print('Scores of the kNN classfier:\n', knn.score(X_test_dummies, Y_test))
y_pred = knn.predict(X_test_dummies)
cfm = confusion_matrix(Y_test, y_pred)
print(cfm)
print(classification_report(Y_test,y_pred))
def get_confusion_matrix_values(y_true, y_pred):
    """Flatten the 2x2 confusion matrix into (TN, FP, FN, TP) order."""
    matrix = confusion_matrix(y_true, y_pred)
    return matrix[0][0], matrix[0][1], matrix[1][0], matrix[1][1]
# Report true-positive and false-positive rates of the final model.
TN, FP, FN, TP = get_confusion_matrix_values(Y_test, y_pred)
print('\nTPR: ',TP/(TP+FN))
print('\nFPR: ',FP/(FP+TN))
# In[ ]:
|
Python
| 907
| 27.802647
| 308
|
/Final Project_guo_1449.py
| 0.667215
| 0.646739
|
greenmato/slackline-spots
|
refs/heads/master
|
# Generated by Django 2.0.1 on 2018-03-05 22:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Vote model and alter Rating.score to a plain IntegerField."""

    dependencies = [
        ('map', '0008_auto_20180305_2211'),
    ]

    operations = [
        migrations.CreateModel(
            name='Vote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('positive', models.BooleanField()),
                ('spot', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='map.Spot')),
            ],
        ),
        migrations.AlterField(
            model_name='rating',
            name='score',
            field=models.IntegerField(),
        ),
    ]
|
Python
| 27
| 27.925926
| 114
|
/spots-api/map/migrations/0009_auto_20180305_2215.py
| 0.564661
| 0.524968
|
greenmato/slackline-spots
|
refs/heads/master
|
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from map.views import MapView
from map.api import SpotsApi, SpotApi, RatingsApi, VotesApi
app_name = 'map'

# Map UI at the root; API endpoints for spots and their nested ratings/votes.
urlpatterns = [
    path('', MapView.as_view(), name='index'),
    path('spots/', SpotsApi.as_view()),
    path('spots/<int:spot_id>/', SpotApi.as_view()),
    path('spots/<int:spot_id>/ratings/', RatingsApi.as_view()),
    path('spots/<int:spot_id>/votes/', VotesApi.as_view()),
]

# Serve uploaded media through Django itself in DEBUG mode only.
if settings.DEBUG is True:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
Python
| 19
| 34.052631
| 80
|
/spots-api/map/urls.py
| 0.636637
| 0.636637
|
greenmato/slackline-spots
|
refs/heads/master
|
from django import forms
from django.forms import ModelForm, Textarea
from map.models import Spot, Rating, Vote
class SpotForm(ModelForm):
    """Form for creating/editing a Spot; coordinates are set by the map UI,
    so they are submitted via hidden inputs."""
    class Meta:
        model = Spot
        fields = ['name', 'description', 'latitude', 'longitude']
        widgets = {
            'latitude': forms.HiddenInput(),
            'longitude': forms.HiddenInput(),
        }
class RatingForm(ModelForm):
    """Form for submitting a Rating; the target spot and rating type are
    pre-filled by the page, so only the score is user-visible."""
    class Meta:
        model = Rating
        fields = ['spot', 'rating_type', 'score']
        widgets = {
            'spot': forms.HiddenInput(),
            'rating_type': forms.HiddenInput(),
        }
class VoteForm(ModelForm):
    """Form for casting a Vote; the up/down value is set via a hidden input."""
    class Meta:
        model = Vote
        fields = ['positive']
        widgets = {
            'positive': forms.HiddenInput(),
        }
|
Python
| 30
| 24.766666
| 65
|
/spots-api/map/forms.py
| 0.560155
| 0.560155
|
greenmato/slackline-spots
|
refs/heads/master
|
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
class Spot(models.Model):
    """A slackline spot: a named, described map point with votes and ratings."""

    name = models.CharField(max_length=50)
    description = models.CharField(max_length=500)
    latitude = models.DecimalField(max_digits=10, decimal_places=7)
    longitude = models.DecimalField(max_digits=10, decimal_places=7)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    def __str__(self):
        return "Spot %s - %s: %s" % (self.id, self.name, self.description)

    def get_score(self):
        """Net vote score: +1 for each positive vote, -1 for each negative."""
        total = 0
        for vote in Vote.objects.filter(spot=self.id):
            total += 1 if vote.positive else -1
        return total

    def get_ratings_dict(self):
        """Map each rating-type name to its summed score divided by the total
        rating count, rounded to 2 decimals.

        NOTE(review): the divisor is the TOTAL number of ratings, not the
        per-type count — confirm that is intended before changing it.
        """
        ratings = Rating.objects.filter(spot=self.id)
        totals = {}
        for rating in ratings:
            key = rating.rating_type.name
            totals[key] = totals.get(key, 0) + rating.score
        count = ratings.count()
        return {key: round(value / count, 2) for key, value in totals.items()}
class RatingType(models.Model):
    """A named category that ratings are grouped under."""
    name = models.CharField(max_length=50)

    def __str__(self):
        return self.name
class Rating(models.Model):
    """A single 1-10 score of one type given to one spot."""
    spot = models.ForeignKey(Spot, on_delete=models.CASCADE)
    rating_type = models.ForeignKey(RatingType, on_delete=models.CASCADE)
    # Score is constrained to the inclusive range [1, 10].
    score = models.IntegerField(
        validators=[
            MaxValueValidator(10),
            MinValueValidator(1)
        ]
    )
class Vote(models.Model):
    """An up (positive=True) or down (positive=False) vote on a spot."""
    spot = models.ForeignKey(Spot, on_delete=models.CASCADE)
    positive = models.BooleanField()
|
Python
| 64
| 28.453125
| 75
|
/spots-api/map/models.py
| 0.64191
| 0.6313
|
greenmato/slackline-spots
|
refs/heads/master
|
# Generated by Django 2.0.1 on 2018-03-05 21:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widen Spot coordinates to 10 digits / 7 decimals."""

    dependencies = [
        ('map', '0004_ratingtype'),
    ]

    operations = [
        migrations.AlterField(
            model_name='spot',
            name='latitude',
            field=models.DecimalField(decimal_places=7, max_digits=10),
        ),
        migrations.AlterField(
            model_name='spot',
            name='longitude',
            field=models.DecimalField(decimal_places=7, max_digits=10),
        ),
    ]
|
Python
| 23
| 23.782608
| 71
|
/spots-api/map/migrations/0005_auto_20180305_2131.py
| 0.573684
| 0.529825
|
greenmato/slackline-spots
|
refs/heads/master
|
from django.shortcuts import render
from django.views import View
class MapView(View):
    """Renders the map page; all data is then fetched via the JSON API."""
    def get(self, request):
        return render(request, 'map/index.html')
|
Python
| 6
| 26.5
| 48
|
/spots-api/map/views.py
| 0.727273
| 0.727273
|
greenmato/slackline-spots
|
refs/heads/master
|
# Generated by Django 2.0.1 on 2018-03-05 21:39
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drop the `_id` suffix from Rating FK field names."""

    dependencies = [
        ('map', '0006_rating'),
    ]

    operations = [
        migrations.RenameField(
            model_name='rating',
            old_name='rating_type_id',
            new_name='rating_type',
        ),
        migrations.RenameField(
            model_name='rating',
            old_name='spot_id',
            new_name='spot',
        ),
    ]
|
Python
| 23
| 20.608696
| 47
|
/spots-api/map/migrations/0007_auto_20180305_2139.py
| 0.527163
| 0.488934
|
greenmato/slackline-spots
|
refs/heads/master
|
# Generated by Django 2.0.1 on 2018-03-05 22:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: rename rating_type->rating and add the score field."""

    dependencies = [
        ('map', '0007_auto_20180305_2139'),
    ]

    operations = [
        migrations.RenameField(
            model_name='rating',
            old_name='rating_type',
            new_name='rating',
        ),
        migrations.AddField(
            model_name='rating',
            name='score',
            field=models.IntegerField(default=0),
        ),
    ]
|
Python
| 23
| 21.652174
| 49
|
/spots-api/map/migrations/0008_auto_20180305_2211.py
| 0.547025
| 0.485605
|
greenmato/slackline-spots
|
refs/heads/master
|
from abc import ABC, ABCMeta, abstractmethod
from django.forms.models import model_to_dict
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from map.models import Spot
from map.models import Vote
from map.forms import SpotForm, VoteForm
class BaseApi(View):
    """Shared JSON envelope helpers for all API views."""
    __metaclass__ = ABCMeta

    def _response(self, body):
        """Wrap *body* in the standard {'data': ...} success envelope."""
        return JsonResponse({'data': body})

    def _error_response(self, status, error):
        """Return an {'error': ...} envelope with the given HTTP status."""
        return JsonResponse({'error': error}, status=status)
class BaseSpotsApi(BaseApi):
    """Spot serialization shared by the spot list/detail endpoints."""
    __metaclass__ = ABCMeta

    def _spot_to_dict(self, spot):
        """Serialize *spot* to a dict and attach its computed vote score."""
        serialized = model_to_dict(spot)
        serialized['score'] = spot.get_score()
        return serialized
# @method_decorator(csrf_exempt, name='dispatch')
class SpotsApi(BaseSpotsApi):
    """List all spots and create new ones."""

    def get(self, request):
        # TODO: only retrieve nearest spots and make them dynamically load as the map moves
        nearby_spots = Spot.objects.all()
        return self._response([self._spot_to_dict(s) for s in nearby_spots])

    def post(self, request):
        form = SpotForm(request.POST)
        if form.is_valid():
            # Fix: build the model from the form's cleaned (validated and
            # type-coerced) data instead of the raw POST strings.
            new_spot = Spot(
                name=form.cleaned_data['name'],
                description=form.cleaned_data['description'],
                latitude=form.cleaned_data['latitude'],
                longitude=form.cleaned_data['longitude'],
            )
            new_spot.save()
            return self._response(self._spot_to_dict(new_spot))
        return self._error_response(422, 'Invalid input.')
class SpotApi(BaseSpotsApi):
    """Retrieve a single spot by primary key (404 if missing)."""
    def get(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        return self._response(self._spot_to_dict(spot))
# @method_decorator(csrf_exempt, name='dispatch')
class RatingsApi(BaseApi):
    """Read per-type rating averages for a spot (creation not implemented)."""

    def get(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        # Bug fix: the previous body referenced the unimported `Rating`
        # model and an undefined `rating_type` variable (NameError at
        # request time) and then returned None. Delegate aggregation to
        # the model, which already knows how to build the dict.
        return self._response(spot.get_ratings_dict())

    def post(self, request, spot_id):
        # Validate the spot exists; rating creation is not implemented yet.
        # (The old stub returned None, which crashes Django's response
        # handling — answer with an explicit 501 instead.)
        get_object_or_404(Spot, pk=spot_id)
        return self._error_response(501, 'Not implemented.')
# @method_decorator(csrf_exempt, name='dispatch')
class VotesApi(BaseApi):
    """Read a spot's net vote score and cast new votes."""

    def get(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        return self._response(spot.get_score())

    def post(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        form = VoteForm(request.POST)
        if form.is_valid():
            # Bug fix: use the form's cleaned boolean. The raw
            # request.POST['positive'] is a string, and any non-empty
            # string (including "False") is truthy, so every vote was
            # being stored as positive.
            new_vote = Vote(spot=spot, positive=form.cleaned_data['positive'])
            new_vote.save()
            return self._response(model_to_dict(new_vote))
        return self._error_response(422, 'Invalid input.')
|
Python
| 95
| 29.56842
| 91
|
/spots-api/map/api.py
| 0.642906
| 0.634642
|
greenmato/slackline-spots
|
refs/heads/master
|
# Generated by Django 2.0 on 2017-12-17 18:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: create the Spot table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Spot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('description', models.CharField(max_length=500)),
                ('latitude', models.DecimalField(decimal_places=6, max_digits=9)),
                ('longitude', models.DecimalField(decimal_places=6, max_digits=9)),
            ],
        ),
    ]
|
Python
| 24
| 28.791666
| 114
|
/spots-api/map/migrations/0001_initial.py
| 0.573427
| 0.541259
|
greenmato/slackline-spots
|
refs/heads/master
|
# Generated by Django 2.0.1 on 2018-03-06 21:19
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename Rating.rating back to rating_type."""

    dependencies = [
        ('map', '0009_auto_20180305_2215'),
    ]

    operations = [
        migrations.RenameField(
            model_name='rating',
            old_name='rating',
            new_name='rating_type',
        ),
    ]
|
Python
| 18
| 19.222221
| 47
|
/spots-api/map/migrations/0010_auto_20180306_2119.py
| 0.565934
| 0.480769
|
Turing-IA-IHC/Heart-Attack-Detection-In-Images
|
refs/heads/master
|
"""
Heart attack detection in colour images using convolutional neural networks
This code make a neural network to detect infarcts
Written by Gabriel Rojas - 2019
Copyright (c) 2019 G0 S.A.S.
Licensed under the MIT License (see LICENSE for details)
"""
from os import scandir
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import load_img, img_to_array
from keras.preprocessing.image import ImageDataGenerator
# === Configuration vars ===
# Path of image folder
INPUT_PATH_TEST = "./dataset/test/"
MODEL_PATH = "./model/" + "model.h5"  # Full path of model
# Test configurations
WIDTH, HEIGHT = 256, 256  # Size images to train
CLASS_COUNTING = True  # Test class per class and show details each
BATCH_SIZE = 32  # How many images at the same time, change depending on your GPU
CLASSES = ['00None', '01Infarct']  # Classes to detect. they most be in same position with output vector
# === ===== ===== ===== ===

# Load the trained network once at import time; `predict` below reuses it.
print("Loading model from:", MODEL_PATH)
NET = load_model(MODEL_PATH)
NET.summary()
def predict(file):
    """Classify a single image file with the global NET model.

    Returns a tuple (predicted_class_name, raw_output_vector).
    """
    img = load_img(file, target_size=(WIDTH, HEIGHT))
    batch = np.expand_dims(img_to_array(img), axis=0)
    scores = NET.predict(batch)[0]
    return CLASSES[np.argmax(scores)], scores
print("\n======= ======== ========")
if CLASS_COUNTING:
    # Per-class mode: walk each class sub-folder under the test path,
    # predict every image and report per-class and overall accuracy.
    folders = [arch.name for arch in scandir(INPUT_PATH_TEST) if arch.is_file() == False]
    generalSuccess = 0
    generalCases = 0
    for f in folders:
        files = [arch.name for arch in scandir(INPUT_PATH_TEST + f) if arch.is_file()]
        # Folder name doubles as the class label.
        clase = f.replace(INPUT_PATH_TEST, '')
        print("Class: ", clase)
        indivSuccess = 0
        indivCases = 0
        for a in files:
            p, r = predict(INPUT_PATH_TEST + f + "/" + a)
            if p == clase:
                indivSuccess = indivSuccess + 1
            #elif p == '00None':
            #    print(f + "/" + a)
            indivCases = indivCases + 1
        print("\tCases", indivCases, "Success", indivSuccess, "Rate", indivSuccess/indivCases)
        generalSuccess = generalSuccess + indivSuccess
        generalCases = generalCases + indivCases
    print("Totals: ")
    print("\tCases", generalCases, "Success", generalSuccess, "Rate", generalSuccess/generalCases)
else:
    # Bulk mode: evaluate loss/accuracy/mse over the whole test directory
    # with a Keras generator.
    test_datagen = ImageDataGenerator()
    test_gen = test_datagen.flow_from_directory(
        INPUT_PATH_TEST,
        target_size=(HEIGHT, WIDTH),
        batch_size=BATCH_SIZE,
        class_mode='categorical')
    scoreSeg = NET.evaluate_generator(test_gen, 100)
    progress = 'loss: {}, acc: {}, mse: {}'.format(
        round(float(scoreSeg[0]), 4),
        round(float(scoreSeg[1]), 4),
        round(float(scoreSeg[2]), 4)
    )
    print(progress)
print("======= ======== ========")
|
Python
| 88
| 31.875
| 105
|
/test.py
| 0.59678
| 0.583361
|
Turing-IA-IHC/Heart-Attack-Detection-In-Images
|
refs/heads/master
|
"""
Heart attack detection in colour images using convolutional neural networks
This code make a neural network to detect infarcts
Written by Gabriel Rojas - 2019
Copyright (c) 2019 G0 S.A.S.
Licensed under the MIT License (see LICENSE for details)
"""
import os
import sys
from time import time
import tensorflow
import keras
from keras import backend as K
from keras.models import Sequential
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dropout, Flatten, Dense, Activation
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
# === Configuration vars ===
# Path of image folder (use slash at the end)
INPUT_PATH_TRAIN = "./dataset/train/"
INPUT_PATH_VAL = "./dataset/val/"
INPUT_PATH_TEST = "./dataset/test/"
OUTPUT_DIR = "./model/"
# Checkpoints
EPOCH_CHECK_POINT = 2  # How many epoch til save next checkpoint
NUM_CHECK_POINT = 10  # How many epoch will be saved
KEEP_ONLY_LATEST = False  # Keeping only the last checkpoint
# Train configurations
WIDTH, HEIGHT = 256, 256  # Size images to train
STEPS = 500  # How many steps per epoch
VALIDATION_STEPS = 100  # How many steps per next validation
BATCH_SIZE = 48  # How many images at the same time, change depending on your GPU
LR = 0.003  # Learning rate
CLASSES = 2  # Don't chage, 0=Infarct, 1=Normal
# === ===== ===== ===== ===

# Ensure the checkpoint directory exists and reset any previous Keras state.
if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)
K.clear_session()

# Plain generators (no augmentation configured) reading images from the
# per-class sub-folders of each split.
train_datagen = ImageDataGenerator()
val_datagen = ImageDataGenerator()
test_datagen = ImageDataGenerator()
train_gen = train_datagen.flow_from_directory(
    INPUT_PATH_TRAIN,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical')
val_gen = val_datagen.flow_from_directory(
    INPUT_PATH_VAL,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical')
test_gen = test_datagen.flow_from_directory(
    INPUT_PATH_TEST,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical')
# Convolutional feature extractor: five conv blocks with growing filter
# counts, each followed by max pooling, then dropout and flatten.
NET = Sequential()
NET.add(Convolution2D(64, kernel_size=(3 ,3), padding ="same", input_shape=(256, 256, 3), activation='relu'))
NET.add(MaxPooling2D((3,3), strides=(3,3)))
NET.add(Convolution2D(128, kernel_size=(3, 3), activation='relu'))
NET.add(MaxPooling2D((3,3), strides=(3,3)))
NET.add(Convolution2D(256, kernel_size=(3, 3), activation='relu'))
NET.add(MaxPooling2D((2,2), strides=(2,2)))
NET.add(Convolution2D(512, kernel_size=(3, 3), activation='relu'))
NET.add(MaxPooling2D((2,2), strides=(2,2)))
NET.add(Convolution2D(1024, kernel_size=(3, 3), activation='relu'))
NET.add(MaxPooling2D((2,2), strides=(2,2)))
NET.add(Dropout(0.3))
NET.add(Flatten())
# Dense head: three identical groups of five Dense(128) layers, each group
# capped with Dropout(0.5) — 15 hidden layers in total.
# NOTE(review): the three identical loops look like a copy-paste; confirm
# 15 dense layers (rather than 3 or 5) is intentional.
for _ in range(5):
    NET.add(Dense(128, activation='relu'))
NET.add(Dropout(0.5))
for _ in range(5):
    NET.add(Dense(128, activation='relu'))
NET.add(Dropout(0.5))
for _ in range(5):
    NET.add(Dense(128, activation='relu'))
NET.add(Dropout(0.5))
NET.add(Dense(CLASSES, activation='softmax'))

# SGD with momentum and slight decay; track accuracy and MSE.
sgd = SGD(lr=LR, decay=1e-4, momentum=0.9, nesterov=True)
NET.compile(optimizer=sgd,
    loss='binary_crossentropy',
    metrics=['acc', 'mse'])
NET.summary()

# Train in NUM_CHECK_POINT rounds of EPOCH_CHECK_POINT epochs, saving a
# numbered snapshot of the model after each round.
for i in range(NUM_CHECK_POINT):
    NET.fit_generator(
        train_gen,
        steps_per_epoch=STEPS,
        epochs=EPOCH_CHECK_POINT,
        validation_data=val_gen,
        validation_steps=VALIDATION_STEPS,
        verbose=1
    )
    print('Saving model: {:02}.'.format(i))
    NET.save(OUTPUT_DIR + "{:02}_model.h5".format(i))
|
Python
| 115
| 30.18261
| 109
|
/train.py
| 0.658471
| 0.624156
|
horsinLin/Ajax-Project
|
refs/heads/master
|
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
import pymysql
pymysql.install_as_MySQLdb()
app = Flask(__name__)
# NOTE(review): the password contains '@' ("horsin@123"), which is ambiguous
# inside a database URI — verify SQLAlchemy parses this as intended (it
# should normally be percent-encoded as %40).
app.config['SQLALCHEMY_DATABASE_URI']="mysql://root:horsin@123@localhost:3306/flask"
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
class loginUser(db.Model):
    """User account row used by the login demo."""
    __tablename__ = "loginUser"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(30), unique=True)
    passwd = db.Column(db.String(120))

    def __init__(self, username, passwd):
        self.username = username
        self.passwd = passwd

    def __repr__(self):
        return "<loginUser: %r>" % self.username
# Create tables at import time (no-op if they already exist).
db.create_all()

@app.route('/login')
def login_views():
    """Render the login page."""
    return render_template('06-login.html')
@app.route('/server', methods=['POST'])
def server_views():
    """Look up the POSTed username and report whether the account exists."""
    name = request.form['username']
    match = loginUser.query.filter_by(username=name).first()
    if not match:
        return "找不到该用户!"
    return "找到用户名为 %s 的账户" % match.username
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
|
Python
| 42
| 26
| 84
|
/day02/练习/02-run.py
| 0.663725
| 0.651368
|
horsinLin/Ajax-Project
|
refs/heads/master
|
from flask import Flask, render_template, request
app = Flask(__name__)

@app.route('/01-getxhr')
def getxhr():
    # Demo page: raw XMLHttpRequest creation.
    return render_template('01-getxhr.html')

@app.route('/02-get')
def get_views():
    # Demo page: AJAX GET without parameters.
    return render_template('02-get.html')

@app.route('/03-get')
def get03_view():
    # Demo page: AJAX GET with a query-string parameter.
    return render_template('03-get.html')

@app.route('/02-server')
def server02_views():
    # Backend for the parameterless GET demo.
    return "这是AJAX的请求"

@app.route('/03-server')
def server03_views():
    # Backend for the GET-with-parameter demo; echoes the uname arg.
    uname = request.args.get('uname')
    return "欢迎: "+uname

@app.route('/04-post')
def post_views():
    # Demo page: AJAX POST.
    return render_template('04-post.html')

@app.route('/04-server', methods=['POST'])
def server04_views():
    # Backend for the POST demo; echoes the posted uname field.
    uname = request.form['uname']
    return uname

@app.route('/05-post')
def post05_views():
    # Demo page: a second POST exercise.
    return render_template('05-post.html')

if __name__ == '__main__':
    app.run(debug=True)
|
Python
| 40
| 20.125
| 49
|
/day02/练习/01-run.py
| 0.648104
| 0.60545
|
horsinLin/Ajax-Project
|
refs/heads/master
|
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
import json
import pymysql
# Use PyMySQL as a drop-in replacement for MySQLdb.
pymysql.install_as_MySQLdb()
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"]="mysql://root:horsin@123@localhost:3306/flask"
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
class Province(db.Model):
    """A province; parent side of the province -> cities relationship."""
    __tablename__ = "province"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    proname = db.Column(db.String(30), nullable=False)
    cities = db.relationship("City", backref="province", lazy="dynamic")

    def __init__(self, proname):
        self.proname = proname

    def to_dict(self):
        """Serialize to a plain dict for json.dumps."""
        dic = {
            'id' : self.id,
            'proname' : self.proname
        }
        return dic

    def __repr__(self):
        return "<Province : %r>" % self.proname
class City(db.Model):
    """A city belonging to one province via the pro_id foreign key."""
    __tablename__ = "city"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    cityname = db.Column(db.String(30), nullable=False)
    pro_id = db.Column(db.Integer, db.ForeignKey("province.id"))

    def __init__(self, cityname, pro_id):
        self.cityname = cityname
        self.pro_id = pro_id

    def to_dict(self):
        """Serialize to a plain dict for json.dumps."""
        dic = {
            'id' : self.id,
            'cityname' : self.cityname,
            'pro_id' : self.pro_id
        }
        return dic

    def __repr__(self):
        return "<City : %r>" % self.cityname
# Create tables at import time (no-op if they already exist).
db.create_all()

@app.route('/province')
def province_views():
    """Render the cascading province/city selection page."""
    return render_template('03-province.html')
@app.route('/loadPro')
def loadPro_views():
    """Return every province as a JSON array of dicts."""
    provinces = Province.query.all()
    serialized = [province.to_dict() for province in provinces]
    return json.dumps(serialized)
@app.route('/loadCity')
def loadCity_view():
    """Return the cities of the province given by the `pid` query arg as JSON."""
    pid = request.args.get('pid')
    cities = City.query.filter_by(pro_id=pid).all()
    # Bug fix: the view previously returned a bare Python list, which Flask
    # cannot use as a response (TypeError). Serialize to a JSON string, the
    # same as the sibling /loadPro endpoint.
    return json.dumps([city.to_dict() for city in cities])
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run(debug=True)
|
Python
| 78
| 24.858974
| 89
|
/day03/练习/02-run.py
| 0.58568
| 0.579475
|
horsinLin/Ajax-Project
|
refs/heads/master
|
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
import json
import pymysql
# Use PyMySQL as a drop-in replacement for MySQLdb.
pymysql.install_as_MySQLdb()
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"]="mysql://root:horsin@123@localhost:3306/flask"
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
class Users(db.Model):
    """User row backing the JSON-serialization demos below."""
    __tablename__ = "users"
    id = db.Column(db.Integer,primary_key=True)
    uname = db.Column(db.String(50))
    upwd = db.Column(db.String(50))
    realname = db.Column(db.String(30))

    # 将当前对象中的所有属性封装到一个字典中
    # (Wrap all attributes of this object into a dict.)
    def to_dict(self):
        dic = {
            "id" : self.id,
            "uname" : self.uname,
            "upwd" : self.upwd,
            "realname" : self.realname
        }
        return dic

    def __init__(self,uname,upwd,realname):
        self.uname = uname
        self.upwd = upwd
        self.realname = realname

    def __repr__(self):
        return "<Users : %r>" % self.uname
@app.route('/json')
def json_views():
    """Demo endpoint: serialize a hand-built dict to a JSON string."""
    # list = ["Fan Bingbing","Li Chen","Cui Yongyuan"]
    dic = {
        'name' : 'Bingbing Fan',
        'age' : 40,
        'gender' : "female"
    }
    # uList is built but unused — only `dic` is serialized below.
    uList = [
        {
            'name' : 'Bingbing Fan',
            'age' : 40,
            'gender' : "female"
        },
        {
            'name' : 'Li Chen',
            "age" : 40,
            "gender" : 'male'
        }
    ]
    # jsonStr = json.dumps(list)
    jsonStr = json.dumps(dic)
    return jsonStr
@app.route('/page')
def page_views():
    # Demo page for the JSON endpoint above.
    return render_template('01-page.html')

@app.route('/json_users')
def json_users():
    """Serialize the user with id=1 (as a one-element JSON array)."""
    # user = Users.query.filter_by(id=1).first()
    # print(user)
    # return json.dumps(user.to_dict())
    users = Users.query.filter_by(id=1).all()
    print(users)
    list = []
    for user in users:
        list.append(user.to_dict())
    return json.dumps(list)

@app.route('/show_info')
def show_views():
    # Demo page that fetches /server below.
    return render_template('02-user.html')

@app.route('/server')
def server_views():
    """Serialize every user to a JSON array."""
    users = Users.query.filter().all()
    list = []
    for user in users:
        list.append(user.to_dict())
    return json.dumps(list)

@app.route('/load')
def load_views():
    # Demo page for jQuery's load() helper.
    return render_template('04-load.html')

@app.route('/load_server')
def load_server():
    # Backend fetched by the jQuery load() demo.
    return "这是使用jquery的load方法发送的请求"

if __name__ == "__main__":
    app.run(debug=True)
|
Python
| 100
| 22.32
| 84
|
/day03/练习/01-run.py
| 0.53786
| 0.526749
|
horsinLin/Ajax-Project
|
refs/heads/master
|
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
import json
import pymysql
# Use PyMySQL as a drop-in replacement for MySQLdb.
pymysql.install_as_MySQLdb()
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI']="mysql://root:horsin@123@localhost:3306/flask"
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']=True
db = SQLAlchemy(app)
class Users(db.Model):
    """Login-user row reused from the day02 demo table."""
    __tablename__ = "loginUser"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(30), unique=True)
    passwd = db.Column(db.String(120))

    # NOTE(review): __init__ only sets username; passwd is left unset and
    # relies on the column default/NULL — confirm that is intended.
    def __init__(self, username):
        self.username = username

    def to_dict(self):
        """Serialize to a plain dict for json.dumps."""
        dic = {
            "username" : self.username,
            "passwd" : self.passwd
        }
        return dic

    def __repr__(self):
        return "<Users : %r>" % self.username
class Province(db.Model):
    """A province; parent side of the province -> cities relationship."""
    __tablename__="province"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    proname = db.Column(db.String(30))
    cities = db.relationship("City", backref="province", lazy="dynamic")

    def __init__(self, proname):
        self.proname = proname

    def __repr__(self):
        return "<Province : %r>" % self.proname

    def to_dict(self):
        """Serialize to a plain dict for json.dumps."""
        dic = {
            "id" : self.id,
            "proname" : self.proname
        }
        return dic
class City(db.Model):
    """A city belonging to one province via the pro_id foreign key."""
    __tablename__="city"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    cityname = db.Column(db.String(30))
    pro_id = db.Column(db.Integer, db.ForeignKey("province.id"))

    def __init__(self, cityname, pro_id):
        self.cityname = cityname
        self.pro_id = pro_id

    def __repr__(self):
        return "<City : %r>" % self.cityname

    def to_dict(self):
        """Serialize to a plain dict for json.dumps."""
        dic = {
            "id" : self.id,
            "cityname" : self.cityname,
            "pro_id" : self.pro_id
        }
        return dic
@app.route('/01-ajax')
def ajax_views():
    # Demo page that queries /01-server below.
    return render_template('01-ajax.html')
@app.route('/01-server')
def server_01():
    """Look up a user by the `username` query arg and return JSON.

    Returns the serialized user on a hit, or a {status, msg} error
    object when no account matches.
    """
    uname = request.args.get("username")
    print(uname)
    user = Users.query.filter_by(username=uname).first()
    if user:
        return json.dumps(user.to_dict())
    else:
        dic = {
            'status' : '0',
            'msg' : '没有查到任何信息!'
        }
        # Fix / consistency: serialize the error object like the success
        # branch. Returning a bare dict raises TypeError on older Flask
        # versions and produced a different content type than the hit path.
        return json.dumps(dic)
@app.route('/02-province')
def province_views():
    # Demo page for the cascading province/city selects.
    return render_template('03-province.html')

@app.route('/loadPro')
def loadPro_views():
    """Return every province as a JSON array."""
    provinces = Province.query.all()
    list = []
    for province in provinces:
        list.append(province.to_dict())
    return json.dumps(list)

@app.route('/loadCity')
def loadCity_views():
    """Return the cities of the province given by the `pid` query arg."""
    pid = request.args.get("pid")
    cities = City.query.filter_by(pro_id=pid).all()
    list = []
    for city in cities:
        list.append(city.to_dict())
    return json.dumps(list)

@app.route('/crossdomain')
def crossdomain_views():
    # Demo page for the JSONP cross-domain exercise.
    return render_template('04-crossdomain.html')

@app.route('/02-server')
def server_02():
    # JSONP-style response: a JavaScript call the page executes directly.
    return "show('这是server_02响应回来的数据')"

if __name__ == '__main__':
    app.run(debug=True)
|
Python
| 121
| 24.504131
| 89
|
/day04/练习/01-run.py
| 0.596894
| 0.584924
|
serhatkg021/parthenia
|
refs/heads/master
|
import RPi.GPIO as GPIO
import time
import os

# BCM pin numbers of the two task buttons.
PIN_TASK1 = 1
PIN_TASK2 = 2

GPIO.setmode(GPIO.BCM)
# Bug fix: RPi.GPIO channel numbers must be integers — the original code
# passed the strings "1"/"2", which RPi.GPIO rejects. Also removed the two
# dead reads that shadowed the builtin `input`.
GPIO.setup(PIN_TASK1, GPIO.IN)
GPIO.setup(PIN_TASK2, GPIO.IN)

while True:
    inputValue = GPIO.input(PIN_TASK1)
    # Button is treated as active-low: a False read triggers task 1.
    if (inputValue == False):
        print("1. Görev")
        os.system('..\\daire\\main.py')  # Daha verimli
    # if keyboard.is_pressed("2"):
    #     os.system('..\\dikdörtgen\\main.py') # Daha verimli
    # if keyboard.is_pressed("3"):
    #     print("3. Görev")
    #     # os.startfile('..\\daire\\main.py')
|
Python
| 22
| 24.818182
| 65
|
/Proje/Tuş/main2.py
| 0.550265
| 0.534392
|
serhatkg021/parthenia
|
refs/heads/master
|
import keyboard
import os

# Poll the keyboard forever and launch the script bound to each digit key.
while True:
    if keyboard.is_pressed("1"):
        print("1. Görev")
        os.system('..\\daire\\main.py')
    if keyboard.is_pressed("2"):
        os.system('..\\dikdörtgen\\main.py')
    if keyboard.is_pressed("3"):
        print("3. Görev")
|
Python
| 11
| 24
| 44
|
/Proje/Tuş/main.py
| 0.569343
| 0.551095
|
serhatkg021/parthenia
|
refs/heads/master
|
import cv2
from daire import circleScan
import keyboard
import os

# Capture resolution used for both resizing and center-offset math.
cameraX = 800
cameraY = 600
cap = cv2.VideoCapture(0)
# Cemberin merkezinin ekranın orta noktaya uzaklıgını x ve y cinsinden uzaklıgı
# (Distance of the circle's center from the screen center, in x and y.)
while True:
    # Key "2" hands control to the rectangle-detection script.
    if keyboard.is_pressed("2"):
        print("2. Görev")
        cap.release()
        cv2.destroyAllWindows()
        os.system('..\\dikdörtgen\\main.py')  # Daha verimli
        break
    # Key "3" just exits this loop.
    if keyboard.is_pressed("3"):
        cap.release()
        cv2.destroyAllWindows()
        print("3. Görev")
        break
    ret, frame = cap.read()
    frame = cv2.resize(frame, (cameraX, cameraY))
    # circleScan returns the detected circle's offset from frame center,
    # or None when no circle is found.
    data = circleScan(frame, cameraX, cameraY)
    if data is not None:
        print("X : " ,data[0] , " Y : " , data[1])
    cv2.imshow("output", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
|
Python
| 35
| 23.685715
| 80
|
/Proje/Daire/main.py
| 0.61066
| 0.584009
|
serhatkg021/parthenia
|
refs/heads/master
|
import cv2
import numpy as np
# minDist = 120
# param1 = 50
# param2 = 30
# minRadius = 5
# maxRadius = 0
def circleScan(frame, camX, camY):
    """Detect circles via Hough transform and annotate them on *frame*.

    Returns [x, y]: the offset of a detected circle's center from the
    frame center (y flipped so up is positive), or None implicitly when
    no circle is found.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Blur suppresses noise that would otherwise create spurious circles.
    blurred = cv2.GaussianBlur(gray,(11,11),0)
    circles = cv2.HoughCircles(blurred, cv2.HOUGH_GRADIENT, 1,120, param1=220, param2=30, minRadius=50, maxRadius=300)
    # circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, minDist, param1=param1, param2=param2, minRadius=minRadius, maxRadius=maxRadius)
    if circles is not None:
        circles = np.round(circles[0, :]).astype("int")
        # Draw every detected circle and its center marker.
        for (x, y, r) in circles:
            cv2.circle(frame, (x, y), r, (0, 255, 0), 4)
            cv2.rectangle(frame, (x - 5, y - 5),
                (x + 5, y + 5), (0, 128, 255), -1)
        # NOTE(review): x/y here carry the values of the LAST circle from
        # the loop above — confirm using the last (not strongest) circle
        # is intended.
        x = x - camX/2
        y = (y - camY/2) * -1
        return [x,y]
# circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.2, 100)
|
Python
| 28
| 32.642857
| 142
|
/Proje/Daire/daire.py
| 0.585987
| 0.504246
|
serhatkg021/parthenia
|
refs/heads/master
|
import cv2
# import numpy as np
import keyboard
import os

# Capture resolution used for both resizing and center-offset math.
cameraX = 800
cameraY = 600
cap = cv2.VideoCapture(0)
while(True):
    # Key "1" hands control back to the circle-detection script.
    if keyboard.is_pressed("1"):
        print("1. Görev Dikdortgende")
        cap.release()
        cv2.destroyAllWindows()
        os.system('..\\daire\\main.py')  # Daha verimli
        break
    # Key "3" just exits this loop.
    if keyboard.is_pressed("3"):
        print("3. Görev Dikdortgende")
        break
    ret, image = cap.read()
    image = cv2.resize(image, (cameraX, cameraY))
    original = image.copy()
    # Mark the frame center as a fixed reference rectangle.
    cv2.rectangle(original, (395, 295),
        (405, 305), (0, 128, 50), -1)
    blurred = cv2.medianBlur(image, 3)
    # blurred = cv2.GaussianBlur(hsv,(3,3),0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # Threshold on a yellow/orange hue band in HSV.
    mask = cv2.inRange(hsv,(15,0,0), (29, 255, 255))
    cnts,_ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    minArea = []
    minC = []
    # Keep only contours that are large enough and approximate to a
    # quadrilateral.
    for c in cnts:
        area = cv2.contourArea(c)
        if area > 400:
            approx = cv2.approxPolyDP(c, 0.125 * cv2.arcLength(c, True), True)
            if(len(approx) == 4):
                minArea.append(area)
                minC.append([area, c])
    if minArea:
        # Pick the smallest qualifying rectangle and compute its centroid
        # offset from the frame center (y flipped so up is positive).
        minArea.sort()
        print(minArea)
        mArea = minArea[0]
        mC = []
        for x in minC:
            if x[0] == mArea:
                mC = x[1]
        M = cv2.moments(mC)
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
        x = cx - cameraX/2
        y = (cy - cameraY/2) * -1
        print(cx, cy , x , y)
        cv2.rectangle(original, (cx - 5, cy - 5),
            (cx + 5, cy + 5), (0, 128, 255), -1)
        # NOTE(review): `approx` here is the polygon of the LAST contour
        # from the filtering loop, not necessarily the selected one —
        # confirm this is intended.
        cv2.drawContours(original, [approx], 0, (0, 0, 255), 5)
    cv2.imshow('mask', mask)
    cv2.imshow('original', original)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cap.release()
        break
cv2.destroyAllWindows()
|
Python
| 70
| 25.942858
| 78
|
/Proje/Dikdörtgen/main.py
| 0.508223
| 0.448276
|
LeonardoZanotti/opencv-logic-operations
|
refs/heads/main
|
import cv2 as cv
import numpy as np
import sys
from matplotlib import pyplot as plt
def main():
    """Apply the image operation named in argv[1] and display the result.

    Optional argv[2] == 'special' shows an original-vs-result comparison
    with matplotlib instead of a plain OpenCV window.
    """
    square = cv.imread('./img/square.png')
    ball = cv.imread('./img/ball.png')
    mask = cv.imread('./img/mask2.png')
    square_gray = cv.cvtColor(square, cv.COLOR_BGR2GRAY)
    ball_gray = cv.cvtColor(ball, cv.COLOR_BGR2GRAY)

    args = sys.argv
    if (len(args) > 1):
        # Operation name -> (window title, two-image function).
        pair_ops = {
            'add': ('Sum', add),
            'sub': ('Subtraction', sub),
            'mult': ('Multiplication', mult),
            'div': ('Division', div),
            'and': ('And operation', andF),
            'or': ('Or operation', orF),
            'xor': ('Xor operation', xorF),
            'not': ('Not operation', notF),
        }
        # Operation name -> (window title, filter applied to the mask image).
        mask_ops = {
            'blur': ('Blur', blur),
            'box': ('Box filter', box),
            'median': ('Median filter', median),
            'dd': ('2D filter', dd),
            'gaussian': ('Gaussian filter', gaussian),
            'bilateral': ('Bilateral filter', bilateral),
        }

        op = args[1]
        if op in pair_ops:
            title, fn = pair_ops[op]
            image = fn(square, ball)
        elif op in mask_ops:
            title, fn = mask_ops[op]
            image = fn(mask)
        else:
            print('(!) -- Error - no operation called')
            exit(0)

        if (len(args) > 2 and args[2] == 'special'):
            # Filters operate on the mask; everything else on the square.
            original = mask if op in mask_ops else square
            plt.subplot(121),plt.imshow(original),plt.title('Original')
            plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(image),plt.title(title)
            plt.xticks([]), plt.yticks([])
            plt.show()
        else:
            cv.imshow(title, image)
            cv.waitKey(15000)
            cv.destroyAllWindows()
    else:
        print('(!) -- Error - no operation called')
        exit(0)
def add(image1, image2):
    """Weighted sum of the two images (70% / 30%)."""
    # return cv.add(image1, image2, 0)
    return cv.addWeighted(image1, 0.7, image2, 0.3, 0)

def sub(image1, image2):
    """Saturating per-pixel subtraction image1 - image2."""
    return cv.subtract(image1, image2, 0)

def mult(image1, image2):
    """Per-pixel multiplication."""
    return cv.multiply(image1, image2)

def div(image1, image2):
    """Per-pixel division."""
    return cv.divide(image1, image2)

def andF(image1, image2):
    """Bitwise AND of the two images."""
    return cv.bitwise_and(image1, image2)

def orF(image1, image2):
    """Bitwise OR of the two images."""
    return cv.bitwise_or(image1, image2)

def xorF(image1, image2):
    """Bitwise XOR of the two images."""
    return cv.bitwise_xor(image1, image2)

def notF(image1, image2):
    """Bitwise NOT of image1 (image2 is accepted but ignored, so the
    dispatcher can call every operation with the same two arguments)."""
    return cv.bitwise_not(image1)

def blur(image1):
    """5x5 averaging blur."""
    return cv.blur(image1, (5, 5))

def box(image1):
    """Unnormalized box filter.
    NOTE(review): the positional args pass 50 as ddepth and False where
    cv.boxFilter expects dst/anchor — verify against the OpenCV signature.
    """
    return cv.boxFilter(image1, 50, (5, 5), False)

def median(image1):
    """5x5 median blur."""
    return cv.medianBlur(image1, 5)

def dd(image1):
    """Generic 2D convolution with a 5x5 averaging kernel."""
    kernel = np.ones((5,5),np.float32)/25
    return cv.filter2D(image1, -1, kernel)

def gaussian(image1):
    """5x5 Gaussian blur."""
    return cv.GaussianBlur(image1, (5, 5), 0)

def bilateral(image1):
    """Edge-preserving bilateral filter."""
    return cv.bilateralFilter(image1, 9, 75, 75)

if __name__ == '__main__':
    main()
|
Python
| 122
| 28.745901
| 173
|
/logic-arithmetic.py
| 0.537063
| 0.505098
|
johinsDev/codewars
|
refs/heads/master
|
"""
Going to zero or to infinity?
http://www.codewars.com/kata/55a29405bc7d2efaff00007c/train/python
"""
import math
def going(n):
    """Evaluate the series u(n) = (1! + 2! + ... + n!)/n! iteratively and
    truncate (not round) the result to six decimal places."""
    acc = 0.0
    for k in range(1, n + 1):
        acc = acc / k + 1
    return math.floor(acc * 10**6) / 10**6
# NOTE: Python 2 print statement — this file targets Python 2.
if __name__ == "__main__":
    for i in range(10):
        print i, going(i)
|
Python
| 18
| 16.5
| 66
|
/going_to_zero_or_inf.py
| 0.627389
| 0.541401
|
johinsDev/codewars
|
refs/heads/master
|
'''
A poor miner is trapped in a mine and you have to help him to get out !
Only, the mine is all dark so you have to tell him where to go.
In this kata, you will have to implement a method solve(map, miner, exit) that has to return the path the miner must take to reach the exit as an array of moves, such as : ['up', 'down', 'right', 'left']. There are 4 possible moves, up, down, left and right, no diagonal.
map is a 2-dimensional array of boolean values, representing squares. false for walls, true for open squares (where the miner can walk). It will never be larger than 5 x 5. It is laid out as an array of columns. All columns will always be the same size, though not necessarily the same size as rows (in other words, maps can be rectangular). The map will never contain any loop, so there will always be only one possible path. The map may contain dead-ends though.
miner is the position of the miner at the start, as an object made of two zero-based integer properties, x and y. For example {x:0, y:0} would be the top-left corner.
exit is the position of the exit, in the same format as miner.
Note that the miner can't go outside the map, as it is a tunnel.
Let's take a pretty basic example :
map = [[True, False],
[True, True]];
solve(map, {'x':0,'y':0}, {'x':1,'y':1})
// Should return ['right', 'down']
http://www.codewars.com/kata/5326ef17b7320ee2e00001df/train/python
'''
def solve(map, miner, exit):
    """Return the path from *miner* to *exit* as a list of moves.

    *map* is a list of columns (map[x][y], True = walkable), *miner* and
    *exit* are {'x': int, 'y': int} dicts. The maze is loop-free, so a
    depth-first search that never steps straight back always finds the
    unique path.

    Fixes the original stub, which called the non-existent dict method
    `itemiters()` (NameError; Python 2's method is `iteritems()`) and
    never returned anything.
    """
    # Move name -> (dx, dy) in column-major coordinates.
    dirc = {'right': (1, 0), 'left': (-1, 0), 'down': (0, 1), 'up': (0, -1)}
    start = (miner['x'], miner['y'])
    goal = (exit['x'], exit['y'])
    width, height = len(map), len(map[0])

    def _dfs(pos, came_from):
        # Returns the move list from pos to goal, or None on a dead end.
        if pos == goal:
            return []
        x, y = pos
        for name, (dx, dy) in dirc.items():
            nx, ny = x + dx, y + dy
            nxt = (nx, ny)
            if nxt == came_from:
                continue  # never step straight back
            if 0 <= nx < width and 0 <= ny < height and map[nx][ny]:
                rest = _dfs(nxt, pos)
                if rest is not None:
                    return [name] + rest
        return None

    return _dfs(start, None)
|
Python
| 41
| 41.634148
| 464
|
/escape_the_mines.py
| 0.691076
| 0.671053
|
johinsDev/codewars
|
refs/heads/master
|
"""
You have to create a function that takes a positive integer number and returns the next bigger number formed by the same digits:
http://www.codewars.com/kata/55983863da40caa2c900004e/train/python
"""
def next_bigger(n):
    """Return the smallest integer greater than *n* that uses exactly the
    same digits, or -1 if no such rearrangement exists.

    Implements the classic next-lexicographic-permutation algorithm.
    (The original stub had an empty body, which is a syntax error.)
    """
    digits = list(str(n))
    # Find the rightmost position whose digit is smaller than its successor.
    i = len(digits) - 2
    while i >= 0 and digits[i] >= digits[i + 1]:
        i -= 1
    if i < 0:
        # Digits are non-increasing: n is already the largest arrangement.
        return -1
    # Swap it with the smallest larger digit to its right, then sort the
    # tail ascending to get the minimal bigger number.
    j = len(digits) - 1
    while digits[j] <= digits[i]:
        j -= 1
    digits[i], digits[j] = digits[j], digits[i]
    digits[i + 1:] = sorted(digits[i + 1:])
    return int(''.join(digits))
|
Python
| 10
| 23.799999
| 128
|
/next_bigger.py
| 0.746032
| 0.678571
|
johinsDev/codewars
|
refs/heads/master
|
"""
Sudoku Solution Validator
http://www.codewars.com/kata/529bf0e9bdf7657179000008/train/python
"""
def validSolution(board):
    """Return True iff *board* is a correctly completed sudoku grid:
    every row, column and 3x3 box holds the digits 1..9 exactly once."""
    digits = set(range(1, 10, 1))

    def complete(cells):
        # A group is valid when its values are exactly {1..9}.
        return set(cells) == digits

    if not all(complete(row) for row in board):
        return False
    if not all(complete([row[c] for row in board])
               for c in range(len(board[0]))):
        return False
    return all(
        complete([board[3 * bi + r][3 * bj + c]
                  for r in range(3) for c in range(3)])
        for bi in range(3) for bj in range(3)
    )
# Grid with zeros and duplicated digits -- validSolution should say False.
boardOne = [[5, 3, 4, 6, 7, 8, 9, 1, 2],
            [6, 7, 2, 1, 9, 0, 3, 4, 8],
            [1, 0, 0, 3, 4, 2, 5, 6, 0],
            [8, 5, 9, 7, 6, 1, 0, 2, 0],
            [4, 2, 6, 8, 5, 3, 7, 9, 1],
            [7, 1, 3, 9, 2, 4, 8, 5, 6],
            [9, 0, 1, 5, 3, 7, 2, 1, 4],
            [2, 8, 7, 4, 1, 9, 6, 3, 5],
            [3, 0, 0, 4, 8, 1, 1, 7, 9]]
# Fully solved grid -- validSolution should say True.
boardTwo =[[5, 3, 4, 6, 7, 8, 9, 1, 2],
           [6, 7, 2, 1, 9, 5, 3, 4, 8],
           [1, 9, 8, 3, 4, 2, 5, 6, 7],
           [8, 5, 9, 7, 6, 1, 4, 2, 3],
           [4, 2, 6, 8, 5, 3, 7, 9, 1],
           [7, 1, 3, 9, 2, 4, 8, 5, 6],
           [9, 6, 1, 5, 3, 7, 2, 8, 4],
           [2, 8, 7, 4, 1, 9, 6, 3, 5],
           [3, 4, 5, 2, 8, 6, 1, 7, 9]]
# Smoke tests (Python 2 print statements): expect False then True.
print validSolution(boardOne)
print validSolution(boardTwo)
|
Python
| 53
| 25.283018
| 77
|
/sudoku_solution_val.py
| 0.41954
| 0.280891
|
johinsDev/codewars
|
refs/heads/master
|
"""
Decimal to any Rational or Irrational Base Converter
http://www.codewars.com/kata/5509609d1dbf20a324000714/train/python
wiki_page : https://en.wikipedia.org/wiki/Non-integer_representation
"""
import math
from math import pi , log
'''
def converter(n, decimals=0, base=pi):
"""takes n in base 10 and returns it in any base (default is pi
with optional x decimals"""
#your code here
alpha = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
m = 1
if n < 0:
n = -n
m = -m
times = 0 if n == 0 else int(math.floor(math.log(n, base)))
result = ''
while times >= -decimals :
if times == -1:
result += '.'
val = int(n / base**times)
result+=alpha[val]
#print "base time " ,n/(base**times)
n -= int(n / base**times) * base**times
#print result,n , times
times-=1
if m == -1:
result = '-'+result
result = str(result)
if decimals != 0:
loc = result.index('.')
last = len(result)-1
if decimals > last - loc:
result+='0'* (decimals-(last - loc))
return result
'''
def converter(n, decimals=0, base=pi):
    """Render the base-10 number `n` in `base` (rational or irrational),
    keeping `decimals` fractional digits; digits above 9 use A..Z."""
    alpha = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    # Zero never enters the place-value loop, so handle it up front.
    if n == 0:
        return '0' if not decimals else '0.' + '0' * decimals
    pieces = [] if n > 0 else ['-']
    n = abs(n)
    # Walk place values from the most significant down to the requested
    # fractional precision, peeling one digit off per step.
    for power in range(int(log(n, base)), -decimals - 1, -1):
        if power == -1:
            pieces.append('.')
        pieces.append(alpha[int(n / base ** power)])
        n %= base ** power
    return ''.join(pieces)
def main():
    # Smoke-test calls against the kata examples
    # (Python 2 print statements).
    print converter(0,4,26)
    print converter(-15.5,2,23)
    print converter(13,0,10)
    print converter(5.5, 1,10)

if __name__ == '__main__':
    main()
|
Python
| 71
| 23.478872
| 69
|
/decimalToRational.py
| 0.541021
| 0.49455
|
johinsDev/codewars
|
refs/heads/master
|
'''
http://www.codewars.com/kata/53d3173cf4eb7605c10001a8/train/python
Write a function that returns all of the sublists of a list or Array.
Your function should be pure; it cannot modify its input.
Example:
power([1,2,3])
# => [[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3]]
'''
def power(s):
    """Computes all of the sublists of s.

    Each integer 0..2**len(s)-1 acts as a bitmask: bit j set means s[j]
    belongs to that sublist, so subsets come out in mask order.
    """
    size = len(s)
    subsets = []
    for mask in range(2 ** size):
        subsets.append([s[j] for j in range(size) if (mask >> j) & 1])
    return subsets
def powersetlist(s):
    """Iteratively build the power set of s: each new element either
    extends every subset found so far or is left out."""
    subsets = [[]]
    for element in s:
        extended = [subset + [element] for subset in subsets]
        subsets = subsets + extended
    return subsets
#print "\npowersetlist(%r) =\n %r" % (s, powersetlist(s))
#print power([0,1,2,3])
if __name__ == '__main__':
    # Demo run (Python 2 print statement).
    print power([0,1,2,3])
|
Python
| 40
| 21.625
| 69
|
/powerSet.py
| 0.539648
| 0.488987
|
johinsDev/codewars
|
refs/heads/master
|
def solution(n):
    """Convert integer n (0..3999) to a Roman numeral string.

    NOTE(review): relies on Python 2 integer division (`remainder /
    roman_number[i][0]`); under Python 3 these become floats and the
    string-repeat multiplications break.
    """
    # TODO convert int to roman string
    result = ""
    remainder = n
    if n == 0:
        return ""
    # Walk the value table from M down to I, emitting repeated symbols
    # and subtractive pairs (CM/XC/IX, CD/XL/IV) along the way.
    for i in range(0,len(roman_number)):
        # Fraction of the current symbol's value still remaining.
        time = 1.0*remainder/roman_number[i][0]
        if str(roman_number[i][0])[0] == '1':
            # Power-of-ten symbols (M, C, X, I): repeat up to 3 times.
            if time < 4 and time >=1:
                temp = remainder % roman_number[i][0]
                div = remainder / roman_number[i][0]
                remainder = temp
                result += div * roman_number[i][1]
            # 0.9..1.0 of the value -> subtractive pair (e.g. CM, XC, IX).
            if time < 1 and time >= 0.9:
                result += (roman_number[i+2][1]+roman_number[i][1])
                remainder = remainder % roman_number[i+2][0]
        else:
            # Five-style symbols (D, L, V).
            # 0.8..1.0 -> subtractive pair (CD, XL, IV).
            if time < 1 and time >= 0.8:
                result += (roman_number[i+1][1]+roman_number[i][1])
                remainder = remainder % roman_number[i+1][0]
            # 1.0..1.8 -> the symbol plus trailing units.
            if time >= 1 and time < 1.8:
                div = (remainder - roman_number[i][0]) / roman_number[i+1][0]
                result += roman_number[i][1] + div * roman_number[i+1][1]
                remainder = remainder % roman_number[i+1][0]
            # >= 1.8 -> nine-style pair built from the neighbours.
            if time >= 1.8:
                result += roman_number[i+1][1]+roman_number[i-1][1]
                remainder = remainder % roman_number[i+1][0]
    return result

# (value, symbol) table, in strictly descending value order.
roman_number = [(1000, 'M'), (500, 'D'), (100, 'C'), (50, 'L'), (10, 'X'), (5, 'V'), (1, 'I')]
#print solution(4)
#print solution(6)
# Expected "MMMCMXCI" (Python 2 print statement).
print solution(3991)
|
Python
| 37
| 31.270269
| 94
|
/roman_number.py
| 0.581727
| 0.523051
|
johinsDev/codewars
|
refs/heads/master
|
#!/usr/bin/python
'''
An Arithmetic Progression is defined as one in which there is a constant difference between the consecutive terms of a given series of numbers. You are provided with consecutive elements of an Arithmetic Progression. There is however one hitch: Exactly one term from the original series is missing from the set of numbers which have been given to you. The rest of the given series is the same as the original AP. Find the missing term.
You have to write the function findMissing (list) , list will always be atleast 3 numbers.
http://www.codewars.com/kata/52de553ebb55d1fca3000371/train/python
'''
def find_missing(sequence):
    """Return the single missing interior term of an arithmetic progression.

    `sequence` is an AP of integers with exactly one interior term removed,
    so the complete series has len(sequence) + 1 terms and shares this
    sequence's first and last elements.  The missing term is the gap
    between the complete series' sum and the actual sum.
    """
    # The doubled sum of an integer AP is always even, so floor division
    # is exact here -- avoids the original's float round-trip and the
    # Python-2-only bare `reduce` (replaced by sum()).
    expected = (sequence[0] + sequence[-1]) * (len(sequence) + 1) // 2
    return expected - sum(sequence)

if __name__ == "__main__":
    a = [1, 2, 3, 4, 6, 7, 8, 9]
    print(find_missing(a))
|
Python
| 23
| 37.869564
| 435
|
/find_missing.py
| 0.731243
| 0.697648
|
johinsDev/codewars
|
refs/heads/master
|
"""
Square into Squares. Protect trees!
http://www.codewars.com/kata/square-into-squares-protect-trees
"""
import math
def decompose(n):
    """Split n**2 into a sum of squares of distinct integers < n.

    Returns the components in increasing order (lexicographically largest
    tail first, per the kata), or None when no decomposition exists.
    """
    def sub_decompose(s, i):
        # Recursive backtracking: try the largest candidate j < i first,
        # so the final component list is as top-heavy as possible.
        if s < 0:
            return None
        if s == 0:
            return []
        # range() replaces the Python-2-only xrange().
        for j in range(i - 1, 0, -1):
            sub = sub_decompose(s - j ** 2, j)
            if sub is not None:  # `is not None` instead of `!= None`
                return sub + [j]
        return None

    return sub_decompose(n ** 2, n)

if __name__ == "__main__":
    print(decompose(11))
|
Python
| 24
| 20.708334
| 63
|
/square_into_squares.py
| 0.541267
| 0.522073
|
johinsDev/codewars
|
refs/heads/master
|
#!/usr/bin/python
from collections import defaultdict
def sum_for_list(lst):
    """For every prime factor p of any number in lst, return the pair
    [p, sum of the lst members p divides], sorted by prime.

    Python 3 fixes applied to the original: `n /= d` (float division on
    py3) became `n //= d`, and dict iteration uses items() instead of the
    py2-only iteritems().
    """
    totals = defaultdict(int)

    def add_prime_factors(value):
        # Trial division on |value|; each DISTINCT prime credits `value`
        # exactly once (the first_hit flag ignores repeated factors).
        n = abs(value)
        d = 2
        while d * d <= n:
            first_hit = True
            while n % d == 0:
                if first_hit:
                    totals[d] += value
                    first_hit = False
                n //= d
            d += 1
        if n > 1:  # leftover factor > sqrt is itself prime
            totals[n] += value

    for value in lst:
        add_prime_factors(value)
    result = [[p, s] for p, s in totals.items()]
    result.sort(key=lambda pair: pair[0])
    return result
# Ad-hoc smoke tests (Python 2 print statements).
a = [12,15]
b = [15, 30, -45]
c = [15, 21, 24, 30, 45]
test = sum_for_list(b)
#print test
#print sum_for_list(a)
d = sum_for_list(c)
print d
# Re-sort by descending prime and show again.
d.sort(key = lambda x: x[0] ,reverse =True)
print d
|
Python
| 47
| 14.957447
| 68
|
/sum_for_list.py
| 0.573901
| 0.537949
|
johinsDev/codewars
|
refs/heads/master
|
"""
Create the function prefill that returns an array of n elements that all have the same value v. See if you can do this without using a loop.
You have to validate input:
v can be anything (primitive or otherwise)
if v is ommited, fill the array with undefined
if n is 0, return an empty array
if n is anything other than an integer or integer-formatted string (e.g. '123') that is >=0, throw a TypeError
When throwing a TypeError, the message should be n is invalid, where you replace n for the actual value passed to the function.
see: http://www.codewars.com/kata/54129112fb7c188740000162/train/python
"""
def prefill(n, v=None):
    """Return a list of n copies of v ('undefined' when v is omitted).

    Invalid counts (anything but a non-negative integer or an
    integer-formatted string) yield the message "<n> is invalid."
    NOTE: this port reports the error as a return value rather than
    raising, matching the original behaviour.
    """
    try:
        if isNumber(n):
            if v is None:
                return ['undefined'] * int(n)
            return [v] * int(n)
        raise TypeError
    except TypeError:
        return str(n) + " is invalid."


def isNumber(n):
    """True when n is a non-negative int or a string of decimal digits.

    Fixes two bugs in the original: '0' was rejected (int('0') is falsy),
    and negative ints were accepted despite the kata's n >= 0 rule.
    """
    if isinstance(n, int):
        return n >= 0
    if isinstance(n, str) and n.isdigit():
        return True
    return False
# Demo calls (Python 2 print statements).
print prefill(5,)
print prefill(5,prefill(3,'abc'))
print prefill(3,5)
print isNumber(5.3)
|
Python
| 43
| 24.39535
| 140
|
/prefillAnArray.py
| 0.696249
| 0.666057
|
johinsDev/codewars
|
refs/heads/master
|
"""
Vigenere Autokey Cipher Helper
http://www.codewars.com/kata/vigenere-autokey-cipher-helper
"""
class VigenereAutokeyCipher:
    """Autokey Vigenere cipher: once the key is consumed, the plaintext
    itself extends the key stream.  Characters outside `alphabet` pass
    through unchanged and do not advance the key position."""

    def __init__(self, key, alphabet):
        self.key = key
        self.alphabet = alphabet

    def code(self, text, direction):
        """Shared encode/decode loop; `direction` is truthy for encoding."""
        # A list comprehension replaces the original filter() call: on
        # Python 3 filter() returns a lazy object with no .append(), which
        # broke the key-stream extension below.
        newKey = [c for c in self.key if c in self.alphabet]
        result = []
        j = 0  # index of the next key-stream character
        for ch in text:
            if ch not in self.alphabet:
                result.append(ch)  # pass through; key position unchanged
                continue
            offset = self.alphabet.index(newKey[j])
            if direction:
                # Encoding: shift forward; the plaintext feeds the key.
                newKey.append(ch)
                result.append(self.alphabet[(self.alphabet.index(ch) + offset) % len(self.alphabet)])
            else:
                # Decoding: shift back; the recovered plaintext feeds the key.
                result.append(self.alphabet[(self.alphabet.index(ch) - offset) % len(self.alphabet)])
                newKey.append(result[-1])
            j += 1
        return ''.join(result)

    def encode(self, toEncode):
        return self.code(toEncode, 1)

    def decode(self, toDecode):
        return self.code(toDecode, 0)
def main():
    # Manual round-trip check against the kata's sample data; the literal
    # lines printed alongside are the expected outputs
    # (Python 2 print statements).
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    #alphabet = 'abcdefgh'
    key = 'password'
    tester = VigenereAutokeyCipher(key,alphabet)
    print tester.encode('codewars')
    print tester.encode('amazingly few discotheques provide jukeboxes')
    print 'pmsrebxoy rev lvynmylatcwu dkvzyxi bjbswwaib'
    print tester.decode('pmsrebxoy rev lvynmylatcwu dkvzyxi bjbswwaib')
    print 'amazingly few discotheques provide jukeboxes'

if __name__ == '__main__':
    main()
|
Python
| 57
| 26.263159
| 134
|
/vigenereAutokeyCipher.py
| 0.692209
| 0.688989
|
johinsDev/codewars
|
refs/heads/master
|
'''
Where my anagrams at?
http://www.codewars.com/kata/523a86aa4230ebb5420001e1/train/python
Also could construct prime list, assign each character from word to a prime number. multiply them
then divid prime number from word in words.
'''
def anagrams(word, words):
    """Return the members of `words` that are anagrams of `word`.

    A list comprehension replaces Python 2's list-returning filter() (the
    kata expects a list, and py3 filter() is lazy), and the target's
    sorted signature is computed once instead of per candidate.
    """
    signature = sorted(word)
    return [w for w in words if sorted(w) == signature]
# Sample runs (Python 2 print statements).
print anagrams("thisis" , ["thisis", "isthis", "thisisis"])
print anagrams('racer', ['crazer', 'carer', 'racar', 'caers', 'racer'])
print anagrams('laser', ['lazing', 'lazy', 'lacer'])
|
Python
| 17
| 31.411764
| 99
|
/where_my_anagrams.py
| 0.687273
| 0.656364
|
johinsDev/codewars
|
refs/heads/master
|
def sierpinski(n):
    """Build a text Sierpinski gasket of depth n out of '"' marks,
    one generated line per row, joined with newlines."""
    lines = ['"']  # depth 0 is the single apex mark
    for level in range(1, n + 1):
        # Level k contributes rows 2**(k-1) .. 2**k - 1, each derived
        # from the mirrored row one half-triangle above it.
        for row in range(2 ** (level - 1), 2 ** level):
            lines.append(addSpace(row, level, lines))
    return '\n'.join(lines)

def addSpace(l, n, string_list):
    """Compose row l of level n: the matching earlier row, a gap of
    spaces, then the same row again."""
    base = string_list[l - 2 ** (n - 1)]
    # Gap width shrinks as the row index approaches the triangle base;
    # a negative width degrades to no gap, as in the original loop.
    gap = max(2 * 2 ** (n - 1) - l, 0) * 2 - 1
    return base + ' ' * gap + base
#print sierpinski(1)
# Render a depth-6 gasket (Python 2 print statement).
print sierpinski(6)
|
Python
| 24
| 22.416666
| 49
|
/Sierpinski's Gasketr.py
| 0.511586
| 0.474153
|
johinsDev/codewars
|
refs/heads/master
|
"""
Validate Sudoku with size `NxN`
http://www.codewars.com/kata/540afbe2dc9f615d5e000425/train/python
"""
|
Python
| 4
| 26.25
| 67
|
/validate_sudoku.py
| 0.745455
| 0.609091
|
johinsDev/codewars
|
refs/heads/master
|
"""
The Millionth Fibonacci Kata
http://www.codewars.com/kata/53d40c1e2f13e331fc000c26/train/python
"""
import math
import sys
import time
from collections import defaultdict
# following not working , took too much time to compute.
def fib(n , i):
dic = defaultdict(list)
def find_dim(k):
if k == 0:
return []
if k == 1:
return [0]
else:
return [int(math.log(k,2))] + find_dim(k - 2**(int(math.log(k,2))))
def matrix_multi(a, b):
return [a[0]*b[0]+a[1]*b[2],
a[0]*b[1]+a[1]*b[3],
a[2]*b[0]+a[3]*b[2],
a[2]*b[1]+a[3]*b[3]]
def matrix_power(pow):
a = [1,1,1,0]
if pow in dic:
return dic[pow]
else:
if pow == 0:
return a
else:
for i in range(1,pow+1):
if i not in dic:
a = matrix_multi(a , a)
dic[i] = a
else:
a = dic[i]
return a
#print matrix_power([1,1,1,0])
def matrix_fib(t):
if t == 0 or t == 1:
return t
else:
result = [1,0,0,1]
alist = find_dim(t-1)
for i in alist:
result = matrix_multi(result,matrix_power(i))
return result
def dynamic_fib(n):
a = 0
b = 1
if n == 0:
return (a , b)
for i in range(n):
temp = a + b
a = b
b = temp
return (a , b )
def double_fast(n):
#really fast
if n == 0:
return (0 , 1)
else:
a, b = double_fast(n/2)
c = a * (2* b -a )
d = b **2 + a**2
if n%2 == 0:
return (c , d)
else:
return (d , d+c)
def compute_fib(n ,i ):
func = {0: matrix_fib,
1: double_fast,
2: dynamic_fib }
return func[i](n)[0] if n >= 0 else (-1)**(n%2+1) * func[i](-n)[0]
return compute_fib(n , i)
def size_base10(n):
    """Return the number of decimal digits of n minus one
    (e.g. 5 -> 0, 999 -> 2, 1000 -> 3).

    Uses floor division: the original `n / 10` was integer division on
    Python 2 but float division on Python 3, which made the loop spin
    through hundreds of float halvings and return a wrong size.
    """
    size = 0
    while n // 10 != 0:
        size += 1
        n = n // 10
    return size
def main():
'''
func = {0: matrix_fib,
1: double_fast,
2: dynamic_fib }
'''
try:
#var = int(raw_input("Please enter the n-th Fib number you want:"))
var = 200000
start = time.time()
i = 1
result = fib(var , i)
end = time.time()
#print "Lenght of %dth fib number is %d" %(var , size_base10(result))
print "Time is %s seconds." % (end - start)
#print result
#print "The %dth fib number is %d"%(var , result)
except:
pass
if __name__ == '__main__':
main()
|
Python
| 134
| 15.798508
| 72
|
/millionthFib.py
| 0.525732
| 0.482254
|
bhaktijkoli/python-training
|
refs/heads/master
|
# Generate a random number between 1 and 9 (including 1 and 9).
# Ask the user to guess the number, then tell them whether they
# guessed too low, too high, or exactly right.
import random as r
a = r.randint(1, 9)
def ask_user():
u = int(input("Guess the number?\n"))
if a == u:
print("Exactly")
elif a > u:
print("Too low")
ask_user()
else:
print("Too high")
ask_user()
ask_user()
|
Python
| 20
| 21.1
| 63
|
/example25.py
| 0.562771
| 0.549784
|
bhaktijkoli/python-training
|
refs/heads/master
|
# Consider that vowels in the alphabet are a, e, i, o, u and y.
# Function score_words takes a list of lowercase words as an
# argument and returns a score as follows:
# The score of a single word is 2 if the word contains an even number
# of vowels. Otherwise, the score of this word is 1 . The score for the
# whole list of words is the sum of scores of all words in the list.
# Debug the given function score_words such that it returns a correct
# score.
# Rules:
# even number of vowels then score is 2
# odd number of vowels then score is 1
# Per the problem statement above, vowels are a, e, i, o, u AND y; the
# original list was missing "y", which mis-scored words like "my".
vowels = ["a", "e", "i", "o", "u", "y"]

def score_word(word):
    """Score one word: 2 for an even number of vowels, 1 otherwise."""
    count = sum(1 for c in word if c in vowels)
    return 2 if count % 2 == 0 else 1

def score_words(words):
    """Return the total score of a list of words."""
    return sum(score_word(word) for word in words)
# Interactive driver: read a sentence and score its space-separated words.
sentance = input("Enter a sentance\n")
words = sentance.split(" ")
print(score_words(words))
|
Python
| 35
| 26.142857
| 71
|
/example26.py
| 0.623601
| 0.612411
|
bhaktijkoli/python-training
|
refs/heads/master
|
p = 3
n = 1
for i in range(4):
for j in range(7):
if j >= p and j <= p+n-1:
print("X", end=" ")
else:
print(" ", end=" ")
print()
p -= 1
n += 2
print("The python string multiplication way")
p = 3
n = 1
for i in range(4):
print(" " * p, end="")
print("X " * n, end="")
print()
p -= 1
n += 2
|
Python
| 22
| 15.772727
| 45
|
/example10.py
| 0.385604
| 0.354756
|
bhaktijkoli/python-training
|
refs/heads/master
|
import datetime as dt
today = dt.datetime.today()
for i in range(1, 6):
nextday = today + dt.timedelta(days=i)
print(nextday)
|
Python
| 5
| 25.799999
| 42
|
/example18.py
| 0.664234
| 0.649635
|
bhaktijkoli/python-training
|
refs/heads/master
|
# Given the participants' score sheet for your University Sports Day,
# you are required to find the runner-up score. You are given n scores.
# Store them in a list and find the score of the runner-up.
score_str = input("Enter scores\n")
score_list = score_str.split(" ")
highestScore = 0;
rupnnerUp = 0
for score in score_list:
score = int(score)
if score > highestScore:
highestScore = score
for score in score_list:
score = int(score)
if score > rupnnerUp and score < highestScore:
rupnnerUp = score
print(rupnnerUp)
|
Python
| 19
| 28.210526
| 71
|
/example23.py
| 0.678322
| 0.674825
|
bhaktijkoli/python-training
|
refs/heads/master
|
# To find a factorial of a number
# 5! = 5 * 4 * 3 * 2 * 1
# fact(5) = 5 * fact(4)
# fact(4) = 4 * fact(3)
# fact(3) = 3 * fact(2)
# fact(2) = 2 * fact(1)
# fact(1) = 1
# fact(5) = 5 * 4 * 3 * 2 * 1
def fact(n):
    """Return n! computed recursively.

    Base case is n <= 1 so that fact(0) == 1; the original recursed
    forever for 0 or negative input (negatives are clamped to 1 here).
    """
    if n <= 1:
        return 1
    return n * fact(n - 1)

n = 5
result = fact(n)
print(result)
|
Python
| 19
| 15.315789
| 33
|
/example13.py
| 0.449541
| 0.357798
|
bhaktijkoli/python-training
|
refs/heads/master
|
# GUI Programing
# Tkinter
import tkinter as tk
from tkinter import messagebox
## Welcome Window
def show_welcome():
welcome = tk.Tk()
welcome.title("Welcome ADMIN")
welcome.geometry("200x200")
welcome.mainloop()
## Login Window
# 1. Intialize Root Window
root = tk.Tk()
root.title("Login Application")
root.geometry("200x200")
# 2. Application Logic
def button1Click():
username = entry1.get()
password = entry2.get()
if username == 'admin' and password == 'admin':
messagebox.showinfo("Login Application", "Login Successfull!")
root.destroy()
show_welcome()
else:
messagebox.showerror("Login Application", "Login Failed!")
def button2Click():
if messagebox.askokcancel("Login Application", "Do you want to quit?"):
root.destroy()
# 3. Intialize widgets
label1 = tk.Label(root, text="Username")
label2 = tk.Label(root, text="Password")
entry1 = tk.Entry(root)
entry2 = tk.Entry(root)
button1 = tk.Button(root, text="Login", command=button1Click)
button2 = tk.Button(root, text="Quit", command=button2Click)
# 4. Placement of widgets (pack, grid, place)
label1.grid(row=1, column=1, pady=10)
label2.grid(row=2, column=1, pady=10)
entry1.grid(row=1, column=2)
entry2.grid(row=2, column=2)
button1.grid(row=3, column=2)
button2.grid(row=3, column=1)
# 5. Running the main looper
root.mainloop()
print("END")
|
Python
| 54
| 24.851852
| 75
|
/example21.py
| 0.664365
| 0.629834
|
bhaktijkoli/python-training
|
refs/heads/master
|
import datetime as dt
today = dt.datetime.today()
yesterday = today - dt.timedelta(days=1)
tomorrow = today + dt.timedelta(days=1)
print("Yesterday", yesterday)
print("Today", today)
print("Tomorrow", tomorrow)
|
Python
| 8
| 25.5
| 40
|
/example17.py
| 0.715596
| 0.706422
|
bhaktijkoli/python-training
|
refs/heads/master
|
# Make a two-player Rock-Paper-Scissors game. (Hint: Ask for player
# plays (using input), compare them, print out a message of
# congratulations to the winner, and ask if the players want to start a
# new game)
def is_play_valid(play):
    """True when `play` is one of the three legal moves."""
    return play in ('rock', 'paper', 'scissors')
def play_game():
p1 = input("Player 1, what are you playing?\n")
while not is_play_valid(p1):
p1 = input("Wrong play, please play again.\n")
p2 = input("Player 2, what are you playing?\n")
while not is_play_valid(p2):
p2 = input("Wrong play, please play again.\n")
# Game Logic
if p1 == p2:
print("Its a tie!")
elif p1 == "rock":
if p2 == 'scissors':
print("Player 1 wins")
else:
print("Player 2 wins")
elif p1 == "paper":
if p2 == "rock":
print("Player 1 wins")
else:
print("Player 2 wins")
else:
if p2 == 'paper':
print("Player 1 wins")
else:
print("Player 2 wins")
ans = input("Do you want to start a new game?\n")
if ans == 'yes':
print("Starting a new game")
play_game()
play_game()
|
Python
| 45
| 26.822222
| 71
|
/example24.py
| 0.52973
| 0.513514
|
bhaktijkoli/python-training
|
refs/heads/master
|
l = [1, 5, 12, 2, 15, 6]
i = 0
s = 0
for i in l:
s += i
print(s)
i = 0
s = 0
while i<len(l):
s += l[i]
i += 1
print(s)
|
Python
| 13
| 9.153846
| 24
|
/example2.py
| 0.370629
| 0.27972
|
bhaktijkoli/python-training
|
refs/heads/master
|
def add(a, b):
    """Return the sum of a and b."""
    return a + b

def sub(a, b):
    """Return a minus b."""
    return a - b

# NOTE: shadows the built-in pow() for importers of this module.
def pow(a,b):
    """Return a raised to the power b."""
    return a ** b

# Announce when loaded as a module rather than run directly.
if __name__ != "__main__":
    print("Basic Module Imported")
|
Python
| 11
| 13.636364
| 34
|
/basic.py
| 0.482353
| 0.482353
|
bhaktijkoli/python-training
|
refs/heads/master
|
x = int(input("Enter a number\n"))
for i in range(x):
print(i ** 2)
|
Python
| 3
| 23
| 34
|
/example5.py
| 0.546667
| 0.533333
|
bhaktijkoli/python-training
|
refs/heads/master
|
x = 65
for i in range(5):
for j in range(i+1):
print(chr(x+j), end=" ")
print()
|
Python
| 6
| 15.166667
| 32
|
/example9.py
| 0.455446
| 0.415842
|
bhaktijkoli/python-training
|
refs/heads/master
|
import datetime as dt
today = dt.datetime.today()
print("Current date and time", dt.datetime.now())
print("Current Time in 12 Hours Format", today.strftime("%I:%M:%S %p"))
print("Current year", today.year)
print("Month of the year", today.strftime("%B"))
print("Week number of the year", today.strftime("%W"))
print("Week day of the week", today.strftime("%A"))
print("Day of the year", today.strftime("%j"))
print("Day of the month", today.strftime("%d"))
print("Day of the week", today.strftime("%w"))
|
Python
| 11
| 44.81818
| 71
|
/example15.py
| 0.676413
| 0.672515
|
bhaktijkoli/python-training
|
refs/heads/master
|
x = int(input("Enter the value of X\n"))
if x%2 != 0:
print("Weird")
elif x >= 2 and x <= 5:
print("Not Weird")
elif x >= 6 and x<= 20:
print("Weird")
elif x > 20:
print("Not Weird")
|
Python
| 9
| 21.111111
| 40
|
/example1.py
| 0.519417
| 0.475728
|
bhaktijkoli/python-training
|
refs/heads/master
|
samples = (1, 2, 3, 4, 12, 5, 20, 11, 21)
e = o = 0
for s in samples:
if s % 2 == 0:
e += 1
else:
o += 1
print("Number of even numbers : %d" % (e))
print("Number of odd numbers : %d" % (o))
|
Python
| 10
| 20.5
| 42
|
/example6.py
| 0.44843
| 0.367713
|
bhaktijkoli/python-training
|
refs/heads/master
|
# Password generator
import random as r
lenth = int(input("Enter the length of password\n"))
password = ""
for i in range(lenth):
password += chr(r.randint(33, 123))
print(password)
|
Python
| 8
| 22.375
| 52
|
/example27.py
| 0.678756
| 0.65285
|
bhaktijkoli/python-training
|
refs/heads/master
|
#Write a python program to find the longest word in a file.
f = open("demo.txt", "r")
line = f.readline()
longestWord = ""
while line:
words = line.split(" ")
lineLongestWord = max(words, key=len)
if len(lineLongestWord) > len(longestWord):
longestWord = lineLongestWord
line = f.readline()
print("Longest word")
print(longestWord)
|
Python
| 14
| 24.571428
| 59
|
/example19.py
| 0.648649
| 0.648649
|
bhaktijkoli/python-training
|
refs/heads/master
|
# 0 1 1 2 3 5 8
def fib(n):
a = 0
b = 1
print(a, end=" ")
print(b, end=" ")
for i in range(2, n):
c = a + b
print(c, end=" ")
a = b
b = c
n = int(input("Enter the number\n"))
fib(n)
|
Python
| 14
| 15.785714
| 36
|
/example12.py
| 0.376518
| 0.336032
|
bhaktijkoli/python-training
|
refs/heads/master
|
# GUI Calculator Program
import tkinter as tk
# Intialize window
window = tk.Tk()
window.title("Calculator")
# Application Logic
result = tk.StringVar()
def add(value):
result.set(result.get() + value)
def peform():
result.set(eval(result.get()))
def clear():
result.set("")
# Initialize Widgets
label1 = tk.Label(window, textvariable=result)
button1 = tk.Button(window, text="1", padx=10, pady=10, bg="white", fg="black", command=lambda : add("1"))
button2 = tk.Button(window, text="2", padx=10, pady=10, bg="white", fg="black",command=lambda : add("2"))
button3 = tk.Button(window, text="3", padx=10, pady=10, bg="white", fg="black",command=lambda : add("3"))
button4 = tk.Button(window, text="4", padx=10, pady=10, bg="white", fg="black",command=lambda : add("4"))
button5 = tk.Button(window, text="5", padx=10, pady=10, bg="white", fg="black",command=lambda : add("5"))
button6 = tk.Button(window, text="6", padx=10, pady=10, bg="white", fg="black",command=lambda : add("6"))
button7 = tk.Button(window, text="7", padx=10, pady=10, bg="white", fg="black",command=lambda : add("7"))
button8 = tk.Button(window, text="8", padx=10, pady=10, bg="white", fg="black",command=lambda : add("8"))
button9 = tk.Button(window, text="9", padx=10, pady=10, bg="white", fg="black",command=lambda : add("9"))
button0 = tk.Button(window, text="0", padx=10, pady=10, bg="white", fg="black",command=lambda : add("0"))
button_dot = tk.Button(window, text=".", padx=10, pady=10, bg="#eee", fg="black",command=lambda : add("."))
button_equal = tk.Button(window, text="=", padx=10, pady=10, bg="green", fg="white",command=peform)
button_clear = tk.Button(window, text="C", padx=10, pady=10, bg="white", fg="black",command=clear)
button_multiply = tk.Button(window, text="*", padx=10, pady=10, bg="#eee", fg="black",command=lambda : add("*"))
button_minus = tk.Button(window, text="-", padx=10, pady=10, bg="#eee", fg="black",command=lambda : add("-"))
button_add = tk.Button(window, text="+", padx=10, pady=10, bg="#eee", fg="black",command=lambda : add("+"))
# Placement of Widgets
# Row0
label1.grid(row=0, column=0, columnspan=3, sticky="W")
# Row1
button7.grid(row=1, column=0)
button8.grid(row=1, column=1)
button9.grid(row=1, column=2)
button_multiply.grid(row=1, column=3)
# Row2
button4.grid(row=2, column=0)
button5.grid(row=2, column=1)
button6.grid(row=2, column=2)
button_minus.grid(row=2, column=3)
# Row3
button1.grid(row=3, column=0)
button2.grid(row=3, column=1)
button3.grid(row=3, column=2)
button_add.grid(row=3, column=3)
# Row4
button_clear.grid(row=4, column=0)
button0.grid(row= 4, column=1)
button_dot.grid(row= 4, column=2)
button_equal.grid(row= 4, column=3)
# Main Loop
window.mainloop()
|
Python
| 63
| 42.111111
| 112
|
/example22.py
| 0.657904
| 0.60533
|
bhaktijkoli/python-training
|
refs/heads/master
|
#Write a python program to count the numbers of alphabets, digits and spaces in a file.
f = open("demo.txt", "r")
alphabets = 0
digits = 0
spaces = 0
others = 0
lines = f.readlines()
for line in lines:
for c in line:
if c.isalpha():
alphabets += 1
elif c.isdigit():
digits += 1
elif c.isspace():
spaces += 1
else:
others += 1
print("Number of alphabets", alphabets)
print("Number of digits", digits)
print("Number of spaces", spaces)
print("Others", others)
|
Python
| 24
| 21.708334
| 87
|
/example20.py
| 0.56261
| 0.548501
|
bhaktijkoli/python-training
|
refs/heads/master
|
for i in range(5):
for j in range(i+1):
print(i+1, end=" ")
print()
print("The python way...")
for i in range(5):
print(str(str(i+1) + " ") * int(i+1))
|
Python
| 9
| 18.333334
| 41
|
/example8.py
| 0.480663
| 0.447514
|
bhaktijkoli/python-training
|
refs/heads/master
|
for i in range(5):
for j in range(5-i):
print("X", end=" ")
print()
for i in range(5):
print("X " * int(5-i))
|
Python
| 8
| 15.5
| 27
|
/example7.py
| 0.449275
| 0.42029
|
bhaktijkoli/python-training
|
refs/heads/master
|
# GUI Programing
# Tkinter
import tkinter as tk
from tkinter import messagebox
# 1. Intialize Root Window
root = tk.Tk()
root.title("Login Application")
root.geometry("200x200")
# 2. Application Logic
# 3. Intialize widgets
# 4. Placement of widgets (pack, grid, place)
# 5. Running the main looper
root.mainloop()
|
Python
| 20
| 15.1
| 45
|
/demo.py
| 0.691176
| 0.658824
|
bhaktijkoli/python-training
|
refs/heads/master
|
class Student:
def __init__(self, name, roll_no):
self.name = name
self.roll_no = roll_no
self.age = 0
self.marks = 0
def display(self):
print("Name", self.name)
print("Roll No", self.roll_no)
print("Age", self.age)
print("Marks", self.marks)
def setAge(self, age):
self.age = age
def setMarks(self, marks):
self.marks = marks
s1 = Student("Sahil", 12)
s1.setAge(20)
s1.setMarks(90)
s1.display()
s2 = Student("Rohit", 20)
s2.display()
|
Python
| 26
| 19.615385
| 38
|
/example14.py
| 0.530357
| 0.501786
|
bhaktijkoli/python-training
|
refs/heads/master
|
x = input("Enter a string \n")
d = l = 0
for c in x:
if c.isalpha():
l += 1
if c.isdigit():
d += 1;
print("Letters %d" % (l))
print("Digits %d" % (d))
|
Python
| 12
| 13.833333
| 30
|
/example4.py
| 0.425532
| 0.409574
|
bhaktijkoli/python-training
|
refs/heads/master
|
def is_leap(y):
    """Return True for Gregorian leap years.

    The original `y % 4 == 0` mislabelled century years: years divisible
    by 100 are common years unless also divisible by 400
    (1900 -> False, 2000 -> True).
    """
    return y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)
y = int(input("Enter a year\n"))
if is_leap(y):
print("Leap year")
else:
print("Not a Leap Year")
|
Python
| 8
| 17.125
| 32
|
/example16.py
| 0.543046
| 0.529801
|
bhaktijkoli/python-training
|
refs/heads/master
|
def checkPrime(x):
    """Print whether x is prime.

    Fixes two defects in the original: the success message said
    "Print number" (typo for "Prime number"), and values below 2
    (0, 1, negatives) were reported as prime because the trial-division
    loop never ran.
    """
    if x < 2:
        print("Not a prime number")
        return
    for i in range(2, x):
        if x % i == 0:
            print("Not a prime number")
            break
    else:
        # for-else: no divisor found in 2..x-1.
        print("Prime number")
x = int(input("Enter any number\n"))
checkPrime(x)
|
Python
| 10
| 20.9
| 39
|
/example11.py
| 0.493392
| 0.484582
|
bhaktijkoli/python-training
|
refs/heads/master
|
w = input("Enter a word")
r = "";
for a in w:
r = a + r
print(r)
|
Python
| 6
| 10.666667
| 25
|
/example3.py
| 0.445946
| 0.445946
|
pedromeldola/Desafio
|
refs/heads/master
|
from django.db import models
# Model class with its attributes (comments translated from Portuguese).
class Jogo(models.Model):
    # Explicit integer primary key for one game record.
    idJogo = models.AutoField(primary_key=True)
    # Score of this game.
    placar = models.IntegerField()
    # Running minimum score (presumably carried over between rows -- see views).
    placarMin = models.IntegerField()
    # Running maximum score.
    placarMax = models.IntegerField()
    # Times the minimum record has been broken.
    quebraRecMin = models.IntegerField()
    # Times the maximum record has been broken.
    quebraRecMax = models.IntegerField()

    def __str__(self):
        return str(self.idJogo)
|
Python
| 13
| 28.923077
| 47
|
/core/models.py
| 0.705128
| 0.705128
|
pedromeldola/Desafio
|
refs/heads/master
|
from django.shortcuts import render,redirect
from .models import Jogo
from django.views.decorators.csrf import csrf_protect
# Home page: fetch every Jogo object and render it
# (comments translated from Portuguese).
def home_page(request):
    jogo = Jogo.objects.all()
    return render (request,'home.html',{'jogo':jogo})

# Insert a new score row into the table when the button is clicked.
def inserir(request):
    placar = request.POST.get('nPlacar')
    # Fetch the running counters from the previous row.
    # NOTE(review): the bare except treats ANY error (not just an empty
    # table) as "no previous data" -- consider catching Jogo.DoesNotExist.
    try:
        placarMin = int(Jogo.objects.earliest('placarMin').placarMin)
        placarMax = int(Jogo.objects.latest('placarMax').placarMax)
        quebraRecMin = int(Jogo.objects.latest('quebraRecMin').quebraRecMin)
        quebraRecMax = int(Jogo.objects.latest('quebraRecMax').quebraRecMax)
    except:
        placarMin = False
        placarMax = False
        quebraRecMin = False
        quebraRecMax = False
    placar = int(placar)
    # Update the record fields depending on where the new score falls.
    if placarMin is False:
        # First row ever: the score is both the minimum and the maximum.
        placarMin = placar
        placarMax = placar
    elif placar < placarMin:
        placarMin = placar
        quebraRecMin += 1
    elif placar > placarMax or placarMax is False:
        placarMax = placar
        quebraRecMax += 1
    else:
        # NOTE(review): effectively a no-op branch; "quebraRecMmax" below
        # is a typo that binds a new variable instead of quebraRecMax.
        quebraRecMin = quebraRecMin+ 0
        quebraRecMmax = quebraRecMax+ 0
    # Create the object with the populated counters.
    jogo = Jogo.objects.create(placarMin=placarMin,placar=placar,placarMax=placarMax,quebraRecMin=quebraRecMin,quebraRecMax=quebraRecMax)
    return redirect('/') # back to the home page after inserting the row
|
Python
| 44
| 37.886364
| 137
|
/core/views.py
| 0.696669
| 0.694331
|
pedromeldola/Desafio
|
refs/heads/master
|
# Generated by Django 3.1 on 2020-10-01 01:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='jogo',
name='id',
),
migrations.AlterField(
model_name='jogo',
name='idJogo',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='jogo',
name='placar',
field=models.IntegerField(),
),
]
|
Python
| 27
| 22.185184
| 70
|
/core/migrations/0002_auto_20200930_2254.py
| 0.533546
| 0.504792
|
pedromeldola/Desafio
|
refs/heads/master
|
# Generated by Django 3.1.1 on 2020-09-28 18:50
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Jogo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idJogo', models.IntegerField()),
('placar', models.IntegerField(max_length=3)),
('placarMin', models.IntegerField()),
('placarMax', models.IntegerField()),
('quebraRecMin', models.IntegerField()),
('quebraRecMax', models.IntegerField()),
],
),
]
|
Python
| 26
| 28.23077
| 114
|
/core/migrations/0001_initial.py
| 0.544737
| 0.523684
|
memogarcia/pratai-runtimes
|
refs/heads/master
|
import os
import sys
import logging
from time import time
class AppFilter(logging.Filter):
    """Logging filter that injects function/request ids from the process
    environment into every record (consumed by the formatter below)."""

    def filter(self, record):
        record.function_id = os.environ.get("function_id", 'no_function_id')
        record.request_id = os.environ.get("request_id", 'no_request_id')
        # Always keep the record; this filter only annotates it.
        return True
logger = logging.getLogger('pratai')
logger.setLevel(logging.DEBUG)
# Docker can log stdout and stderr
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(function_id)s - %(request_id)s - %(levelname)s - %(message)s')
logger.addFilter(AppFilter())
handler.setFormatter(formatter)
logger.addHandler(handler)
def load_function_from_filesystem(path='/etc/pratai/'):
    """Import and return `main` from new_module.py located under `path`.

    Appends `path` to sys.path so the user-supplied module is importable.
    """
    sys.path.append(path)
    from new_module import main
    return main
def load_payload():
    """Fetch the invocation payload from the environment (None if absent)."""
    return os.environ.get("pratai_payload", None)
def execute_function():
    """Load the user function, run it with the payload, and log timing.

    Returns the function's result, or None when the call raised.
    """
    f = load_function_from_filesystem()
    payload = load_payload()
    start = time()
    logger.debug("function started with payload {0}".format(str(payload)))
    result = None
    try:
        result = f(payload)
        status = 'succeeded'
    except Exception as err:
        status = 'failed'
        # NOTE(review): Exception.message only exists on Python 2; on
        # Python 3 this line itself raises -- use str(err) instead.
        logger.error(err.message, exc_info=True)
    finish = time()
    logger.debug("function {0}, it took {1} seconds with response {2}"
                 .format(status, str(finish-start), str(result)))
    return result
if __name__ == '__main__':
r = execute_function()
|
Python
| 65
| 22.415384
| 109
|
/runtimes/python27/server.py
| 0.662073
| 0.659449
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.