seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
24759580671 | # Dataset Link: https://www.kaggle.com/datasets/abdalrahmanelnashar/credit-card-balance-prediction
#_____________________________________________________________________________________________________
# load libraries
import seaborn as sns
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as py
import pandas as pd
from sklearn.preprocessing import LabelEncoder, PolynomialFeatures
from sklearn.linear_model import Lasso, Ridge, LinearRegression, LogisticRegression
from sklearn.decomposition import PCA
from sklearn.metrics import mean_squared_error, r2_score, accuracy_score
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from statsmodels.stats.stattools import durbin_watson
from statsmodels.stats.outliers_influence import variance_inflation_factor
from scipy.optimize import curve_fit
from patsy import dmatrices
#########################################################################################################
# Load data from 'CreditCardBalance.csv'
dataFrame = pd.read_csv('Task_4-files/CreditCardBalance.csv')
## convert the categorical 'Student' column into numeric labels
number = LabelEncoder()
dataFrame['Student'] = number.fit_transform(dataFrame['Student'].astype('str'))
#dataFrame['Gender'] = number.fit_transform(dataFrame['Gender'].astype('str'))
#dataFrame['Married'] = number.fit_transform(dataFrame['Married'].astype('str'))
#dataFrame['Ethnicity'] = number.fit_transform(dataFrame['Ethnicity'].astype('str'))
##################
# Feature matrix: Rating, Cards, plus two interaction terms, Education and Student.
# Column indices: 3 = Rating, 4 = Cards, 6 = Education, 8 = Student (assumed from the
# comments below — confirm against the CSV header).
features = pd.concat([dataFrame.iloc[:,3:5],
                      dataFrame.iloc[:,3]*dataFrame.iloc[:,4], # Rating * Cards
                      dataFrame.iloc[:,3]*dataFrame.iloc[:,6], # Rating * Education
                      dataFrame.iloc[:,6], dataFrame.iloc[:,8]], axis=1)
target = dataFrame.iloc[:,-1]  # last column: Balance
#########################################################################################################
## split dataSet.
# Fix: random_state must be an int seed — the original passed True, which Python
# silently treats as the int 1; spell it out explicitly (same split as before).
x_tr, x_t, y_tr, y_t = train_test_split(features, target, test_size=0.20, random_state=1)
#_ Linear Regression Model
linear = LinearRegression()
linear.fit(x_tr, y_tr)
y_linear_pre = linear.predict(x_t)
print(' Linear Regression Model ')
print('Coef : ', linear.coef_)
print('Intercept : ', linear.intercept_)
print('MSE : ', mean_squared_error(y_t, y_linear_pre))
print('R2-Score : ', r2_score(y_t, y_linear_pre))
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
#_ Lasso Regression Model
lasso = Lasso(alpha=1)
lasso.fit(x_tr, y_tr)
y_lasso_pre = lasso.predict(x_t)
print(' Lasso Regression Model ')
# Bug fix: the original printed linear.coef_ / linear.intercept_ here.
print('Coef : ', lasso.coef_)
print('Intercept : ', lasso.intercept_)
print('MSE : ', mean_squared_error(y_t, y_lasso_pre))
print('R2-Score : ', r2_score(y_t, y_lasso_pre))
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
#_ Ridge Regression Model
ridge = Ridge(alpha=1)
ridge.fit(x_tr, y_tr)
y_ridge_pre = ridge.predict(x_t)
print(' Ridge Regression Model ')
# Bug fix: the original printed linear.coef_ / linear.intercept_ here.
print('Coef : ', ridge.coef_)
print('Intercept : ', ridge.intercept_)
print('MSE : ', mean_squared_error(y_t, y_ridge_pre))
print('R2-Score : ', r2_score(y_t, y_ridge_pre))
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
############################################################################################
| mohamed-malk/Machine-Learning-Deep-Learning | Machine Learning/Regression/Credit Card Balance/Code.py | Code.py | py | 3,240 | python | en | code | 0 | github-code | 13 |
45599142394 |
# coding: utf-8
from ...characterType import numeric, alphaNumeric
from ...row import RowElement, Row
class Header(Row):
    """File header record (registro tipo 0) of a CNAB 240 remittance file.

    Declares the 22 fixed-width fields of the header row (layout version 081)
    in positional order. Field widths and character types follow the FEBRABAN
    CNAB 240 specification; the Portuguese ``description`` strings are the
    official field names and are kept verbatim.
    """

    def __init__(self):
        Row.__init__(self)
        # Fields are listed in record order; RowElement index must match position.
        self.elements = [
            RowElement(
                index=0,
                description="Banco - Código do Banco na Compensação",
                numberOfCharacters=3,
                charactersType=numeric,
            ),
            RowElement(
                index=1,
                description="Lote - Lote de Serviço",
                numberOfCharacters=4,
                charactersType=numeric,
                value='0000'
            ),
            RowElement(
                index=2,
                description="Registro - Tipo de Registro",
                numberOfCharacters=1,
                charactersType=numeric,
                value='0'  # record type 0 = file header
            ),
            RowElement(
                index=3,
                description="CNAB - Uso Exclusivo FEBRABAN / CNAB",
                numberOfCharacters=6,
                charactersType=alphaNumeric,
            ),
            RowElement(
                index=4,
                description="No DA VERSÃO DO LAYOUT DO ARQUIVO",
                numberOfCharacters=3,
                charactersType=numeric,
                value='081'  # layout version
            ),
            RowElement(
                index=5,
                description="Empresa - Tipo de Inscrição da Empresa",
                numberOfCharacters=1,
                charactersType=numeric,
                value='2' # registration type: 1 = CPF, 2 = CNPJ
            ),
            RowElement(
                index=6,
                description="Empresa - Número de Inscrição da Empresa",
                numberOfCharacters=14,
                charactersType=numeric,
            ),
            RowElement(
                index=7,
                description="Empresa - Código do Convênio no Banco",
                numberOfCharacters=20,
                charactersType=alphaNumeric,
            ),
            RowElement(
                index=8,
                description="Empresa - Agência Mantenedora da Conta",
                numberOfCharacters=5,
                charactersType=numeric,
            ),
            RowElement(
                index=9,
                description="Empresa - Dígito Verificador da Agência",
                numberOfCharacters=1,
                charactersType=alphaNumeric,
            ),
            RowElement(
                index=10,
                description="Empresa - Número da Conta Corrente",
                numberOfCharacters=12,
                charactersType=numeric,
            ),
            RowElement(
                index=11,
                description="Empresa - Dígito Verificador da Conta",
                numberOfCharacters=1,
                charactersType=alphaNumeric,
            ),
            RowElement(
                index=12,
                description="Empresa - Dac da agencia/conta debitada",
                numberOfCharacters=1,
                charactersType=numeric,
            ),
            RowElement(
                index=13,
                description="Empresa - Nome da Empresa",
                numberOfCharacters=30,
                charactersType=alphaNumeric,
            ),
            RowElement(
                index=14,
                description="Nome do Banco",
                numberOfCharacters=30,
                charactersType=alphaNumeric,
            ),
            RowElement(
                index=15,
                description="CNAB - Uso Exclusivo FEBRABAN / CNAB",
                numberOfCharacters=10,
                charactersType=alphaNumeric,
            ),
            RowElement(
                index=16,
                description="Arquivo - Código Remessa / Retorno",
                numberOfCharacters=1,
                charactersType=numeric,
                value="1" # 1 = remessa (outbound), 2 = retorno (return)
            ),
            RowElement(
                index=17,
                description="Arquivo - Data de Geração do Arquivo",
                numberOfCharacters=8,
                charactersType=numeric,
            ),
            RowElement(
                index=18,
                description="Arquivo - Hora de Geração do Arquivo",
                numberOfCharacters=6,
                charactersType=numeric,
            ),
            RowElement(
                index=19,
                description="Complemento de Registro",
                numberOfCharacters=9,
                charactersType=numeric,
            ),
            RowElement(
                index=20,
                description="Arquivo - Densidade de Gravação do Arquivo",
                numberOfCharacters=5,
                charactersType=numeric,
            ),
            RowElement(
                index=21,
                description="Complemento de Registro",
                numberOfCharacters=69,
                charactersType=alphaNumeric,
            )
        ]

    def update(self):
        """Stamp the header with the current generation date and time."""
        import datetime
        now = datetime.datetime.now()
        self.elements[17].setValue(now.strftime("%d%m%Y")) # date the file was generated (DDMMYYYY)
        self.elements[18].setValue(now.strftime("%H%M%S")) # time the file was generated (HHMMSS)

    def setUser(self, user):
        """Fill company identification fields from *user* (identifier + name)."""
        self.elements[6].setValue(user.identifier)
        self.elements[13].setValue(user.name)

    def setUserBank(self, bank):
        """Fill bank/branch/account fields from *bank*."""
        self.elements[0].setValue(bank.bankId)
        self.elements[8].setValue(bank.branchCode)
        self.elements[10].setValue(bank.accountNumber)
        self.elements[12].setValue(bank.accountVerifier)
| rfschubert/febraban | febraban/cnab240/v83/file/header.py | header.py | py | 5,690 | python | en | code | 1 | github-code | 13 |
37643157938 | import sys
import pygame
from time import sleep
from bullet import Bullet
from alien import Alien
def check_keydown_events(event, ai_settings, screen, ship, bullets):
    """Respond to key presses: quit, fire, or set a movement flag."""
    if event.key == pygame.K_q:
        sys.exit()
    elif event.key == pygame.K_SPACE:
        fire_bullet(ai_settings, screen, ship, bullets)
    elif event.key == pygame.K_RIGHT:
        ship.moving_right = True
    elif event.key == pygame.K_LEFT:
        ship.moving_left = True
    elif event.key == pygame.K_UP:
        ship.moving_up = True
    elif event.key == pygame.K_DOWN:
        ship.moving_down = True
def check_keyup_events(event, ship):
    """Clear the ship movement flag matching a released arrow key."""
    flag_by_key = {
        pygame.K_RIGHT: 'moving_right',
        pygame.K_LEFT: 'moving_left',
        pygame.K_UP: 'moving_up',
        pygame.K_DOWN: 'moving_down',
    }
    flag = flag_by_key.get(event.key)
    if flag is not None:
        setattr(ship, flag, False)
def check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets):
    """Dispatch every pending pygame event to the appropriate handler."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            check_keydown_events(event, ai_settings, screen, ship, bullets)
        elif event.type == pygame.KEYUP:
            check_keyup_events(event, ship)
        elif event.type == pygame.MOUSEBUTTONDOWN:
            # Mouse clicks may start a new game via the Play button.
            mouse_x, mouse_y = pygame.mouse.get_pos()
            check_play_button(ai_settings, screen, stats, sb, play_button, ship,
                              aliens, bullets, mouse_x, mouse_y)
def check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y):
    """Start a fresh game when Play is clicked while the game is inactive."""
    clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
    if not clicked or stats.game_active:
        return
    # Reset dynamic speed settings and hide the cursor during play.
    ai_settings.initialize_dynamic_settings()
    pygame.mouse.set_visible(False)
    # Reset game statistics and refresh every scoreboard image.
    stats.reset_stats()
    stats.game_active = True
    sb.prep_score()
    sb.prep_high_score()
    sb.prep_level()
    sb.prep_ships()
    # Clear leftover sprites, rebuild the fleet, and recenter the ship.
    aliens.empty()
    bullets.empty()
    create_fleet(ai_settings, screen, ship, aliens)
    ship.center_ship()
def update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button):
    """Redraw the whole frame and flip it to the display."""
    screen.fill(ai_settings.bg_color)
    # Bullets are drawn first so the ship and aliens appear above them.
    for projectile in bullets.sprites():
        projectile.draw_bullet()
    ship.blitme()
    aliens.draw(screen)
    sb.show_score()
    # The Play button is only visible while the game is inactive.
    if not stats.game_active:
        play_button.draw_button()
    pygame.display.flip()
def fire_bullet(ai_settings, screen, ship, bullets):
    """Add a new bullet to the group unless the on-screen limit is reached."""
    if len(bullets) >= ai_settings.bullets_allowed:
        return
    bullets.add(Bullet(ai_settings, screen, ship))
def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Score bullet/alien hits and start the next level once the fleet is gone.

    Bug fix: the original wrote ``for aliens in collisions.values()``, which
    rebound the ``aliens`` group parameter to the last list of hit sprites, so
    the ``len(aliens) == 0`` check below never inspected the real fleet and the
    next level could fail to start.
    """
    # Both True flags delete the colliding bullets and aliens from their groups.
    collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
    if collisions:
        for hit_aliens in collisions.values():
            # One bullet may hit several aliens in a single frame.
            stats.score += ai_settings.alien_points * len(hit_aliens)
            sb.prep_score()
        check_high_score(stats, sb)
    if len(aliens) == 0:
        # Fleet destroyed: clear bullets, speed up, level up, rebuild the fleet.
        bullets.empty()
        ai_settings.increase_speed()
        stats.level += 1
        sb.prep_level()
        create_fleet(ai_settings, screen, ship, aliens)
def check_high_score(stats, sb):
    """Record a new high score and refresh its scoreboard image."""
    if stats.score <= stats.high_score:
        return
    stats.high_score = stats.score
    sb.prep_high_score()
def update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Advance all bullets, drop those off-screen, then resolve alien hits."""
    bullets.update()
    # Iterate over a copy so removal is safe during iteration.
    for shot in bullets.copy():
        if shot.rect.bottom <= 0:
            bullets.remove(shot)
    check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets)
def check_fleet_edges(ai_settings, aliens):
    """Drop the fleet and reverse its direction if any alien reached an edge."""
    # any() short-circuits at the first alien touching an edge, matching the
    # original loop-and-break behaviour.
    if any(alien.check_edges() for alien in aliens.sprites()):
        change_fleet_direction(ai_settings, aliens)
def change_fleet_direction(ai_settings, aliens):
    """Drop every alien by the configured amount and flip travel direction."""
    drop = ai_settings.fleet_drop_speed
    for member in aliens.sprites():
        member.rect.y += drop
    ai_settings.fleet_direction = -ai_settings.fleet_direction
def update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Move the fleet, then handle ship collisions and aliens at the bottom."""
    check_fleet_edges(ai_settings, aliens)
    aliens.update()
    # An alien touching the ship counts the same as the ship being shot down.
    if pygame.sprite.spritecollideany(ship, aliens) is not None:
        ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
    check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)
def check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Treat any alien reaching the bottom of the screen like a ship hit."""
    bottom_edge = screen.get_rect().bottom
    for invader in aliens.sprites():
        if invader.rect.bottom >= bottom_edge:
            ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
            break  # one hit per frame is enough
def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Handle losing a ship: consume a life, or end the game when none remain."""
    if stats.ships_left <= 0:
        stats.game_active = False
        pygame.mouse.set_visible(True)  # game over: show the cursor again
        return
    stats.ships_left -= 1
    sb.prep_ships()
    # Wipe the board and restart the level with a fresh fleet.
    aliens.empty()
    bullets.empty()
    create_fleet(ai_settings, screen, ship, aliens)
    ship.center_ship()
    sleep(0.5)  # short pause so the player notices the hit
def get_number_aliens_x(ai_settings, alien_width):
    """Return how many aliens fit in one row, keeping one-alien side margins."""
    usable_width = ai_settings.screen_width - 2 * alien_width
    # Each alien occupies its own width plus an equal gap.
    return int(usable_width / (2 * alien_width))
def get_number_rows(ai_settings, ship_height, alien_height):
    """Return how many alien rows fit between the top margin and the ship."""
    # Reserve three alien heights (top margin + buffer) plus the ship itself.
    usable_height = ai_settings.screen_height - (3 * alien_height) - ship_height
    return int(usable_height / (2 * alien_height))
def create_fleet(ai_settings, screen, ship, aliens):
    """Build a full grid of aliens sized to fit the screen and ship."""
    template = Alien(ai_settings, screen)  # used only for its dimensions
    per_row = get_number_aliens_x(ai_settings, template.rect.width)
    rows = get_number_rows(ai_settings, ship.rect.height, template.rect.height)
    for row_number in range(rows):
        for alien_number in range(per_row):
            create_alien(ai_settings, screen, aliens, alien_number, row_number)
def create_alien(ai_settings, screen, aliens, alien_number, row_number):
    """Create one alien and place it on the grid at (alien_number, row_number)."""
    alien = Alien(ai_settings, screen)
    width = alien.rect.width
    height = alien.rect.height
    # One alien-width left margin plus two widths of spacing per column.
    alien.x = width + 2 * width * alien_number
    alien.rect.x = alien.x
    # One alien-height top margin plus two heights of spacing per row.
    alien.rect.y = height + 2 * height * row_number
    aliens.add(alien)
import json
from shutil import copyfile  # unused here; kept in case other tooling relies on it
#ruta = "/home/nian/Documentos/composerAutomation/composer.json"
#archivo = open(ruta, 'r')

# Merge composer2.json into composer.json's data; keys from composer2 win.
with open("composer.json") as myJson:
    datos = json.load(myJson)  # json.load reads straight from the file object
with open("composer2.json") as myJson2:
    datos2 = json.load(myJson2)
datos.update(datos2)
# The explicit close() calls were removed: 'with' already closed both files.
#print(datos)
def generarJson(datos):
    """Serialize *datos* as pretty-printed JSON, echo it, and overwrite composer.json.

    Fix: the file is now opened with a 'with' block so it is closed even if
    the write raises; the original opened it manually and would leak the
    handle on error.
    """
    s = json.dumps(datos, indent=4)  # convert to text to store as JSON
    print(s)
    with open("composer.json", "w") as f:
        f.write(s)

generarJson(datos)
| nian8203/composerAutomation | updateJsonFinal.py | updateJsonFinal.py | py | 566 | python | es | code | 0 | github-code | 13 |
25115787699 | __author__ = 'patras'
'''A robot is searching for an object in the environment consisting of a few locations.
The robot knows the map. It is a rigid state variable.
The robot moves from one location to another using Djikstra's shortest path.
It has a battery that needs to be recharged after some moves.
A move consumes 1/4 of the battery capacity.'''
from domains.constants import *
from shared import gui
from shared.timer import globalTimer
from shared import GLOBALS # needed for heuristic (zero or domainSpecific), and for planning mode in environment
import numpy
class FetchDomain():
    """RAE-style acting domain for a robot fetching objects on a known map.

    Declares tasks, refinement methods and heuristics on *actor*, and defines
    the primitive commands (take/put/charge/move/...). Commands busy-wait on
    the global timer to simulate duration, query *env* for a nondeterministic
    outcome, and guard shared state variables with per-key locks.
    """
    def __init__(self, state, rv, actor, env):
        # params:
        # state: the state the domain operates on
        # rv: rigid variables (static map data: LOCATIONS, EDGES, ...)
        # actor: needed for do_task and do_command
        # env: the environment the domain gathers/senses information from
        self.state = state
        self.actor = actor
        self.env = env
        self.rv = rv
        #actor.declare_commands([self.put, self.take, self.perceive, self.charge, self.move, self.moveToEmergency, self.addressEmergency, self.wait, self.fail])
        actor.declare_task('search', 'r', 'o')
        actor.declare_task('fetch', 'r', 'o')
        actor.declare_task('recharge', 'r', 'c')
        actor.declare_task('moveTo', 'r', 'l')
        actor.declare_task('emergency', 'r', 'l', 'i')
        actor.declare_task('nonEmergencyMove', 'r', 'l1', 'l2', 'dist')
        actor.declare_methods('search', self.Search_Method1, self.Search_Method2)
        actor.declare_methods('fetch', self.Fetch_Method1, self.Fetch_Method2)
        actor.declare_methods('recharge', self.Recharge_Method1, self.Recharge_Method2, self.Recharge_Method3)
        actor.declare_methods('moveTo', self.MoveTo_Method1)
        actor.declare_methods('emergency', self.Emergency_Method1)
        actor.declare_methods('nonEmergencyMove', self.NonEmergencyMove_Method1)
        # Heuristic selection is configured globally; 'zero' is uninformed.
        if GLOBALS.GetHeuristicName() == 'zero':
            actor.declare_heuristic('search', self.Heuristic1)
            actor.declare_heuristic('fetch', self.Heuristic1)
        elif GLOBALS.GetHeuristicName() == 'domainSpecific':
            actor.declare_heuristic('search', self.Heuristic2)
            actor.declare_heuristic('fetch', self.Heuristic2)

    def GetCommand(self, cmd_name):# save the commands by name
        """Look up a command method of this domain by its string name."""
        return getattr(self, cmd_name)

    # Using Dijkstra's algorithm
    def GETDISTANCE(self, l0, l1):
        """Return the shortest-path distance (in unit-weight edges) from l0 to l1."""
        visitedDistances = {l0: 0}
        locs = list(self.rv.LOCATIONS)
        while locs:
            # Pick the unprocessed location with the smallest known distance.
            min_loc = None
            for loc in locs:
                if loc in visitedDistances:
                    if min_loc is None:
                        min_loc = loc
                    elif visitedDistances[loc] < visitedDistances[min_loc]:
                        min_loc = loc
            if min_loc is None:
                break  # remaining locations are unreachable from l0
            locs.remove(min_loc)
            current_dist = visitedDistances[min_loc]
            # Relax all outgoing edges (every edge has weight 1).
            for l in self.rv.EDGES[min_loc]:
                dist = current_dist + 1
                if l not in visitedDistances or dist < visitedDistances[l]:
                    visitedDistances[l] = dist
        return visitedDistances[l1]

    def fail(self,):
        """Command that always fails; used by methods to signal a dead end."""
        return FAILURE

    def take(self, r, o):
        """Command: robot r picks up object o if r is free and co-located with o."""
        self.state.load.AcquireLock(r)
        if self.state.load[r] == NIL:
            self.state.pos.AcquireLock(o)
            if self.state.loc[r] == self.state.pos[o]:
                # Busy-wait until the simulated command duration has elapsed.
                start = globalTimer.GetTime()
                while(globalTimer.IsCommandExecutionOver('take', start) == False):
                    pass
                res = self.env.Sense('take')
                if res == SUCCESS:
                    gui.Simulate("Robot %s has picked up object %s\n" %(r, o))
                    self.state.pos[o] = r
                    self.state.load[r] = o
                else:
                    gui.Simulate("Non-deterministic event has made the take command fail\n")
            else:
                gui.Simulate("Robot %s is not at object %s's location\n" %(r, o))
                res = FAILURE
            self.state.pos.ReleaseLock(o)
        else:
            gui.Simulate("Robot %s is not free to take anything\n" %r)
            res = FAILURE
        self.state.load.ReleaseLock(r)
        return res

    def put(self, r, o):
        """Command: robot r puts down object o at its current location."""
        self.state.pos.AcquireLock(o)
        if self.state.pos[o] == r:
            start = globalTimer.GetTime()
            self.state.loc.AcquireLock(r)
            self.state.load.AcquireLock(r)
            while(globalTimer.IsCommandExecutionOver('put', start) == False):
                pass
            res = self.env.Sense('put')
            if res == SUCCESS:
                gui.Simulate("Robot %s has put object %s at location %d\n" %(r,o,self.state.loc[r]))
                self.state.pos[o] = self.state.loc[r]
                self.state.load[r] = NIL
            else:
                # NOTE(review): this message has %s placeholders but no % args;
                # it would raise if gui.Simulate formats it — confirm upstream.
                gui.Simulate("Robot %s has failed to put %s because of some internal error")
            self.state.loc.ReleaseLock(r)
            self.state.load.ReleaseLock(r)
        else:
            gui.Simulate("Object %s is not with robot %s\n" %(o,r))
            res = FAILURE
        self.state.pos.ReleaseLock(o)
        return res

    def charge(self, r, c):
        """Command: fully recharge robot r using charger c (co-located or carried)."""
        self.state.loc.AcquireLock(r)
        self.state.pos.AcquireLock(c)
        if self.state.loc[r] == self.state.pos[c] or self.state.pos[c] == r:
            self.state.charge.AcquireLock(r)
            start = globalTimer.GetTime()
            while(globalTimer.IsCommandExecutionOver('charge', start) == False):
                pass
            res = self.env.Sense('charge')
            if res == SUCCESS:
                self.state.charge[r] = 4  # full battery = 4 moves
                gui.Simulate("Robot %s is fully charged\n" %r)
            else:
                gui.Simulate("Charging of robot %s failed due to some internal error.\n" %r)
            self.state.charge.ReleaseLock(r)
        else:
            gui.Simulate("Robot %s is not in the charger's location or it doesn't have the charger with it\n" %r)
            res = FAILURE
        self.state.loc.ReleaseLock(r)
        self.state.pos.ReleaseLock(c)
        return res

    def moveToEmergency(self, r, l1, l2, dist):
        """Command: emergency variant of move; clears the emergency flag on failure."""
        self.state.loc.AcquireLock(r)
        self.state.charge.AcquireLock(r)
        if l1 == l2:
            gui.Simulate("Robot %s is already at location %s\n" %(r, l2))
            res = SUCCESS
        elif self.state.loc[r] == l1 and self.state.charge[r] >= dist:
            start = globalTimer.GetTime()
            while(globalTimer.IsCommandExecutionOver('move', start) == False):
                pass
            res = self.env.Sense('moveToEmergency')
            if res == SUCCESS:
                gui.Simulate("Robot %s has moved from %d to %d\n" %(r, l1, l2))
                self.state.loc[r] = l2
                self.state.charge[r] = self.state.charge[r] - dist  # 1 charge per edge
            else:
                gui.Simulate("Moving failed due to some internal error\n")
        elif self.state.loc[r] != l1 and self.state.charge[r] >= dist:
            gui.Simulate("Robot %s is not in location %d\n" %(r, l1))
            res = FAILURE
        elif self.state.loc[r] == l1 and self.state.charge[r] < dist:
            gui.Simulate("Robot %s does not have enough charge to move :(\n" %r)
            self.state.charge[r] = 0 # should we do this?
            res = FAILURE
        else:
            gui.Simulate("Robot %s is not at location %s and it doesn't have enough charge!\n" %(r, l1))
            res = FAILURE
        self.state.loc.ReleaseLock(r)
        self.state.charge.ReleaseLock(r)
        if res == FAILURE:
            # A failed emergency move abandons the emergency-handling state.
            self.state.emergencyHandling.AcquireLock(r)
            self.state.emergencyHandling[r] = False
            self.state.emergencyHandling.ReleaseLock(r)
        return res

    def perceive(self, l):
        """Command: observe location l, revealing the objects contained there."""
        self.state.view.AcquireLock(l)
        if self.state.view[l] == False:
            start = globalTimer.GetTime()
            while(globalTimer.IsCommandExecutionOver('perceive', start) == False):
                pass
            self.env.Sense('perceive')
            # Every object the environment placed in l is now known to be at l.
            for c in self.state.containers[l]:
                self.state.pos.AcquireLock(c)
                self.state.pos[c] = l
                self.state.pos.ReleaseLock(c)
            self.state.view[l] = True
            gui.Simulate("Perceived location %d\n" %l)
        else:
            gui.Simulate("Already perceived\n")
        self.state.view.ReleaseLock(l)
        return SUCCESS

    def move(self, r, l1, l2, dist):
        """Command: move robot r from l1 to l2 (cost *dist*), unless handling an emergency.

        Carrying charger 'c1' exempts the robot from the charge requirement
        and from paying the movement cost.
        """
        self.state.emergencyHandling.AcquireLock(r)
        if self.state.emergencyHandling[r] == False:
            self.state.loc.AcquireLock(r)
            self.state.charge.AcquireLock(r)
            if l1 == l2:
                gui.Simulate("Robot %s is already at location %s\n" %(r, l2))
                res = SUCCESS
            elif self.state.loc[r] == l1 and (self.state.charge[r] >= dist or self.state.load[r] == 'c1'):
                start = globalTimer.GetTime()
                while(globalTimer.IsCommandExecutionOver('move', start) == False):
                    pass
                res = self.env.Sense('move')
                if res == SUCCESS:
                    gui.Simulate("Robot %s has moved from %d to %d\n" %(r, l1, l2))
                    self.state.loc[r] = l2
                    if self.state.load[r] != 'c1':
                        self.state.charge[r] = self.state.charge[r] - dist
                else:
                    gui.Simulate("Robot %s failed to move due to some internal failure\n" %r)
            elif self.state.loc[r] != l1 and self.state.charge[r] >= dist:
                gui.Simulate("Robot %s is not in location %d\n" %(r, l1))
                res = FAILURE
            elif self.state.loc[r] == l1 and self.state.charge[r] < dist:
                gui.Simulate("Robot %s does not have enough charge to move :(\n" %r)
                self.state.charge[r] = 0 # should we do this?
                res = FAILURE
            else:
                gui.Simulate("Robot %s is not at location %s and it doesn't have enough charge!\n" %(r, l1))
                res = FAILURE
            self.state.loc.ReleaseLock(r)
            self.state.charge.ReleaseLock(r)
        else:
            gui.Simulate("Robot is addressing emergency so it cannot move.\n")
            res = FAILURE
        self.state.emergencyHandling.ReleaseLock(r)
        return res

    def addressEmergency(self, r, l, i):
        """Command: robot r handles emergency i at location l; always clears the flag."""
        self.state.loc.AcquireLock(r)
        self.state.emergencyHandling.AcquireLock(r)
        if self.state.loc[r] == l:
            start = globalTimer.GetTime()
            while(globalTimer.IsCommandExecutionOver('addressEmergency', start) == False):
                pass
            res = self.env.Sense('addressEmergency')
            if res == SUCCESS:
                gui.Simulate("Robot %s has addressed emergency %d\n" %(r, i))
            else:
                gui.Simulate("Robot %s has failed to address emergency due to some internal error \n" %r)
        else:
            gui.Simulate("Robot %s has failed to address emergency %d\n" %(r, i))
            res = FAILURE
        self.state.emergencyHandling[r] = False
        self.state.loc.ReleaseLock(r)
        self.state.emergencyHandling.ReleaseLock(r)
        return res

    def wait(self, r):
        """Command: block (in simulated time) until r's emergency handling ends."""
        while(self.state.emergencyHandling[r] == True):
            start = globalTimer.GetTime()
            while(globalTimer.IsCommandExecutionOver('wait', start) == False):
                pass
            #gui.Simulate("Robot %s is waiting for emergency to be over\n" %r)
            self.env.Sense('wait')
        return SUCCESS

    def Recharge_Method3(self, r, c):
        """ Robot r charges and carries the charger with it """
        if self.state.loc[r] != self.state.pos[c] and self.state.pos[c] != r:
            if self.state.pos[c] in self.rv.LOCATIONS:
                # Charger is sitting at a location: go there.
                self.actor.do_task('moveTo', r, self.state.pos[c])
            else:
                # Charger is held by another robot: make it drop the charger first.
                robot = self.state.pos[c]
                self.actor.do_command(self.put, robot, c)
                self.actor.do_task('moveTo', r, self.state.pos[c])
        self.actor.do_command(self.charge, r, c)
        self.actor.do_command(self.take, r, c)

    def Recharge_Method2(self, r, c):
        """ Robot r charges and does not carry the charger with it """
        if self.state.loc[r] != self.state.pos[c] and self.state.pos[c] != r:
            if self.state.pos[c] in self.rv.LOCATIONS:
                self.actor.do_task('moveTo', r, self.state.pos[c])
            else:
                robot = self.state.pos[c]
                self.actor.do_command(self.put, robot, c)
                self.actor.do_task('moveTo', r, self.state.pos[c])
        self.actor.do_command(self.charge, r, c)

    def Recharge_Method1(self, r, c):
        """ When the charger is with another robot and that robot takes the charger back """
        robot = NIL
        if self.state.loc[r] != self.state.pos[c] and self.state.pos[c] != r:
            if self.state.pos[c] in self.rv.LOCATIONS:
                self.actor.do_task('moveTo', r, self.state.pos[c])
            else:
                robot = self.state.pos[c]
                self.actor.do_command(self.put, robot, c)
                self.actor.do_task('moveTo', r, self.state.pos[c])
        self.actor.do_command(self.charge, r, c)
        if robot != NIL:
            # Return the charger to the robot it was taken from.
            self.actor.do_command(self.take, robot, c)

    def Search_Method1(self, r, o):
        """Method: search for o by perceiving the next unviewed location; recurse if absent."""
        if self.state.pos[o] == UNK:
            toBePerceived = NIL
            # Pick the first location that has not been perceived yet.
            for l in self.rv.LOCATIONS:
                if self.state.view[l] == False:
                    toBePerceived = l
                    break
            if toBePerceived != NIL:
                self.actor.do_task('moveTo', r, toBePerceived)
                self.actor.do_command(self.perceive, toBePerceived)
                if self.state.pos[o] == toBePerceived:
                    # Free the gripper before picking up the target object.
                    if self.state.load[r] != NIL:
                        self.actor.do_command(self.put, r, self.state.load[r])
                    self.actor.do_command(self.take, r, o)
                else:
                    self.actor.do_task('search', r, o)
            else:
                # Every location has been perceived and o was not found.
                gui.Simulate("Failed to search %s" %o)
                self.actor.do_command(self.fail)
        else:
            gui.Simulate("Position of %s is already known\n" %o)

    def Search_Method2(self, r, o):
        """Method: like Search_Method1 but recharges before moving to the next location."""
        if self.state.pos[o] == UNK:
            toBePerceived = NIL
            for l in self.rv.LOCATIONS:
                if self.state.view[l] == False:
                    toBePerceived = l
                    break
            if toBePerceived != NIL:
                self.actor.do_task('recharge', r, 'c1') # is this allowed?
                self.actor.do_task('moveTo', r, toBePerceived)
                self.actor.do_command(self.perceive, toBePerceived)
                if self.state.pos[o] == toBePerceived:
                    if self.state.load[r] != NIL:
                        self.actor.do_command(self.put, r, self.state.load[r])
                    self.actor.do_command(self.take, r, o)
                else:
                    self.actor.do_task('search', r, o)
            else:
                gui.Simulate("Failed to search %s" %o)
                self.actor.do_command(self.fail)
        else:
            gui.Simulate("Position of %s is already known\n" %o)

    def Fetch_Method1(self, r, o):
        """Method: fetch o — search if its position is unknown, else go and take it."""
        pos_o = self.state.pos[o]
        if pos_o == UNK:
            self.actor.do_task('search', r, o)
        else:
            if self.state.loc[r] != pos_o:
                self.actor.do_task('moveTo', r, pos_o)
            if self.state.load[r] != NIL:
                self.actor.do_command(self.put, r, self.state.load[r])
            self.actor.do_command(self.take, r, o)

    def Fetch_Method2(self, r, o):
        """Method: like Fetch_Method1 but recharges before moving to the object."""
        pos_o = self.state.pos[o]
        if pos_o == UNK:
            self.actor.do_task('search', r, o)
        else:
            if self.state.loc[r] != pos_o:
                self.actor.do_task('recharge', r, 'c1')
                self.actor.do_task('moveTo', r, pos_o)
            if self.state.load[r] != NIL:
                self.actor.do_command(self.put, r, self.state.load[r])
            self.actor.do_command(self.take, r, o)

    def Emergency_Method1(self, r, l, i):
        """Method: drop any load, rush to location l and address emergency i."""
        if self.state.emergencyHandling[r] == False:
            self.state.emergencyHandling[r] = True
            load_r = self.state.load[r]
            if load_r != NIL:
                self.actor.do_command(self.put, r, load_r)
            l1 = self.state.loc[r]
            dist = self.GETDISTANCE(l1, l)
            self.actor.do_command(self.moveToEmergency, r, l1, l, dist)
            self.actor.do_command(self.addressEmergency, r, l, i)
        else:
            # NOTE(review): "%r" below is the repr conversion spec, not the
            # variable name — the robot id is substituted, just repr-quoted.
            gui.Simulate("%r is already busy handling another emergency\n" %r)
            self.actor.do_command(self.fail)

    def NonEmergencyMove_Method1(self, r, l1, l2, dist):
        """Method: perform a normal move, waiting out any ongoing emergency first."""
        if self.state.emergencyHandling[r] == False:
            self.actor.do_command(self.move, r, l1, l2, dist)
        else:
            self.actor.do_command(self.wait, r)
            self.actor.do_command(self.move, r, l1, l2, dist)

    def MoveTo_Method1(self, r, l):
        """Method: move r to l if it has enough charge (or carries charger 'c1')."""
        x = self.state.loc[r]
        dist = self.GETDISTANCE(x, l)
        if self.state.charge[r] >= dist or self.state.load[r] == 'c1':
            self.actor.do_task('nonEmergencyMove', r, x, l, dist)
        else:
            self.state.charge[r] = 0
            gui.Simulate("Robot %s does not have enough charge to move from %d to %d\n" %(r, x, l))
            self.actor.do_command(self.fail)

    def Heuristic1(self, args):
        """Uninformed ('zero') heuristic: treat every refinement as equally costly."""
        return float("inf")

    def Heuristic2(self, args):
        """Domain-specific heuristic: scale with the robot's remaining charge."""
        robot = args[0]
        return 5 * self.state.charge[robot]

    # TODO
    #def goalMethod1(self):
    #    pass
class FetchEnv():
    """Simulated nondeterministic environment for the fetch domain.

    ``Sense`` returns SUCCESS/FAILURE for most commands by sampling the
    per-command outcome probabilities; 'perceive' and 'wait' are handled
    specially depending on whether the actor is in planning mode.
    """
    def __init__(self, state, rv):
        # [P(success), P(failure)] per command name.
        self.commandProb = {
            'take': [0.9, 0.1],
            'put': [0.99, 0.01],
            'charge': [0.90, 0.10],
            'moveToEmergency': [0.99, 0.01],
            'move': [0.95, 0.05],
            'addressEmergency': [0.98, 0.02],
        }
        self.state = state
        self.rv = rv

    def Sense(self, cmd):
        """Return the sampled outcome of executing *cmd*.

        NOTE(review): the 'wait' branch (and 'perceive' in planning mode via
        SenseObjects) returns None implicitly rather than SUCCESS/FAILURE —
        callers appear to ignore the result in those cases; confirm.
        """
        if cmd == 'perceive':
            if GLOBALS.GetPlanningMode() == True:
                return self.SenseObjects()
            else:
                return SUCCESS
        elif cmd == 'wait':
            if GLOBALS.GetPlanningMode() == True:
                # During planning, waiting resolves all emergencies at once.
                for r in self.rv.ROBOTS:
                    self.state.emergencyHandling[r] = False
        else:
            # Sample the command's Bernoulli outcome (50 draws, only the
            # first is used — index 0 means success).
            p = self.commandProb[cmd]
            outcome = numpy.random.choice(len(p), 50, p=p)
            res = outcome[0]
            if res == 0:
                return SUCCESS
            else:
                return FAILURE

    def SenseObjects(self):
        """Randomly assign each not-yet-located object to one unviewed location.

        Used in planning mode to imagine where unseen objects might be; the
        assignment is uniform over the locations still unperceived.
        """
        total = 0
        for loc in self.state.containers:
            self.state.containers[loc] = []  # reset imagined contents
            if self.state.view[loc] == False:
                total += 1
        for o in self.rv.OBJECTS:
            prob = {}
            if self.state.pos[o] == UNK:
                for l in self.state.view:
                    if self.state.view[l] == False:
                        prob[l] = 1/total  # uniform over unviewed locations
                p = list(prob.values())
                locs = list(prob.keys())
                # 50 draws, only the first sample is used.
                locIndex = numpy.random.choice(len(p), 50, p=p)
                self.state.containers[locs[locIndex[0]]].append(o)
14463333892 |
import numpy as np
from numpy import SHIFT_UNDERFLOW, pi
from neuroaiengines.utils.angles import wrap_pi
import math
import functools
# pylint: disable=not-callable
from scipy.optimize import minimize
def _root_k(k, fwhm):
c = np.log(np.cosh(k))/k
return np.square(np.cos(fwhm/2) - c)
def _simp_vonmises(a,loc,k):
x = np.exp(k*np.cos(a-loc))
return x
def create_activation_fn(num_neurons=27, encoding_range=[-0.75*pi, 0.75*pi], fwhm=pi/18, scaling_factor=1,**kwargs):
    """
    Creates an activation function that returns the activation of num_neurons given a landmark angle.
    parameters:
    ----------
    :param num_neurons : the number of ring neurons
    :param encoding_range: the range of the encoding, [min, max]
    :param fwhm: full width, half max of the bump centered around each ring neuron's receptive field. In radians.
    :param scaling_factor: How much the final current is scaled.
    :returns activation_fn: function(ang, slc)
        an activation function that returns activations given landmark angles
        parameters:
        ----------
        angs: float
            angle
        slc: slice
            a slice to truncate the output to a certain number
    """
    # NOTE(review): encoding_range is a mutable default argument; safe here
    # because it is only unpacked, never mutated.
    r_min, r_max = encoding_range
    rng = r_max - r_min
    # Receptive-field centers of the ring neurons, centered on 0.
    centers = create_preferred_angles(num_neurons,centered=True, rng=rng, **kwargs)
    # standard deviation calculation from FWHM (Gaussian relation, used only
    # as an initial guess for the von Mises concentration k below)
    std = fwhm/(2*np.sqrt(2*np.log(2)))
    k = 1/np.sqrt(std)
    # Refine k numerically so the von Mises bump has the requested FWHM.
    k = minimize(_root_k,x0=k,args=fwhm, bounds=[(0.1,np.inf)]).x
    # Bounds of exp(k*cos(.)) used to rescale activations into [0, 1].
    maxx = np.exp(np.abs(k))
    minx = np.exp(-np.abs(k))
    rescale = lambda x: (x - minx)/(maxx-minx)
    # Just compute using k
    def activation_fn(ang, slc=slice(0,None)):
        # NOTE(review): slc is accepted but not applied to the output here.
        ret = rescale(_simp_vonmises(centers,ang,k))
        return ret*scaling_factor
    activation_fn.k = k
    return activation_fn
def create_epg_bump_fn(num_neurons, scaling_factor=2,fwhm=pi/2, hemisphere_offset=False):
    """Create a cached EPG bump function over the full [-pi, pi) ring.

    With ``hemisphere_offset`` the underlying ring already covers both
    hemispheres (2 * num_neurons centers); otherwise the single-hemisphere
    activation is duplicated side by side with ``np.hstack``.
    """
    if hemisphere_offset:
        fn = create_activation_fn(num_neurons*2, encoding_range=[-pi,pi], fwhm=fwhm, scaling_factor=scaling_factor, hemisphere_offset=hemisphere_offset)
    else:
        fn = create_activation_fn(num_neurons, encoding_range=[-pi,pi], fwhm=fwhm, scaling_factor=scaling_factor)
    def hemfn(ang):
        # We assume ang is single dimensional
        val = fn(ang)
        if not hemisphere_offset:
            # Duplicate the bump so both hemispheres receive the same input.
            return np.hstack((val,val))
        else:
            return val
    hemfn.k = fn.k
    # NOTE(review): functools.cache requires ang to be hashable (a scalar,
    # not an ndarray) — confirm callers always pass plain floats.
    return functools.cache(hemfn)
def create_tiled_bump_fn(num_neurons, bins_per_neuron=10, tile=2, **kwargs):
    """Wrap a ring activation so its output is repeated ``tile`` times along axis 1.

    ``bins_per_neuron`` is accepted for interface compatibility but is unused.
    Extra keyword arguments are forwarded to ``create_activation_fn``.
    """
    base_fn = create_activation_fn(num_neurons, **kwargs)

    def tiled_fn(ang):
        return np.tile(base_fn(ang), (1, tile))

    # Expose the solved von Mises concentration like the wrapped function does.
    tiled_fn.k = base_fn.k
    return tiled_fn
def create_pref_dirs(*args, **kwargs):
    """
    Create circular preferred directions in two dimensions (cos, sin).

    All arguments are forwarded to ``create_preferred_angles``:
    sz: int
        number of neurons to create the directions for. Output shape will be (sz, 2)
    centered: bool
        if True, the preferred directions are centered on 0.
    rng: float
        total range the preferred directions cover.

    returns:
    --------
    pref_dirs: np.array((sz, 2))
        unit vectors [cos(a), sin(a)] per neuron; feed into the ensemble's encoders.
    """
    angles = create_preferred_angles(*args, **kwargs)
    return np.array([[np.cos(theta), np.sin(theta)] for theta in angles])
def create_preferred_angles(sz, centered=False, rng=2*pi, hemisphere_offset=False):
    """Generates preferred angles for a ring of neurons

    Args:
        sz (int): number of neurons in the ring
        centered (bool, optional): If the preferred angles should be centered on 0. Defaults to False.
        rng (float, optional): Range of encoding. Defaults to 2*pi.
        hemisphere_offset (bool, optional): If true, indicates that the number of neurons includes two
            hemispheres and that the encoding should return in L/R order
            ([0L,1L,..,0R,1R,..] instead of interleaved). Defaults to False.

    Returns:
        np.ndarray: ``sz`` evenly spaced angles, ``rng * (t + 1) / sz`` for
        ``t`` in ``[-min_a, max_a)``.
    """
    if centered:
        min_a = math.floor(sz/2)
        max_a = math.ceil(sz/2)
    else:
        min_a = 0
        max_a = sz
    # Vectorized equivalent of [rng * (t + 1) / sz for t in arange(-min_a, max_a)].
    pref_angs = rng * (np.arange(-min_a, max_a) + 1) / sz
    if hemisphere_offset:
        return unshuffle_ring(pref_angs)
    return pref_angs
def create_sine_epg_activation(sz, offset=1., scale=0.2):
    """
    Creates a sine-based EPG bump function based off preferred angles.

    parameters:
    ----------
    sz: size of the output (per hemisphere; the result is doubled)
    offset,scale: offset and scale of the output, such that the bump will be
        between (offset-1)*scale and (offset+1)*scale.
    """
    # Preferred directions for one hemisphere, duplicated for the second.
    dirs = np.tile(create_pref_dirs(sz, centered=True), (2, 1))

    def fn(angle):
        """
        Gets the epg activation given an angle.

        parameters:
        ----------
        angle: float
            Angle in radians

        returns:
        -------
        activation: np.array(2*sz)
            activation of the EPGs. Units are arbitrary.
        """
        heading = np.array([np.cos(angle), np.sin(angle)])
        activation = np.dot(dirs, heading.T)
        return (activation + offset) * scale

    return fn
# For legacy code
# Module-level default: 9 neurons per hemisphere, zero offset, unit scale.
get_epg_activation = create_sine_epg_activation(9, 0, 1)
def create_decoding_fn(sz, sincos=False, backend=np, hemisphere_offset=False):
    """
    Creates a function to return the angle given bumps of EPG activity

    parameters:
    ----------
    sz: int
        number of neurons per hemisphere
    sincos: bool
        if True, the decoder returns the raw (cos, sin) projection instead of the angle
    backend: module
        array backend (numpy, or torch — torch inputs are converted via ``backend.tensor``)
    hemisphere_offset: bool
        whether the activity vector is in L/R block order instead of interleaved
    """
    if not hemisphere_offset:
        epg_pref_dirs = create_pref_dirs(sz, centered=True)
        try:
            epg_pref_dirs = backend.tensor(epg_pref_dirs)
        except AttributeError:
            # numpy has no ``tensor`` constructor; keep the ndarray as-is.
            # (Original used a bare ``except:``, which also hid real errors.)
            pass
        epg_pref_dirs = backend.tile(epg_pref_dirs, (2, 1)).T
    else:
        epg_pref_dirs = create_pref_dirs(sz * 2, centered=True, hemisphere_offset=hemisphere_offset).T
        try:
            epg_pref_dirs = backend.tensor(epg_pref_dirs)
        except AttributeError:
            # numpy has no ``tensor`` constructor; keep the ndarray as-is.
            pass
    def decode(act):
        """Project activity onto the preferred directions; return angle or (cos, sin)."""
        sc = backend.matmul(epg_pref_dirs, act)
        if sincos:
            return sc
        return backend.arctan2(sc[1], sc[0])
    return decode
def shuffle_ring(a):
    """
    Shuffles the order of ring with two hemispheres from [0L,1L,2L...NL,0R,1R,2R...NR] to [0L,0R,1L,1R,2L,2R...NL,NR]

    Args:
        a (np.array): Array to be shuffled

    Returns:
        np.array: Shuffled array
    """
    half = len(a) // 2
    # Pair up the two halves column-wise, then flatten to interleave them.
    return np.stack((a[:half], a[half:]), axis=1).ravel()
def unshuffle_ring(a):
    """
    Unshuffles the order of ring with two hemispheres from [0L,0R,1L,1R,2L,2R...NL,NR] to [0L,1L,2L...NL,0R,1R,2R...NR]

    Args:
        a (np.array): Array to be unshuffled

    Returns:
        np.array: Unshuffled array
    """
    left_hemisphere = a[0::2]
    right_hemisphere = a[1::2]
    return np.concatenate((left_hemisphere, right_hemisphere))
| aplbrain/seismic | neuroaiengines/utils/signals.py | signals.py | py | 7,307 | python | en | code | 0 | github-code | 13 |
def sort_array_by_parity(nums: list[int]):
    """
    Given an integer array nums, move all the even integers to the beginning
    of the array followed by all the odd integers, in place with two pointers.
    Return any array that satisfies this condition.
    """
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        lo_is_odd = nums[lo] % 2 != 0
        hi_is_even = nums[hi] % 2 == 0
        if lo_is_odd and hi_is_even:
            # Both elements are on the wrong side: swap and advance both ends.
            nums[lo], nums[hi] = nums[hi], nums[lo]
            lo += 1
            hi -= 1
        elif lo_is_odd:
            hi -= 1
        elif hi_is_even:
            lo += 1
        else:
            # Both already correctly placed.
            lo += 1
            hi -= 1
    return nums
print(sort_array_by_parity([3, 1, 2, 4]))
print(sort_array_by_parity([0]))
16502378107 | from tinydb import Query, TinyDB
# Small TinyDB demo: create a table, insert a few fruit records, then query.
filename = "test2.json"
db = TinyDB(filename)
db.drop_table('fruits')
table = db.table('fruits') # create the 'fruits' table
# insert records
table.insert({'name':'사과','price':5000,'지역 이름':'인천'})
table.insert({'name':'바나나','price':7000})
table.insert({'name':'망고','price':8000})
table.insert({'name':'레몬','price':5500})
# list every record
print(table.all())
item = Query()
# search for records matching a condition
res = table.search(item.name == '사과')
print(res[0]['name']) # search() returns a list, so index with res[0]
res = table.search(item.price > 6000)
for i in res:
    print('-',i['name'],i['price'])
10552525819 | import random
# Build a random list, then count how many elements equal (minimum + delta).
size = int(input('Введите размерность массива: '))
lst = [random.randint(0, 30) for _ in range(size)]
minim = min(lst)
delta = int(input('Введите значение delta: '))
counter = sum(1 for value in lst if value - delta == minim)
print(counter)
2715270081 | from scipy.cluster.hierarchy import linkage, fcluster
from sklearn.cluster import KMeans
from enbios2.experiment.bw_vector_db.create_vectors import get_all_vector_docs
from enbios2.experiment.bw_vector_db.psql_vectorDB import Document
def kmeans(docs: list[Document], k: int = 35):
    """Cluster document embeddings with k-means.

    Args:
        docs: documents whose ``embedding`` attribute holds the feature vector.
        k: number of clusters.

    Returns:
        The fitted sklearn ``KMeans`` estimator.
    """
    # Bind the fitted estimator to a fresh name: the original bound it to
    # ``kmeans``, shadowing this function's own name inside its body.
    model = KMeans(n_clusters=k, random_state=0).fit([doc.embedding for doc in docs])
    return model
def hierarchical_clustering(docs: list[Document]):
    """Ward-linkage hierarchical clustering over document embeddings.

    Writes an interactive dendrogram to ``dendogram.html`` and returns the
    linkage matrix ``Z`` (scipy format, one merge per row).

    NOTE(review): ``ff.create_dendrogram`` normally expects the raw observation
    matrix, not a linkage matrix — passing ``Z`` here may not render the
    intended tree; verify against the plotly figure-factory docs.
    """
    Z = linkage([doc.embedding for doc in docs], method='ward')
    # Plotting dendrogram
    # plt.figure(figsize=(25, 10))
    # plt.title('Hierarchical Clustering Dendrogram')
    # plt.xlabel('sample index')
    # plt.ylabel('distance')
    # dendrogram(
    #     Z,
    #     leaf_rotation=90.,  # rotates the x axis labels
    #     leaf_font_size=8.,  # font size for the x axis labels
    # )
    # plt.show()
    # return Z
    import plotly.figure_factory as ff
    fig = ff.create_dendrogram(Z)
    fig.update_layout(width=800, height=500)
    fig.write_html("dendogram.html")
    # fig.show()
    return Z
def hierarchical(Z):
    """Assign flat cluster labels from a hierarchical linkage matrix.

    Args:
        Z: linkage matrix as produced by ``scipy.cluster.hierarchy.linkage``.

    Returns:
        np.ndarray: 1-based cluster id per original observation, obtained by
        cutting the dendrogram at cophenetic distance ``max_d``.
    """
    max_d = 1  # max_d as in max_distance: clusters merged below this stay together
    return fcluster(Z, max_d, criterion='distance')
if __name__ == "__main__":
    print("Running clustering.py")
    docs = get_all_vector_docs()
    # Cluster every stored embedding into 500 k-means groups.
    k500 = kmeans(docs, 500)
| LIVENlab/enbios | enbios2/experiment/bw_vector_db/cluster.py | cluster.py | py | 1,766 | python | en | code | 3 | github-code | 13 |
39752322572 | from datetime import date, timedelta
# Third Party Imports
import pytest
from pubsub import pub
# RAMSTK Package Imports
from ramstk.models.dbrecords import RAMSTKMatrixRecord
from ramstk.models.dbtables import RAMSTKMatrixTable
from tests import MockDAO
DESCRIPTION = "validation-requirement"
@pytest.fixture
def mock_dao(monkeypatch):
    """Create a mock database table."""
    # (matrix_id, row_id, correlation) for each record; other fields are shared.
    records = []
    for _matrix_id, _row_id, _correlation in [(1, 3, 1), (2, 4, 2)]:
        _record = RAMSTKMatrixRecord()
        _record.revision_id = 1
        _record.matrix_id = _matrix_id
        _record.description = DESCRIPTION
        _record.column_id = 6
        _record.row_id = _row_id
        _record.correlation = _correlation
        records.append(_record)

    dao = MockDAO()
    dao.table = records

    yield dao
@pytest.fixture
def test_attributes():
    """Create a dict of Matrix attributes."""
    yield dict(
        revision_id=1,
        matrix_id=1,
        description=DESCRIPTION,
        column_id=6,
        row_id=3,
        correlation=1,
    )
@pytest.fixture(scope="class")
def test_table_model():
    """Get a table model instance for each test function."""
    # Create the device under test (dut) and connect to the database.
    dut = RAMSTKMatrixTable()

    yield dut

    # Unsubscribe from pypubsub topics.
    for _listener, _topic in (
        (dut.do_get_attributes, "request_get_matrix_attributes"),
        (dut.do_set_attributes, "request_set_matrix_attributes"),
        (dut.do_set_attributes, "wvw_editing_matrix"),
        (dut.do_update, "request_update_matrix"),
        (dut.do_select_all, "selected_revision"),
        (dut.do_get_tree, "request_get_matrix_tree"),
        (dut.do_delete, "request_delete_matrix"),
        (dut.do_insert, "request_insert_matrix"),
    ):
        pub.unsubscribe(_listener, _topic)

    # Delete the device under test.
    del dut
1588880812 | import math
import time
from rlbot.agents.base_agent import SimpleControllerState
import util.util as util
from util.vec import Vec3
from util.orientation import relative_location
from util.util import predict_ball_path, GOAL_HOME
class State():
    """State objects dictate the bot's current objective.
    These objects are used to control the behavior of the bot at a high level. States are reponsible for determining
    which controller to use as well as what actions the car needs to take. States do not directly determine controller inputs.
    State names should be descriptive and unique.
    Currently Implemented States:
        BallChase
    States in Development:
        Shoot
        Defend
    Attributes: expired (bool)
    """
    def __init__(self):
        """Creates a new unexpired state"""
        # True once the state is no longer useful and should be replaced.
        self.expired = False
    def execute(self, agent):
        """Executes the State's behavior.
        This function must be overridden by other States.
        Attributes:
            agent (BaseAgent): The bot
        Returns:
            Nothing.
        When overridden this function should return a SimpleControllerState() containing the commands for the car.
        """
        pass
    def checkAvailable(self, agent):
        """Checks to see if the state is available. The default state is unavailable
        Attributes:
            agent (BaseAgent): the bot
        Returns:
            bool: False unless overridden. True means available, false means unavailable.
        """
        return False
class BallChase(State):
    """BallChase aims to drive the car straight toward the ball
    This State has no regard for other cars or the movement of the ball. This is a simple state not meant for use in-game.
    Note:
        This state is always available and expires after 30 ticks.
    """
    def __init__(self):
        """Creates an unexpired instance of BallChase"""
        super().__init__()
        # Number of frames this state has been active.
        self.ticks = 0
    def checkAvailable(self, agent):
        """This state is always available"""
        return True
    def checkExpire(self):
        """Determines if the state is no longer useful (expires after 30 ticks)"""
        self.ticks = self.ticks + 1
        if self.ticks > 30:
            self.expired = True
    def execute(self, agent):
        """Attempts to drive the car toward the ball.
        Overrides the State class's execute function. The ground controller is automatically used and the target
        location is set to the ball's current location.
        Attributes:
            agent (BaseAgent): The bot
        Returns:
            SimpleControllerState: the set of commands to give the bot.
        """
        self.checkExpire()
        State.execute(self, agent)
        target_location = agent.ball.local_location
        return groundController(agent, target_location)
class Shoot(State):
    """Shoot attempts to hit the ball toward the opponent's goal"""
    def __init__(self):
        """Creates an instance of Shoot"""
        super().__init__()
    def checkExpire(self, agent):
        """Determines if the state is no longer useful"""
        # Expires once the ball crosses back onto our half of the field.
        if util.sign(agent.ball.location.y) == util.sign(agent.team):
            self.expired = True
    def checkAvailable(self, agent):
        """Determines if the state is an available option"""
        # Available only while the ball is on the opponent's half.
        if util.sign(agent.ball.location.y) != util.sign(agent.team):
            return True
        else:
            return False
    def execute(self, agent):
        """Attempts to hit the ball in a way that pushes it toward the goal"""
        self.checkExpire(agent)
        team = util.sign(agent.team)
        targetGoal = util.GOAL_HOME * -team
        ball_to_goal = targetGoal - agent.ball.location
        #distance_to_goal = ball_to_goal.length()
        direction_to_goal = ball_to_goal.normalized()
        # Aim one ball-radius behind the ball (opposite the goal) so contact
        # pushes it goalward.
        aim_location = agent.ball.location - (direction_to_goal * util.BALL_RADIUS)
        local_target = relative_location(agent.me.location, agent.me.rotation, aim_location)
        return groundController(agent, local_target)
class Defend(State):
    """Defend attempts to divert the ball away from the bot's own goal"""
    def __init__(self):
        """Creates an instance of Defend"""
        super().__init__()
    def checkAvailable(self, agent):
        """Available when the ball is on the friendly side of the field"""
        if util.sign(agent.ball.location.y) == util.sign(agent.team):
            return True
        return False
    def checkExpired(self, agent):
        """Expires when the ball leaves the friendly half.

        NOTE(review): named ``checkExpired`` while Shoot/BallChase use
        ``checkExpire`` — consider unifying the name in a coordinated refactor.
        """
        if util.sign(agent.ball.location.y) != util.sign(agent.team):
            self.expired = True
    def execute(self, agent):
        """Pick a defensive target: deflect a goal-bound ball, otherwise retreat."""
        self.checkExpired(agent)
        team = util.sign(agent.team)
        ball_path = predict_ball_path(agent)
        # The ball is dangerous if its predicted path leaves the field lengthwise
        # (i.e. is headed into a goal).
        danger = False
        for loc in ball_path:
            if(math.fabs(loc.y) > math.fabs(util.FIELD_LENGTH / 2)):
                danger = True
        target_location = agent.ball.local_location
        if danger:
            #aim to hit ball to the side
            #detect of ball is east or west of bot
            east_multiplier = util.sign(agent.ball.location.x - agent.me.location.x)
            #aim for side of the ball
            aim_location = agent.ball.location + Vec3(east_multiplier * util.BALL_RADIUS, 0, 0)
            target_location = relative_location(agent.me.location, agent.me.rotation, aim_location)
        elif agent.ball.local_location.length() > 1500:
            #get in goal
            target_location = relative_location(agent.me.location, agent.me.rotation, util.GOAL_HOME * team)
        elif agent.ball.local_location.length() < 500:
            # Close enough to clear the ball: shoot it toward the opponent's end.
            return shotController(agent, util.GOAL_HOME * -team)
        return groundController(agent, target_location)
class AimShot(State):
    """Aims the shot toward the net"""
    def __init__(self):
        """Creates an instance of AimShot"""
        super().__init__()
    def checkAvailable(self, agent):
        """If the ball is between the car and the goal, it is possible to shoot"""
        ballDirection = agent.ball.local_location
        goal_location = relative_location(agent.me.location, agent.me.rotation, util.GOAL_HOME*-util.sign(agent.team))
        angle = ballDirection.ang_to(goal_location)
        # Shootable when ball and goal are within 90 degrees of each other.
        if angle < (math.pi / 2):
            return True
        return False
    def checkExpired(self, agent, team):
        """If the ball is not reasonably close to being between the car and the goal, the state expires"""
        # Mirror image of checkAvailable, parameterized by team sign.
        ballDirection = agent.ball.local_location
        goal_location = relative_location(agent.me.location, agent.me.rotation, util.GOAL_HOME*-team)
        angle = ballDirection.ang_to(goal_location)
        if angle < (math.pi / 2):
            return False
        return True
    def execute(self, agent):
        """Shoots at the opponent's goal, expiring when the alignment is lost."""
        team = util.sign(agent.team)
        self.expired = self.checkExpired(agent, team)
        return shotController(agent, util.GOAL_HOME*team*-1)
def groundController(agent, target_location):
    """Gives a set of commands to move the car along the ground toward a target location
    Attributes:
        target_location (Vec3): The local location the car wants to aim for
    Returns:
        SimpleControllerState: the set of commands to achieve the goal
    """
    controllerState = SimpleControllerState()
    ball_direction = target_location;
    distance = target_location.flat().length()
    # Signed heading error in radians; negative steer turns toward positive angle.
    angle = -math.atan2(ball_direction.y,ball_direction.x)
    # NOTE(review): atan2 already returns values in [-pi, pi], so these
    # wrap-around branches appear defensive/unreachable — confirm before removing.
    if angle > math.pi:
        angle -= 2*math.pi
    elif angle < -math.pi:
        angle += 2*math.pi
    speed = 0.0
    turn_rate = 0.0
    # r1/r2 split the approach into close (< r1), mid, and far (>= r2) ranges.
    r1 = 250
    r2 = 1000
    if distance <= r1:
        #calculate turn direction
        if(angle > 0):
            turn_rate = -1.0
        elif(angle < 0):
            turn_rate = 1.0
        #if toward ball move forward
        if(abs(angle) < math.pi / 4):
            speed = 1.0
        else: #if not toward ball reverse, flips turn rate to adjust
            turn_rate = turn_rate * -1.0
            speed = -1.0
    #if far away, move at full speed forward
    elif distance >= r2:
        speed = 1.0
        # 2250 is just below max car speed; boost only while below it.
        if agent.me.velocity.length() < 2250:
            controllerState.boost = True
        if(angle > math.pi/32):
            turn_rate = -1.0
        elif(angle < -math.pi/32):
            turn_rate = 1.0
    #if mid range, adjust forward
    else:
        #adjust angle
        if(angle > math.pi/32):
            turn_rate = -1.0
        elif(angle < -math.pi/32):
            turn_rate = 1.0
        #adjust speed
        if agent.me.velocity.length() < 2250:
            controllerState.boost = True
        if abs(angle) < math.pi / 2:
            speed = 1.0
        else:
            speed = 0.5
    controllerState.throttle = speed
    controllerState.steer = turn_rate
    return controllerState
def shotController(agent, shotTarget):
    """Gives a set of commands to make the car shoot the ball
    This function will flip the car into the ball in order to make a shot and
    it will adjust the car's speed and positioning to help make the shot.
    Attributes:
        shotTarget (Vec3): The position that we want to hit the ball toward
    Returns:
        SimpleControllerState: the set of commands to achieve the goal
    """
    controllerState = SimpleControllerState()
    #get ball distance and angle from car
    ball_direction = agent.ball.local_location
    ball_distance = agent.ball.local_location.flat().length()
    ball_angle = -math.atan2(ball_direction.y,ball_direction.x)
    # NOTE(review): atan2 is already within [-pi, pi]; these branches look unreachable.
    if ball_angle > math.pi:
        ball_angle -= 2*math.pi
    elif ball_angle < -math.pi:
        ball_angle += 2*math.pi
    #get target distance and angle from ball
    ball_to_target = shotTarget - agent.ball.location
    target_distance = ball_to_target.length()
    ball_to_target_unit = ball_to_target.normalized()
    # Flip only when within 400 uu of the ball.
    if(ball_distance < 400):
        flipReady = True
    else:
        flipReady = False
    #flipping
    # agent.timer1 is external state on the bot marking when the flip sequence
    # started; the time windows below drive the jump/turn/flip phases.
    if(flipReady):
        time_diff = time.time() - agent.timer1
        if time_diff > 2.2:
            agent.timer1 = time.time()
        elif time_diff <= 0.1:
            #jump and turn toward the ball
            controllerState.jump = True
            if ball_angle > 0:
                controllerState.yaw = -1
            elif ball_angle < 0:
                controllerState.yaw = 1
        elif time_diff >= 0.1 and time_diff <= 0.15:
            #keep turning
            controllerState.jump = False
            if ball_angle > 0:
                controllerState.yaw = -1
            elif ball_angle < 0:
                controllerState.yaw = 1
        elif time_diff > 0.15 and time_diff < 1:
            #flip
            controllerState.jump = True
            if ball_angle > 0:
                controllerState.yaw = -1
            elif ball_angle < 0:
                controllerState.yaw = 1
            # NOTE(review): abs(ball_angle) > pi can never hold (atan2 range),
            # so pitch is always -1 (front-flip) here — confirm intent.
            if math.fabs(ball_angle) > math.pi:
                controllerState.pitch = 1
            else:
                controllerState.pitch = -1
        else:
            flipReady = False
            controllerState.jump = False
    else:
        # Too far to flip: drive toward a point one ball-radius behind the ball.
        aim_location = agent.ball.location - (ball_to_target_unit * util.BALL_RADIUS)
        local_target = relative_location(agent.me.location, agent.me.rotation, aim_location)
        controllerState = groundController(agent, local_target)
    return controllerState
25593085160 | from typing import List
class Solution:
    def recurSubset(self, nums: List[int], ans: List[int], ds: List[int], idx: int):
        """Depth-first enumeration of unique subsets, extending from index idx."""
        ans.append(ds[:])
        for j in range(idx, len(nums)):
            # Skip duplicates at the same recursion depth so each subset
            # appears exactly once (nums is sorted).
            if j > idx and nums[j] == nums[j - 1]:
                continue
            ds.append(nums[j])
            self.recurSubset(nums, ans, ds, j + 1)
            ds.pop()

    def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
        """Return every unique subset of nums (which may contain duplicates)."""
        nums.sort()
        collected: List[List[int]] = []
        self.recurSubset(nums, collected, [], 0)
        return collected
if __name__ == '__main__':
    # Demo: print all unique subsets of a sample array, in sorted order.
    arr = [3, 1, 5, 2]
    ob = Solution()
    ans = ob.subsetsWithDup(arr)
    ans.sort()
    for x in ans:
        print(x,end=" ")
        print("")
| avantika0111/Striver-SDE-Sheet-Challenge-2023 | Recursion/PrintlUniqueSubsets.py | PrintlUniqueSubsets.py | py | 942 | python | en | code | 0 | github-code | 13 |
30957343364 |
# 2'. Напишите программу, которая найдёт произведение пар чисел списка.
# Парой считаем первый и последний элемент, второй и предпоследний и т.д.
# - [2, 3, 4, 5, 6] =>[12,15,16] ([2*6, 3*5, 4*4]);
# - [2, 3, 5, 6] => [12,15] ( [2*6, 3*5])
def sum_list(list, index, i):
    """Print (and return) the product of the pair ``list[index]`` and ``list[-i]``.

    NOTE(review): despite the name, this computes a *product*, not a sum, and
    the parameter shadows the builtin ``list``; both names are kept so callers
    are unaffected.
    """
    multiplier = list[index] * list[-i]
    print(multiplier)
    # Returning the value (backward compatible) makes the helper testable.
    return multiplier
def multiplier_list(list):
    """Print the products of symmetric pairs: first*last, second*second-to-last, ...

    For an odd-length list the middle element is multiplied by itself.
    NOTE(review): the parameter shadows the builtin ``list``; name kept for
    backward compatibility.
    """
    print(list)
    # (len + 1) // 2 pairs covers both even and odd lengths, replacing the
    # original's two duplicated while-loop branches.
    for index in range((len(list) + 1) // 2):
        sum_list(list, index, index + 1)
print('Enter the list items separated by a space: ')
# Bind the input to ``values`` instead of ``list`` — the original shadowed the
# builtin ``list``, which would break any later call to list().
values = [int(item) for item in input().split()]
multiplier_list(values)
| BoaL22/3_homework_lesson_three | 2_second_task.py | 2_second_task.py | py | 935 | python | ru | code | 0 | github-code | 13 |
25160035398 | import pygame
import random
from pygame import *
pygame.init()
pygame.display.set_caption("Minesweeper")
list=[True,True,True,False,True,True,True,False,30 ,False]
# run,bomb,close,plyer,gm, mainloop, file, fps,hint
clock = pygame.time.Clock()
size=[]
clicks=[]
list1=[]
bomb=[]
#bomb=[[40,60],[40,80],[40,100],[40,120],[40,140],[40,160],[60,160],[80,160],[100,160],[120,160],[140,160],[160,160],[160,140],[160,120],[160,100],[160,80],[160,60],[60,60],[80,60],[100,60],[120,60],[140,60]]
bomb_no1=[]
flag=[]
none=pygame.image.load('mine\\none.jpg')
zero=pygame.image.load('mine\\zero.png')
one=pygame.image.load('mine\\one.png')
two=pygame.image.load('mine\\two.png')
three=pygame.image.load('mine\\three.png')
four=pygame.image.load('mine\\four.png')
five=pygame.image.load('mine\\five.png')
six=pygame.image.load('mine\six.png')
seven=pygame.image.load('mine\seven.png')
eight=pygame.image.load('mine\eight.png')
minered=pygame.image.load('mine\\minered.png')
mine=pygame.image.load('mine\\mine.png')
flag1=pygame.image.load('mine\\flag.png')
dead=pygame.image.load('mine\\dead.png')
happy=pygame.image.load('mine\\happy.PNG')
hope=pygame.image.load('mine\\click.PNG')
pygame.display.set_icon(flag1)
def grid():
    """Populate the global ``list1`` with the top-left [x, y] of every 20x20
    board cell (board area starts at y=60, below the header bar)."""
    # Step directly by the 20px cell size instead of modulo-testing every pixel.
    for y in range(60, size[1], 20):
        for x in range(0, size[0], 20):
            list1.append([x, y])
    #print(list1)
def bomb_and_no():
    """Place ``size[2]`` bombs at random grid cells and compute per-cell
    neighbour counts into ``bomb_no1`` (9 marks a bomb cell).

    Cells in ``fclick`` (the first click and its neighbours) are kept bomb-free
    so the opening click is always safe.
    """
    i=1
    while size[2]>=i:
        bx=random.randrange(0,size[0],20)
        by=random.randrange(60,size[1],20)
        # Collision with an existing bomb: remove it and retry (i is decremented
        # so the net count is unchanged after the unconditional append below).
        if [bx,by] in bomb:
            bomb.remove([bx,by])
            i-=1
        bomb.append([bx,by])
        # Never place a bomb on/near the first click.
        if [bx,by] in fclick:
            bomb.remove([bx,by])
            i-=1
        i+=1
    #print(bomb)
    # For each cell, count adjacent bombs; a bomb cell itself is marked 9.
    for x,y in list1:
        i=0
        for a,b in bomb:
            if x==a and y==b:
                i=9
                break
            if x+20==a and y==b:
                i+=1
            if x+20==a and y+20==b:
                i+=1
            if x==a and y+20==b:
                i+=1
            if x+20==a and y-20==b:
                i+=1
            if x==a and y-20==b:
                i+=1
            if x-20==a and y-20==b:
                i+=1
            if x-20==a and y==b:
                i+=1
            if x-20==a and y+20==b:
                i+=1
        bomb_no1.append(i)
def pos(a, b):
    """Snap a pixel coordinate (a, b) down to the top-left corner of the 20px
    grid cell containing it."""
    # int(v / 20) * 20 floors a positive coordinate onto the grid; values that
    # are already multiples of 20 map to themselves, so no special case is needed.
    return int(a / 20) * 20, int(b / 20) * 20
def screen(win):
    """Redraw the whole board: header bar, covered cells, flags, and the
    number/bomb sprite for every revealed cell in ``clicks``."""
    # Header bar with the smiley restart button.
    pygame.draw.rect(win,(192,192,192),(0,0,size[0],60))
    win.blit(happy,(int((size[0]/2)-3),17))
    # i tracks the index into bomb_no1, parallel to list1.
    i=0
    for x,y in list1:
        if y>40 :
            win.blit(none,(x,y))
        for a,b in flag:
            if x==a and b==y:
                win.blit(flag1,(x,y))
        for a,b in clicks:
            if x==a and b==y:
                if bomb_no1[i]==0 and y>40 :
                    win.blit(zero,(x,y))
                if bomb_no1[i]==1 and y>40:
                    win.blit(one,(x,y))
                if bomb_no1[i]==2 and y>40:
                    win.blit(two,(x,y))
                if bomb_no1[i]==3 and y>40:
                    win.blit(three,(x,y))
                if bomb_no1[i]==4:
                    win.blit(four,(x,y))
                if bomb_no1[i]==5:
                    win.blit(five,(x,y))
                if bomb_no1[i]==6:
                    win.blit(six,(x,y))
                if bomb_no1[i]==7:
                    win.blit(seven,(x,y))
                if bomb_no1[i]==8:
                    win.blit(eight,(x,y))
                if bomb_no1[i]==9:
                    win.blit(minered,(x,y))
        i+=1
    #fclick=list(dict.fromkeys(fclick))
def first1():
    """Handle the very first click of a game: queue the clicked cell and its
    neighbours, place bombs avoiding them, then reveal and redraw.

    NOTE(review): ``fclick`` is assigned at the bottom of this function, which
    makes it local to first1 in Python; the earlier ``fclick.append(...)`` (and
    the loop over it) would then raise UnboundLocalError, and no module-level
    ``fclick`` is defined in this file. Confirm, and consider declaring
    ``global fclick`` with a module-level ``fclick = []``.
    """
    pygame.display.update()
    clock.tick(list[8])
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            list[0] =list[2]=list[4]=list[5]= False
            list[6]=True
        if event.type==pygame.MOUSEBUTTONDOWN:
            xx,y=pos(event.pos[0],event.pos[1])
            fclick.append([xx,y])
            nebor(xx,y)
        bomb_and_no()
        for a,b in fclick:
            clicks.append([a,b])
        uncover()
        screen(win)
        fclick=[]
def nebor(xx,y):
    """Queue the 8 neighbours of cell (xx, y) onto ``fclick`` and record the
    cell itself in ``fclick1`` as having been expanded."""
    # Offsets in the exact order the original appended them.
    for dx, dy in ((20, 0), (0, 20), (20, 20), (-20, 0),
                   (0, -20), (-20, -20), (20, -20), (-20, 20)):
        fclick.append([xx + dx, y + dy])
    fclick1.append([xx, y])
fclick1=[]
def nebors(xx,y):
    """Expand every zero-count cell in the 3x3 block around (xx, y) that has
    not been expanded yet (flood-fill step for empty regions).

    Interior cells take the first branch; cells on the board border fall to the
    else-branch and only expand themselves.
    """
    if xx+20<size[0] and y+20<size[1] and xx-20>-20 and y-20>40:
        if bomb_no1[list1.index([xx,y])]==0 and [xx,y] not in fclick1:
            nebor(xx,y)
        if bomb_no1[list1.index([xx+20,y])]==0 and [xx+20,y] not in fclick1:
            nebor(xx+20,y)
        if bomb_no1[list1.index([xx,y+20])]==0 and [xx,y+20] not in fclick1:
            nebor(xx,y+20)
        if bomb_no1[list1.index([xx+20,y+20])]==0 and [xx+20,y+20] not in fclick1:
            nebor(xx+20,y+20)
        if bomb_no1[list1.index([xx-20,y])]==0 and [xx-20,y] not in fclick1:
            nebor(xx-20,y)
        if bomb_no1[list1.index([xx,y-20])]==0 and [xx,y-20] not in fclick1:
            nebor(xx,y-20)
        if bomb_no1[list1.index([xx-20,y-20])]==0 and [xx-20,y-20] not in fclick1:
            nebor(xx-20,y-20)
        if bomb_no1[list1.index([xx+20,y-20])]==0 and [xx+20,y-20] not in fclick1:
            nebor(xx+20,y-20)
        if bomb_no1[list1.index([xx-20,y+20])]==0 and [xx-20,y+20] not in fclick1:
            nebor(xx-20,y+20)
    else:
        # Border cell: only expand the cell itself.
        if y==60 or xx==0 or xx==size[0]-20 or y==size[1]-20:
            if bomb_no1[list1.index([xx,y])]==0 and [xx,y] not in fclick1:
                nebor(xx,y)
def uncover():
    """Flood-reveal all cells queued in ``fclick``, then merge them into
    ``clicks`` (deduplicated, off-board cells dropped) and clear the queue.

    The first loop iterates ``fclick`` while ``nebors``/``nebor`` append to it,
    which is what drives the flood fill outward.
    """
    for xx,y in fclick:
        if xx<size[0] and y<size[1] and xx>-20 and y>40:
            if bomb_no1[list1.index([xx,y])]==0:
                nebors(xx,y)
    for a,b in fclick:
        # remove-then-append deduplicates while keeping the newest position.
        if [a,b] in clicks:
            clicks.remove([a,b])
        clicks.append([a,b])
        # Discard coordinates that fall outside the playable board.
        if b<60 or b>size[1]-20 or a<0 or a>size[0]-20:
            clicks.remove([a,b])
    fclick.clear()
def check(xx,y):
    """Reveal cell (xx, y): if it is an empty (zero-count) board cell, seed a
    flood fill from it; in any case flush the pending queue via uncover()."""
    if y>40:
        if bomb_no1[list1.index([xx,y])] == 0:
            nebor(xx,y)
    uncover()
def action(win,down):
    """Process one frame of in-game mouse input while a button is held.

    Left release (button 1) reveals the cell (or unflags it); right release
    (button 3) toggles a flag. Returns the updated ``down`` state so the caller
    can exit its while-held loop.
    """
    clock.tick(list[8])
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            list[0]=list[2]=list[4]=list[5]= False
        if event.type == pygame.MOUSEBUTTONUP:
            down=False
            # l stays True unless the click was consumed by removing a flag.
            l=True
            xx,y=pos(event.pos[0],event.pos[1])
            if event.button==1:
                for x,fy in clicks:
                    if x==xx and y==fy:
                        clicks.remove([xx,y])
                    if fy<60 :
                        clicks.remove([xx,fy])
                clicks.append([xx,y])
                check(xx,y)
                if [xx,y] in flag:
                    # Left-clicking a flagged cell just removes the flag.
                    flag.remove([xx,y])
                    clicks.remove([xx,y])
                    l=False
                if l and [xx,y] in bomb:
                    print('game over')
                    list[0]=list[3]=list[6]=False
            if event.button==3:
                if [xx,y] in flag:
                    flag.remove([xx,y])
                    l=False
                if l:
                    flag.append([xx,y])
    screen(win)
    return down
def game(win):
    """Main in-game loop: wait for the first click, then run frames handling
    restart, the 'h' hint key, win detection, and mouse actions."""
    #for a,b in bomb:
    #    flag.append([a,b])
    screen(win)
    # Block until the opening click has revealed something.
    while len(clicks)==0:
        first1()
    while list[0]:
        pygame.display.update()
        clock.tick(list[8])
        down=False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                list[0] =list[2]=list[4]=list[5]= False
                list[6]=True
            #print(event)
            # Key 104 is 'h': request a hint for the next frame.
            if event.type==pygame.KEYDOWN and list[9]==False:
                if event.key==104:
                    #print("yes")
                    list[9]=True
            if event.type == pygame.MOUSEBUTTONDOWN:
                down=True
                # Click on the smiley button resets all game state.
                if event.pos[0]>int((size[0]/2)-3) and event.pos[1]>17 and event.pos[0]<int((size[0]/2)-3)+26 and event.pos[1]<17+26:
                    list[2]=list[0]=list[4]=list[1]=down=False
                    clicks.clear()
                    bomb.clear()
                    bomb_no1.clear()
                    flag.clear()
                    fclick1.clear()
                    win.blit(hope,(int((size[0]/2)-3),17))
                    pygame.display.update()
        while down :
            down=action(win,down)
        # Win when every non-bomb cell has been revealed.
        if len(clicks)+len(bomb)==size[3]:
            list[0]=list[6] =False
            list[3]=True
            print('you won')
        # Hint: flag or reveal the cell currently under the mouse.
        if list[9]:
            xx,y=pos(pygame.mouse.get_pos()[0],pygame.mouse.get_pos()[1])
            if [xx,y] not in clicks:
                if [xx,y] in bomb :
                    flag.append([xx,y])
                elif [xx,y] not in clicks:
                    clicks.append([xx,y])
            list[9]=False
            check(xx,y)
            #print(xx,y)
        screen(win)
def end():
    """Game-over screen loop: show all bombs (on a loss, with the dead face)
    and wait for quit or a smiley-click restart."""
    while list[2]:
        clock.tick(list[8])
        # list[3] is the win flag; on a loss show the dead face and all mines.
        if list[3]==False:
            win.blit(dead,(int((size[0]/2)-3),17))
        i=0
        for x,y in list1:
            if y>40:
                win.blit(none,(x,y))
                if bomb_no1[i]==9 and list[3]==False:
                    win.blit(mine,(x,y))
            for a,b in flag:
                if x==a and b==y:
                    win.blit(flag1,(x,y))
            for c,d in clicks:
                if x==c and y==d:
                    if bomb_no1[i]==0 and y>40 :
                        win.blit(zero,(x,y))
                    if bomb_no1[i]==1 and y>40:
                        win.blit(one,(x,y))
                    if bomb_no1[i]==2 and y>40:
                        win.blit(two,(x,y))
                    if bomb_no1[i]==3 and y>40:
                        win.blit(three,(x,y))
                    if bomb_no1[i]==4:
                        win.blit(four,(x,y))
                    if bomb_no1[i]==5:
                        win.blit(five,(x,y))
                    if bomb_no1[i]==6:
                        win.blit(six,(x,y))
                    if bomb_no1[i]==7:
                        win.blit(seven,(x,y))
                    if bomb_no1[i]==8 :
                        win.blit(eight,(x,y))
                    if bomb_no1[i]==9:
                        win.blit(minered,(x,y))
            i+=1
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                list[5]=list[2]=False
            # Smiley click: reset all state and flag a fresh game.
            if event.type == pygame.MOUSEBUTTONDOWN and event.pos[0]>int((size[0]/2)-3) and event.pos[1]>17 and event.pos[0]<int((size[0]/2)-3)+26 and event.pos[1]<17+26:
                list[0]=list[6]=True
                list[2]=list[4]=list[1]=False
                clicks.clear()
                bomb.clear()
                bomb_no1.clear()
                flag.clear()
                fclick1.clear()
def start():
    """Prompt for a difficulty on the console and fill the global ``size`` as
    [width_px, height_px, bomb_count, safe_cell_count]."""
    n=int(input("Select Your Difficulty:\n1.Easy(9X9)\n2.Medium(16X16)\n3.Hard(30X16)\n4.Custom\n"))
    if n==1:
        size.append(180)
        size.append(240)
        size.append(10)
        size.append(81)
    if n==2:
        size.append(320)
        size.append(380)
        size.append(40)
        size.append(256)
    if n==3:
        size.append(600)
        size.append(360)
        size.append(99)
        size.append(480)
    if n==4:
        # Custom board: b gets +3 rows of height to account for the header bar.
        while list[4]:
            a=int(input("Enter rows\n"))
            b=int(input("Enter coloums\n"))+3
            if a<6 and b<11:
                print("Game should be greater or equal to 6X8")
            else:
                size.append(a*20)
                size.append(b*20)
                list[4]=False
        # Keep asking until the bomb count fits on the board.
        while list[1]:
            size.append(int(input("Enter bomb\n")))
            print(size)
            if size[2]<a*b:
                list[1]=False
            else:
                size.pop(2)
        # Total playable cells (the header's 3 rows excluded).
        size.append(int(a*(b-3)))
grid()
while list[5]:
if list[0]==False and list[6]==True:
list[0]=True
elif list[0]:
win = pygame.display.set_mode((size[0],size[1]))
game(win)
if list[6]==False:
list[2]=True
end()
pygame.quit()
| parteekmalik/minesweeper | minesweeper.py | minesweeper.py | py | 12,718 | python | en | code | 1 | github-code | 13 |
8525088956 | import os
import numpy as np
import pandas as pd
import tensorflow as tf
import keras.api._v2.keras as keras
from keras.api._v2.keras import layers, optimizers, losses, models,\
regularizers
from keras.api._v2.keras.preprocessing.image import ImageDataGenerator
from util.util import *
from util.my_tf_callback import LearningRateA, saver
from sklearn.metrics import confusion_matrix, classification_report
import time
from util.datasets_util import balance, preprocessing
from util.report_util import *
# Use a CJK-capable font for matplotlib labels.
# NOTE(review): ``plt`` is not imported explicitly here — presumably it comes
# in via ``from util.util import *``; verify.
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
# Cap TensorFlow's GPU memory on the first device at ~4.3 GB.
gpus = tf.config.experimental.list_physical_devices(device_type="GPU")
tf.config.experimental.set_virtual_device_configuration(
    device=gpus[0],
    logical_devices=[tf.config.experimental.VirtualDeviceConfiguration(
        memory_limit=4375)]
)
# Load the dataset
dataset_name = "origin"
dataset_dir = "datasets"
categories = ["train", "test", "valid"]
# Walk each split directory
for category in categories:
    category_dir = os.path.join(dataset_dir, category)
    # list the class sub-directories of this split
    classes = os.listdir(category_dir)
    # collect every image path together with its class label
    filenames = []
    labels = []
    for clazz in classes:
        clazz_dir = os.path.join(category_dir, clazz)
        f_names = os.listdir(clazz_dir)
        for f in f_names:
            filenames.append(os.path.join(clazz_dir, f))
            labels.append(clazz)
    # wrap the collected paths/labels in pandas Series
    f_features = pd.Series(filenames, name="filepaths")
    labels = pd.Series(labels, name="labels")
    if category == "train":
        train_df = pd.concat([f_features, labels], axis=1)
    elif category == "test":
        test_df = pd.concat([f_features, labels], axis=1)
    else:
        valid_df = pd.concat([f_features, labels], axis=1)
# resulting pandas DataFrames
# print(train_df)
# print(test_df)
# print(valid_df)
def scalar(img):
    """Rescale pixel values from [0, 255] into [0, 1] (ImageDataGenerator hook)."""
    return img / 255.0
# 然后将其转换为tf的数据生成器
train_gen = ImageDataGenerator(
preprocessing_function=scalar,
# 设置随机旋转角度 15度 # 设置随机水平翻转 # 设置随机垂直翻转
rotation_range=15, horizontal_flip=True, vertical_flip=True)
test_valid_gen = ImageDataGenerator(preprocessing_function=scalar)
# 设置超参数
epochs = 10
batch_size = 128
learning_rate = 0.001
image_size = (224, 224)
# image_size = (150, 150)
num_classes = 325
min_sample_size = 0
max_sample_size = 140
label_column_name = "labels"
work_dir = "./datasets"
model_name = "my-cnn"
# 之前没有平衡数据集,现在采用平衡技术
# dataset_name = "balance"
# train_df, test_df, valid_df = preprocessing("datasets")
# train_df = balance(train_df, min_sample_size, max_sample_size, work_dir,
# label_column_name, image_size)
# 获取数据加载器
train_dataloader = train_gen.flow_from_dataframe(
# 指定图像来源列及标签列,batch_size
train_df, x_col="filepaths", y_col="labels", batch_size=batch_size,
# 将图像转换为的大小, 指定是分类任务
shuffle=True, color_mode='rgb', target_size=image_size,
class_mode="categorical")
test_dataloader = test_valid_gen.flow_from_dataframe(
# 指定图像来源列及标签列,batch_size
test_df, x_col="filepaths", y_col="labels", batch_size=batch_size,
# 将图像转换为的大小, 指定是分类任务
target_size=image_size, color_mode='rgb', class_mode="categorical"
)
valid_dataloader = test_valid_gen.flow_from_dataframe(
# 指定图像来源列及标签列,batch_size
valid_df, x_col="filepaths", y_col="labels", batch_size=batch_size,
# 将图像转换为的大小, 指定是分类任务
target_size=image_size, color_mode='rgb', class_mode="categorical"
)
# 构建模型
# 第一版训练速度太慢,而且 10 个 epochs 的准确率只能达到70%
# 两个 epochs 只达到了 27%-32% 之间
version = 1
model = models.Sequential([
keras.Input(shape=(224, 224, 3)),
layers.Conv2D(16, (3, 3), (1, 1), padding="valid", use_bias=True),
layers.BatchNormalization(),
layers.ReLU(),
layers.Conv2D(32, (3, 3), (2, 2), padding="valid", use_bias=True),
layers.BatchNormalization(),
layers.ReLU(),
layers.Conv2D(64, (3, 3), (2, 2), padding="valid", use_bias=True),
layers.BatchNormalization(),
layers.ReLU(),
layers.Conv2D(128, (5, 5), (2, 2), padding="valid", use_bias=True),
layers.BatchNormalization(),
layers.ReLU(),
layers.Conv2D(128, (5, 5), (2, 2), padding="valid", use_bias=True),
layers.BatchNormalization(),
layers.ReLU(),
layers.AvgPool2D(pool_size=(2, 2), strides=(2, 2)),
layers.Flatten(),
layers.Dense(1024, activation='relu', use_bias=True),
layers.Dense(num_classes, activation='softmax')
])
# 第二版新增2个卷积层,将图片压缩到 1x1x512
# 减少参数数量,将其中一个全连接层移除
# 第二版参数量下降45%, 但是所需内存并没有减少
# 训练速度不变,但是引入MaxPool后,模型准确率提升很大 测试集37.2%,验证集37.7%, 训练集37.2%
# version = 2
# model = models.Sequential([
# keras.Input(shape=(150, 150, 3)),
# layers.Conv2D(32, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(32, (3, 3), (1, 1), padding="valid", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.Conv2D(64, (3, 3), (2, 2), padding="valid", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.Conv2D(128, (3, 3), (2, 2), padding="valid", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.Conv2D(256, (3, 3), (2, 2), padding="valid", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.Conv2D(512, (3, 3), (1, 1), padding="valid", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.AvgPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Flatten(),
# layers.Dense(num_classes, activation='softmax')
# ])
# 第三版,引入更多的池化层
# 比第二版多了45%的参数量,在第二版的基础上多加了4个MaxPool
# 训练速度不变,2个epochs的测试准确率49.66%,验证集准确率47.63%, 训练集47.6%
# 十次训练后,过拟合现象非常明显。测试准确率86.07%,验证集准确率46.83%, 训练集46.21%
# version = 3
# model = models.Sequential([
# keras.Input(shape=(150, 150, 3)),
# layers.Conv2D(32, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(32, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(64, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(128, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(256, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(512, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.AvgPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Flatten(),
# layers.Dense(num_classes, activation='softmax')
# ])
# 第四版
# 第三版的问题仍然严峻,准确率还未突破90%,并且存在极大的过拟合问题
# 再添加一层卷积加池化,将输入压缩为1 x 1 x 1024
# 尝试添加dropout,l2正则化
# 十次训练后,验证集:69.23%, 测试集:69.35%, 训练集:68.6%
# 但又产生了训练效率低的问题
# 我似乎无法想出更好的
# version = 4
# model = models.Sequential([
# keras.Input(shape=(224, 224, 3)),
# layers.Conv2D(64, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(64, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(128, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(128, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(256, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(512, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(1024, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.AvgPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Flatten(),
# layers.Dropout(rate=0.3),
# layers.Dense(512, activation='relu',
# kernel_regularizer=regularizers.l2(0.01)),
# layers.Dense(num_classes, activation='softmax')
# ])
# 试试VGG的A模型
# version = 1
# model = models.Sequential([
# keras.Input(shape=(150, 150, 3)),
# layers.Conv2D(64, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(128, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(256, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.Conv2D(256, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(512, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.Conv2D(512, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Conv2D(512, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.Conv2D(512, (3, 3), (1, 1), padding="same", use_bias=True),
# layers.BatchNormalization(),
# layers.ReLU(),
# layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
# layers.Flatten(),
# # 由于原模型实在过于庞大,这里对全连接层进行优化
# layers.Dense(1024, activation='relu'),
# layers.Dense(1024, activation='relu'),
# layers.Dense(num_classes, activation='softmax')
# ])
# model = models.load_model("model/cnn/vgg-A-first-68.121652849743.9300385.h5")
print(model.summary())
# exit()
model.compile(
optimizer=optimizers.Adam(learning_rate=learning_rate),
loss=losses.CategoricalCrossentropy(),
metrics=['accuracy'])
history = model.fit(train_dataloader, batch_size=batch_size, epochs=epochs,
shuffle=True, validation_data=valid_dataloader)
# acc = model.evaluate(train_dataloader, return_dict=False)[1] * 100
# print("训练集准确率为:", acc)
acc = model.evaluate(test_dataloader, return_dict=False)[1] * 100
print("测试集准确率为:", acc)
acc = model.evaluate(valid_dataloader, return_dict=False)[1] * 100
print("验证集准确率为:", acc)
print("保存路径是:", f"model/{model_name}")
save_id = f"in-{dataset_name}-{model_name}-{version}v-{epochs}epochs-" \
f"{str(acc)[:str(acc).rfind('.') + 3]}-{time.time()}.h5"
model_save_loc = os.path.join(f"model/{model_name}", save_id)
model.save(model_save_loc)
tr_plot(history, 0)
preds = model.predict(test_dataloader)
print_info(test_dataloader, preds, 0, "tmp", model_name)
| NCcoco/kaggle-project | Bird-Species/train-by-easy-cnn.py | train-by-easy-cnn.py | py | 12,581 | python | en | code | 0 | github-code | 13 |
10010423569 | #Pong
import pygame
import random
pygame.init()
pygame.font.init()
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
size = (600, 400)
screen = pygame.display.set_mode(size)
myfont = pygame.font.SysFont('trebuchetms', 15)
pygame.display.set_caption("Pong")
carryOn = True
clock = pygame.time.Clock()
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
PAD_WIDTH = 8
PAD_HEIGHT = 80
LEFT = False
RIGHT = True
ball_position = [1,0]
ball_velocity = [1,0]
scoreLeft = 0
scoreRight = 0
paddle1_position = 0
paddle2_position = 0
paddle1_velocity = 0
paddle2_velocity = 0
def spawn_ball(direction):
    """Re-centre the ball and serve it toward *direction* (RIGHT or LEFT).

    The horizontal speed magnitude is random (4..7 to the right, 5..8 to the
    left, matching the original randrange bounds); the vertical speed is drawn
    uniformly from {-5, -4, -3, 3, 4, 5}.

    Fixes an off-by-one bug: the original ``positions[random.randrange(0, 5)]``
    could never select the last entry, so a vertical speed of +5 was
    unreachable.  The leftover debug print of the x-velocity is removed.
    """
    global ball_position, ball_velocity, RIGHT, LEFT
    ball_position[0] = WIDTH / 2
    ball_position[1] = HEIGHT / 2
    vertical_choices = [-4, -5, -3, 3, 4, 5]
    if direction == RIGHT:
        ball_velocity[0] = random.randrange(4, 8)
    else:
        ball_velocity[0] = random.randrange(-8, -4)
    # random.choice covers all six entries (the old randrange(0,5) missed one).
    ball_velocity[1] = random.choice(vertical_choices)
def new_game():
    """Begin a fresh rally by serving the ball toward the right player."""
    spawn_ball(RIGHT)
def drawLogic(screen):
    """Advance one frame: move the ball, resolve wall/paddle collisions and
    scoring, move both paddles, then redraw court, paddles, ball and scores.

    screen: the pygame display surface to draw onto.
    Mutates the module-level game state (ball/paddle positions, velocities,
    and both scores).
    """
    screen.fill(BLACK)
    global scoreRight, scoreLeft, paddle1_position, paddle2_position ,paddle1_velocity, paddle2_velocity, ball_position, ball_velocity, LEFT, RIGHT
    pygame.draw.circle(screen, WHITE, [int(ball_position[0]), int(ball_position[1])], 20, 0)
    # Integrate the ball's motion by one step.
    ball_position[0] += ball_velocity[0]
    ball_position[1] += ball_velocity[1]
    #keep ball on the screen
    # Bounce off top/bottom walls (ball radius 20 px, screen height 400 px).
    if ball_position[1] < 20:
        ball_velocity[1] *= -1
    elif ball_position[1] > 380:
        ball_velocity[1] *= -1
    # Left edge: paddle hit reflects the ball and speeds it up by 20%;
    # otherwise the right player scores and the ball is re-served toward them.
    if ball_position[0] < 8:
        if ball_position[1] > paddle1_position and ball_position[1] < paddle1_position + PAD_HEIGHT:
            ball_velocity[0] = -1 * ball_velocity[0]
            ball_velocity[0] += ball_velocity[0] * 0.2
        else:
            spawn_ball(RIGHT)
            scoreRight += 1
    # Right edge: mirror logic for the second paddle.
    elif ball_position[0] > 592:
        if ball_position[1] > paddle2_position and ball_position[1] < paddle2_position + PAD_HEIGHT:
            ball_velocity[0] = -1 * ball_velocity[0]
            ball_velocity[0] += ball_velocity[0] * 0.2
        else:
            spawn_ball(LEFT)
            scoreLeft += 1
    # Move paddles, clamped so they stay on the 400-px-tall screen.
    if paddle1_position + paddle1_velocity < 325 and paddle1_position + paddle1_velocity > -5:
        paddle1_position += paddle1_velocity
    if paddle2_position + paddle2_velocity < 325 and paddle2_position + paddle2_velocity > -5:
        paddle2_position += paddle2_velocity
    # Static court decoration: centre line plus both goal lines.
    pygame.draw.line(screen, WHITE, [WIDTH/2, 0], [WIDTH/2, HEIGHT], 1)
    pygame.draw.line(screen, WHITE, [PAD_WIDTH, 0], [PAD_WIDTH, HEIGHT], 1)
    pygame.draw.line(screen, WHITE, [WIDTH - PAD_WIDTH, 0], [WIDTH - PAD_WIDTH, HEIGHT], 1)
    # Paddles drawn as thick vertical lines hugging the screen edges.
    pygame.draw.line(screen, WHITE, [0, paddle1_position], [0, paddle1_position + PAD_HEIGHT], 15)
    pygame.draw.line(screen, WHITE, [600, paddle2_position], [600, paddle2_position + PAD_HEIGHT], 15)
    # Score HUD.
    textsurface = myfont.render('Left score: ' + str(scoreLeft), False, WHITE)
    textsurface1 = myfont.render('Right score: ' + str(scoreRight), False, WHITE)
    screen.blit(textsurface, (30, 20))
    screen.blit(textsurface1, (330, 20))
def keydown(event):
    """Start moving a paddle when a control key is pressed.

    W/S drive the left paddle, Up/Down arrows the right one.  The key
    constants are mutually exclusive, so an elif chain is equivalent to
    the original sequence of independent ifs.
    """
    global paddle1_velocity, paddle2_velocity
    if event.key == pygame.K_DOWN:
        paddle2_velocity += 5
    elif event.key == pygame.K_UP:
        paddle2_velocity += -5
    elif event.key == pygame.K_s:
        paddle1_velocity += 5
    elif event.key == pygame.K_w:
        paddle1_velocity += -5
def keyup(event):
    """Stop the corresponding paddle when a control key is released."""
    global paddle1_velocity, paddle2_velocity
    if event.key in (pygame.K_DOWN, pygame.K_UP):
        paddle2_velocity = 0
    elif event.key in (pygame.K_s, pygame.K_w):
        paddle1_velocity = 0
# Main game loop: runs until the window is closed.
while carryOn:
    # Cap the frame rate at 60 FPS, then simulate and draw one frame.
    clock.tick(60)
    drawLogic(screen)
    # --- Main event loop
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN:
            keydown(event)
        if event.type == pygame.KEYUP:
            keyup(event)
        # User did something
        if event.type == pygame.QUIT: # If user clicked close
            carryOn = False
    # Swap buffers so the freshly drawn frame becomes visible.
    pygame.display.flip()
| snigui/pong | src/pong.py | pong.py | py | 4,351 | python | en | code | 0 | github-code | 13 |
5174844496 | import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import heapq
import random
import time
from torch.autograd import Variable
#训练数据加载
delete=pd.read_csv("delete_normal_kdd.csv")
train_data=delete.iloc[:,:12]
train_label=delete.iloc[:,12]
train_data = np.array(train_data)
train_label = np.array(train_label)
#esl训练数据
#Xs=pd.read_csv("dasesm_new3.csv")
Xs=pd.read_csv("CESDDM.csv")
Xs=Xs.iloc[:,:12]
Xs = np.array(Xs)
#测试数据加载
test1=pd.read_csv("test.csv")
test_data=test1.iloc[:,:12]
test_label=test1.iloc[:,12]
test_data=np.array(test_data)
test_label=np.array(test_label)
#超参数
EPOCH=15
len1=20000#len(train_data)
len2=200
len3=len(test_data)
LR = 0.001
z=0.08
z1=z*1.4
z2=np.sqrt(np.square(z)+np.square(z1))
#距离 and 核函数
def kernel(x, y, l):
    """Gaussian kernel over the cosine distance between vectors x and y.

    Computes exp(-d^2 / (2*l^2)) where d = 1 - cos(x, y) and l is the
    kernel bandwidth.
    """
    cosine_similarity = np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))
    cosine_distance = 1 - cosine_similarity
    return np.exp(-np.square(cosine_distance) / (2 * np.square(l)))
def kernel1(x, l):
    """Gaussian kernel on a scaled scalar: exp(-(100*x)^2 / (2*l^2))."""
    scaled = 100 * x
    return np.exp(-np.square(scaled) / (2 * np.square(l)))
#sigmoid
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e^{-x}).

    The original ``1/(1+np.exp(-x))`` overflows np.exp (RuntimeWarning) for
    large negative x.  Using exp(-|x|), whose argument is never positive,
    avoids overflow while returning identical values.
    """
    z = np.exp(-np.abs(x))
    out = np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
    # np.where promotes scalars to 0-d arrays; unwrap to preserve the
    # original scalar-in / scalar-out contract.
    return out.item() if np.ndim(out) == 0 else out
#fx
def fx_(x):
    """Kernel-density estimate of x against the first ``len1`` training rows.

    Mean of the Gaussian-cosine ``kernel`` between x and each training vector.
    """
    total = sum(kernel(x, train_data[i], z) for i in range(len1))
    return total / len1
#px
def px_(x):
    """Kernel-density estimate of x against the ``len2`` reservoir samples Xs."""
    total = sum(kernel(x, Xs[i], z) for i in range(len2))
    return total / len2
#原始的Xs
def or_Xs():
    """Seed the reservoir with the first ``len2`` training vectors."""
    reservoir = [train_data[i] for i in range(len2)]
    return np.array(reservoir)
Xs=or_Xs()
#初始fx
def or_fx():
    """Initial training-density estimates fx for every reservoir sample."""
    return [fx_(Xs[i]) for i in range(len2)]
#初始px
def or_px():
    """Initial reservoir-density estimates px for every reservoir sample."""
    return [px_(Xs[i]) for i in range(len2)]
fx=or_fx()
px=or_px()
#替换谁
def max_re(x, y):
    """Index of the element whose surplus x[i] - y[i] is largest.

    x, y: equal-length sequences of density estimates.
    Returns the first index achieving the maximum difference (as the
    original ``list.index`` lookup did).

    The original iterated over the hidden global ``len2``; pairing the two
    sequences with ``zip`` removes that dependency and returns the same
    index for the reservoir-sized lists this is called with.
    """
    diffs = [a - b for a, b in zip(x, y)]
    return diffs.index(max(diffs))
#是否替换
def max_r(x, y):
    """Acceptance score for replacing reservoir entry ``x`` with candidate ``y``.

    x: index of a reservoir sample (into the global fx/px lists).
    y: candidate feature vector.
    Compares the density ratio of the stored sample and the candidate; a zero
    denominator is treated as ratio 1 (no preference).  The result 1 - ratio
    is clipped at zero.
    """
    numerator = fx[x] * px_(y)
    denominator = px[x] * fx_(y)
    ratio = 1 if denominator == 0 else numerator / denominator
    return max(1 - ratio, 0)
#esl_loss
def esl_loss():
    """Mean density ratio fx[i] / px[i] over the reservoir (ESL loss)."""
    return sum(fx[i] / px[i] for i in range(len2)) / len2
#decoder
class Decoder(nn.Module):
    """Small MLP decoder: input_size -> 32 -> 24 -> output_size.

    Hidden layers use in-place ReLU; the output is squashed into (0, 1)
    with a sigmoid.  Attribute names (linear1/linear3/linear4) match the
    original so saved state dicts remain loadable.
    """

    def __init__(self, input_size, output_size):
        super().__init__()
        self.linear1 = nn.Linear(input_size, 32)
        self.linear3 = nn.Linear(32, 24)
        self.linear4 = nn.Linear(24, output_size)
        self.relu = nn.ReLU(True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, z):
        hidden = self.relu(self.linear1(z))
        hidden = self.relu(self.linear3(hidden))
        return self.sigmoid(self.linear4(hidden))
#归一化
def minmaxscaler(data):
    """Linearly rescale ``data`` into [0, 1] using its own min and max.

    data: array-like of numbers (numpy array in this file).
    Returns an array of the same shape with minimum mapped to 0 and maximum
    to 1.

    Fixes two issues in the original: the locals shadowed the builtins
    ``min``/``max``, and a constant input produced a 0/0 division (NaNs);
    such input now maps to all zeros.
    """
    lo = np.amin(data)
    hi = np.amax(data)
    span = hi - lo
    if span == 0:
        return data - lo  # constant input: every value maps to 0
    return (data - lo) / span
#训练
device = torch.device('cuda')
Coder= Decoder(20,12).to(device)
print(Coder)
optimizer = torch.optim.Adam(Coder.parameters(),lr=LR)
loss_func = nn.MSELoss()
t = 1
for epoch in range(EPOCH):
epoch_loss = []
p=0
for i in range(len2, len1):
#start=time.time()
gx = []
max_gx=[]
for j in range(len2):
gx.append(kernel(train_data[i], Xs[j], z))
train_data1=np.array(train_data[i])
data_tensor=minmaxscaler(train_data1)
data_tensor =torch.tensor(data_tensor,requires_grad=True)
data_tensor = data_tensor.float().to(device)
gx_tensor=np.array(gx)
#max_gx = heapq.nlargest(10, gx_tensor)
index =map(gx.index, heapq.nlargest(20, gx))
#index1 = sorted(list(index1))
#index = map(gx.index, heapq.nsmallest(20,heapq.nlargest(30, gx)))
index = sorted(list(index))
for k in range(len(index)):
max_gx.append(gx[index[k]])
max_gx1=torch.tensor(max_gx,requires_grad=True)
max_gx1=max_gx1.float().to(device)
decoder = Coder(max_gx1)
loss = loss_func(decoder, data_tensor)
optimizer.zero_grad()
max_gx1.retain_grad()
loss.backward()
gxd=(max_gx1.grad).tolist()
max_gxd=list(filter(lambda x: x > 0, gxd))
xs_max=[gx_tensor.tolist().index(i) for i in [max_gx[i].tolist() for i in [gxd.index(i) for i in list(filter(lambda x: x > 0, gxd))]]]
min_gxd=list(filter(lambda x: x<0,gxd))
xs_min=[gx_tensor.tolist().index(i) for i in [max_gx[i].tolist() for i in [gxd.index(i) for i in list(filter(lambda x: x < 0, gxd))]]]
optimizer.step()
if t>3000:
t=3000
a = (1.2 * t) / ((1.2 * t) + 1)
b = 1 - a
for k in range(len(xs_max)):
#fx[xs_max[k]] = (a * fx[xs_max[k]]) + (b * kernel1(max_gxd[k], z))
fx[xs_max[k]] = (a * fx[xs_max[k]]) + (b * 0.1*sigmoid(100*max_gxd[k]))
#a1=fx[xs_max[k]]
#a2=px[xs_max[k]]
for k in range(len(xs_min)):
#fx[xs_min[k]] = (a * fx[xs_min[k]]) - (b * kernel1(min_gxd[k], z))
fx[xs_min[k]] = (a * fx[xs_min[k]]) - (b * 0.1*sigmoid(100*min_gxd[k]))
if fx[xs_min[k]]<0:
fx[xs_min[k]]=0
#a1=fx[xs_min[k]]
#a2=px[xs_min[k]]
for k in range(len2):
if k not in xs_max:
if k not in xs_min:
fx[k]=a*fx[k]
else:
continue
else:
continue
re = max_re(px, fx)
r = random.random()
n=max_r(re,train_data1)
if r<n:
Xs[re]=train_data1
fx[re]=fx_(Xs[re])
px[re]=px_(Xs[re])
p=p+1
eloss = esl_loss()
print(eloss)
epoch_loss.append(loss.item())
t = t + 1
print(p)
print("epoch {}: {}".format(epoch+1, sum(epoch_loss)/len(epoch_loss)))
#测试
#torch.save(Coder,'9.1.pkl')
#df=pd.DataFrame(Xs)
#df.to_csv('9.1.csv', index=False, header=False)
Coder = torch.load('CESDDM.pkl')
TP, TN, FP, FN = 0, 0, 0, 0
Coder.eval()
with torch.no_grad():
for i in range(0, len3):
gx = []
max_gx = []
for j in range(0, len2):
gx.append(kernel(test_data[i], Xs[j], z))
test_data1 = np.array(test_data[i])
data_tensor = minmaxscaler(test_data1)
data_tensor = torch.as_tensor(data_tensor)
data_tensor = data_tensor.float().to(device)
gx_tensor = np.array(gx)
#max_gx = heapq.nlargest(10, gx_tensor)
index = map(gx.index, heapq.nlargest(20, gx))
#index = map(gx.index, heapq.nsmallest(20, heapq.nlargest(30, gx)))
index = sorted(list(index))
for k in range(len(index)):
max_gx.append(gx[index[k]])
loss1 = np.mean(max_gx)
max_gx = torch.as_tensor(max_gx)
max_gx = max_gx.float().to(device)
#gx_tensor = torch.as_tensor(gx_tensor)
#gx_tensor = gx_tensor.float().to(device)
decoder = Coder(max_gx)
loss = loss_func(decoder, data_tensor)
b=test_label[i]
if loss1 > 0.694 and loss < 0.0515:
a = 0
else:
a = 1
if a==1 and b==1:
TP += 1
if a==0 and b==0:
TN += 1
if a==1 and b==0:
FP += 1
if a==0 and b==1:
FN += 1
P = TP / (TP + FP )
R = TP / (TP + FN )
F1=(2*P*R)/(P+R)
p1= (TP + TN)/ (TP+TN+FN+FP)
#acc=right/len3
print(P)
print(R)
print(p1)
print(F1)
print(TP)
print(TN)
print(FP)
print(FN) | ColeGroup/2023SunJun | model/CESDDM/CESDDM.py | CESDDM.py | py | 7,701 | python | en | code | 0 | github-code | 13 |
13486888005 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 17 10:56:31 2021
@author: soyrl
"""
#Import libraries and dependencies
import os
import pydicom as dicom
import numpy as np
import cv2
import time
# from termcolor import colored
import matplotlib.pyplot as plt
import pandas as pd
from joblib import Parallel, delayed
from tqdm import tqdm
import sys # Save terminal output to txt file
import traceback #Print errors when occur
import torch
import torchvision.ops.boxes as bops #calculate overlap between two nodule boxes
#Input and Output paths
data_path= "H:\My Desktop\emphysema_all\emphysema_exp_scans/advanced_scans/" #Folders with scans
#For BMI "H:\My Desktop/BMI_exp/BMI_low_scans_new/"
output_path="H:/My Desktop/adv_29-3/" #Any name
#Path of ground truth nodules for a specific emphysema degree
ground_truth_path="H:\My Desktop\emphysema_all\emphysema_exp_scans/advanced_gt_only_new_added/"
#For BMI "H:\My Desktop/pat_BMI_red_latest_new/"
#Path of AI nodules and their volume - created with file aorta_calcium_lungnodules.ipynb
AI_path= "H:\My Desktop/emphysema_all/allemphexper_AI_new_latest_14-3.xlsx"
#"H:\My Desktop/BMI_exp_AI_13-1.xlsx"
if not os.path.exists(output_path): #Create folder to save images
os.mkdir(output_path)
#Create lists of input and output paths to use with Parallel processing library
inputs=[]
outputs=[]
#Loop over all patient folders and add paths to input and output lists
for path,subdir,files in os.walk(data_path): #each time gets into next subdir and repeats
#path,subdir,files for a specific directory each time
result_path='' #Initialize string to be filled with directory name
if len(files)>0:
result_path=output_path+path.split('/')[-1] #Path of specific patient
if not os.path.exists(result_path): #Create folder to save images
os.mkdir(result_path)
outputs.append(result_path)
inputs.append(path)
start=time.time() #To count time to run all steps
def CT_files_extract(input_path,output_path,dpi=1200):
    """Step 1: scan one patient's DICOM folder and catalogue its contents.

    Walks every file in ``input_path``, classifying each DICOM by Modality:
    CT slices (512x512 original scan or annotated copies, 1024x1024 AI
    output renderings), SEG segmentation volumes, and anything else.  For AI
    renderings it detects red nodule annotations via an HSV mask and counts
    non-overlapping bounding boxes per slice.  Console output is redirected
    to ``<output_path>_step1.txt`` for the duration of the call.

    Returns a 7-tuple:
      CTfiles      - names of the 512x512 CT files (scan + annotated),
      size_SEG     - common slice count of the SEG files (int) or the raw list,
      errors_SEG   - accumulated warning/error strings,
      AI_num_nods  - {AI file name: detected nodule count},
      size_CTs     - number of original (non-annotated) scan slices,
      size_AI      - number of AI output slices,
      AI_slices    - names of the AI slices encountered.
    """
    'Extracts all CT files (AI, original scans along with annotated CT images), the num of AI nodules,'
    'as well as the total slices in the SEG files, in AI output, and the total num of CT files (original scan).'
    'We also get possible errors. It is assumed that the input path contains only DICOM files of a specific patient'
    "input_path: Path of patient with nodules - should contain files of one patient only"
    "output_path: Path where information and images will be saved"
    "dpi: Specifies resolution of output images"
    start=time.time() #To count the time for this function to run
    #Save output of this step to txt file
    sys.stdout = open(output_path+'_step1.txt', 'w')
    if not os.path.exists(output_path): #Create folder to save images
        os.mkdir(output_path)
    num=0 #Counter of the number of files for this patient
    size_AI=0 #Count the number of AI slices - In case that SEG files have different number of slices
    size_CTs=0 #Count the number of CT files - original scan and annotations files together
    AI_slices=[] #To save number of AI slices that contain nodules
    #Initialize empty lists to save CT file names with images, and number of slices in SEG files below
    CTfiles=[]
    size_SEG=[]
    #Initialize empty dictionary to keep track of AI output slices and the number of nodules they contain
    AI_num_nods={}
    #List to be filled with problematic SEG files, if any, as well as unknown file types and info if SEG files do not have the same number of slices
    errors_SEG=[]
    #https://python.plainenglish.io/how-to-design-python-functions-with-multiprocessing-6d97b6db0214
    #https://github.com/tqdm/tqdm#usage
    for file in tqdm(os.listdir(input_path),desc='step1',ascii=True): #Parallel loop into each patient
        if input_path.endswith('/'): #Ensure that path ends with '/'
            file_path=input_path+file #subdir with files
        else:
            file_path=input_path+'/'+file
        if os.path.isfile(file_path): #If we have a file and not a subdir
            dicom_file=dicom.dcmread(file_path) #Read file
            num+=1 #Increase files counter by 1
            print("Processing file {}/{} ...".format(num,len(os.listdir(input_path))))
            if dicom_file.Modality=='CT': #For CT images
                image=dicom_file.pixel_array #Load CT slice
                if len(np.unique(image))==1: #Problem if only 0 values
                    print("CT image slice of file {} is empty".format(file))
                # Save CT slices based on their resolution
                # Higher resolution images also have annotations on the figure
                print("CT file name is: {}".format(file))
                # plt.ioff()
                # plt.figure()
                # plt.imshow(image)
                # plt.title(file)
                # plt.savefig(output_path+'/'+str(image.shape[0])+'_'+str(file[:-4])+'.png',dpi=dpi) # file[:-4] was used to avoid '.dcm' ending
                # plt.close()
                #cv2 gives empty image since range from 0- ~3000 for original scan images
                if image.shape[0]==512: #Original scan slice or annotated image
                    CTfiles.append(file) #Save information about file name
                    if len(file.split('.')[3])==1: #Increase size_CT only for original scan - just one value in that position eg. 4 or 6
                        size_CTs=size_CTs+1
                else:
                    if image.shape[0]==1024:
                        # 1024x1024 slices are AI output renderings.
                        if np.sum(image[900:,960:])!=0: #Ensure that we don't have graphs with HU information
                            if file.split('.')[4] in AI_slices:
                                continue
                            else:
                                size_AI=size_AI+1 #Increase number of AI slices (should be same as original scan)
                                AI_slices.append(file)#.split('.')[4])
                            #Assess differences between channels to find red and yellow nodule annotations of AI
                            #Threshold if no nodules detected is ~500 (from 492-527 for patient 695269) - not from 'Not for clinical use' but from some black pixels!
                            #If we plot image[np.where(image[:,:,1]!=image[:,:,2])] we will see a black vertical line of those different pixels
                            # if len(np.where(image[:,:,1]!=image[:,:,2])[0])<=600:
                            #     print("no AI for file {}".format(file))
                            #     print(len(np.where(image[:,:,1]!=image[:,:,2])[0]))
                            if len(np.where(image[:,:,1]!=image[:,:,2])[0])>600: #threshold for at least 1 nodule - 1000 for at least 2, if needed
                                #Resize AI image to (512,512) - same size as SEG and CT files below, convert to HSV and get mask for red and yellow
                                AI_image=image.copy() #As a good practice - to ensure that we don't change the original image
                                AI_512=cv2.resize(AI_image, dsize=(512, 512), interpolation=cv2.INTER_CUBIC) #Resize to (512,512)
                                AI_hsv=cv2.cvtColor(AI_512,cv2.COLOR_BGR2HSV) #Convert from BGR to HSV colorspace
                                mask_im_red=cv2.bitwise_and(AI_hsv,AI_hsv, mask=cv2.inRange(AI_hsv, (100,0,0), (200, 255, 255))) #Red mask - lower and upper HSV values
                                mask_im_red[0:50,:,:]=0 #Set top pixels that mention 'Not for clinical use' to zero - ignore them and keep only nodules
                                # Maybe needed for nodule identification - avoid duplicates - also (80,0,70) and (80,0,130) but best (80,100,0)
                                # mask_im_yellow=cv2.bitwise_and(AI_hsv,AI_hsv, mask=cv2.inRange(AI_hsv, (80,0,110), (110, 255, 255))) #Yellow mask
                                # cv2.imwrite(output_path+'/'+str(image.shape[0])+'_'+str(file[:-4])+'_yellow'+'.png',mask_im_yellow) #Save yellow mask - range from 0-255
                                #Until here mask_im_red is a color image with range of values from 0-255
                                #Now we convert from BGR (emerged with bitwise operation) to grayscale to have same shape (512,512) as SEG files
                                #It is used below to take a thresholded image
                                mask_im_red_gray=cv2.cvtColor(mask_im_red,cv2.COLOR_BGR2GRAY) #Also changes range of values
                                if len(np.unique(mask_im_red_gray))!=1: #If there is a nodule in the image
                                    #Get a smoothed (thresholded) image with only nodules
                                    mask_red_thresh = cv2.threshold(mask_im_red_gray,128,255,cv2.THRESH_BINARY)[1]
                                    #Activate below to get mask without red box around it
                                    # cv2.imwrite(output_path+'/'+str(image.shape[0])+'_'+str(file[:-4])+'_mask_no_box'+'.png',mask_red_thresh) #Save it
                                    #Get contours for each nodule and create a rectangle around them
                                    # mask_im_red_RGB=cv2.cvtColor(mask_im_red_gray,cv2.COLOR_GRAY2RGB) #Convert grayscale to RGB (not BGR as before)
                                    contours = cv2.findContours(mask_red_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                                    contours = contours[0] if len(contours) == 2 else contours[1]
                                    if len(contours)>0:
                                        checks={} #Empty dictionary to keep track of nodule locations of a specific slice
                                        for index,cntr in enumerate(contours):
                                            x,y,w,h = cv2.boundingRect(cntr)
                                            checks[index]=[x,y,w,h]
                                        length=0 #find overlapping boxes
                                        for index,values in enumerate(list(checks.values())):
                                            x,y,w,h=values
                                            try: #To avoid run out of index error
                                                for index1 in range(index+1,len(list(checks.values()))): #Loop over box locations and check for overlap
                                                    x1,y1,w1,h1=list(checks.values())[index1]
                                                    if (x<=x1 and x1<=x+w and y<=y1 and y1<=y+h):
                                                        length=length+1
                                            except:
                                                pass
                                        if length>0:
                                            print("There are overlapping boxes in file {}".format(file))
                                        # Overlapping boxes are counted once: subtract the overlaps.
                                        AI_num_nods[file]=len(contours)-length
                                        # for cntr in contours:
                                        #     x,y,w,h = cv2.boundingRect(cntr)
                                        #     cv2.rectangle(mask_im_red_RGB, (x, y), (x+w, y+h), (0, 0, 255), 1) #Last argument thickness of box
                                        # cv2.imwrite(output_path+'/'+str(image.shape[0])+'_'+str(file[:-4])+'_thresh_box'+'.png', mask_im_red_RGB) #Plot boxes on colored image
                                        # #For a colored-like version
                                        # plt.ioff()
                                        # plt.figure()
                                        # plt.imshow(mask_im_red_gray)
                                        # plt.title(file)
                                        # plt.savefig(output_path+'/'+str(image.shape[0])+'_'+str(file[:-4])+'_colored_thresh'+'.png',dpi=dpi)
                                        # plt.close()
                print('\n')
            elif dicom_file.Modality=='SEG': #For SEG files
                image=dicom_file.pixel_array #Load 3D segmentation file
                if np.sum(image)<5000:#Count number of proper segmentation files - If above that then we have yellow lines on bottom of the scan
                    print("SEG file {} has sum of pixels <5000 and equal to {}".format(file,np.sum(image)))
                    size_SEG.append(image.shape[0]) #Confirm below that all SEG files have the same number of slices
                else:
                    print("File {} has sum of pixels {}".format(file,np.sum(image)))
                    errors_SEG.append('The following has more than 5000 pixels as Segmentation - empty curve line')
                    errors_SEG.append(file)
                    #Just an empty curved line - Can be plotted below
                    # for curved_line in range(image.shape[0]):
                    #     plt.figure()
                    #     plt.imshow(image[curved_line,:,:])
                    #     plt.title(file)
                    #     plt.savefig(output_path+'/'+'512_lines_'+str(curved_line)+str(file[:-4])+'.png',dpi=dpi)
                    #     plt.close()
                    # #or better with cv2
                    # cv2.imwrite(output_path+'/'+'512_lines_'+str(curved_line)+str(file[:-4])+'.png',image[curved_line,:,:])
            elif dicom_file.Modality=='SR': #For SR files - Will not be encountered
                print("File {} is a SR".format(file))
                print("\n")
            else: #For any other type of file - We don't expect anything else
                print("File {} does not belong to any of the above categories and is a {} file".format(file, dicom_file.Modality))
                # print(dicom_file_final)
                print("\n")
                errors_SEG.append('Unexpected type of file occured in file:')
                errors_SEG.append(file)
    #Verify that all SEG files have same number of slices
    if len(np.unique(size_SEG))==1: #We should only have one number, the number of slices
        size_SEG=int(size_SEG[0]) #Convert list to number
    elif len(np.unique(size_SEG))==0:
        errors_SEG.append('No Segmentation Files available')
    else:
        print("ERROR: Segmentation files have different number of slices!")
        errors_SEG.append('Not all SEG files have the same number of slices for file'+str(input_path))
        errors_SEG.append('Here are the different num of slices for the previous file'+str(np.unique(size_SEG)))
    end=time.time()
    print("Total time to run CT extraction (step1) was {} secs".format(end-start))
    print("\n")
    #Save output to text file
    # Restore the real stdout after logging to the step-1 text file.
    sys.stdout.close()
    sys.stdout = sys.__stdout__
    return CTfiles, size_SEG, errors_SEG, AI_num_nods, size_CTs, size_AI, AI_slices
def annotated_CTs_to_normal_scan(input_path,CT_files,output_path):
    """Correlate annotated CT slices with the slices of the original scan.

    Annotated slices are identified by the 'F' marker burned into the
    bottom-right corner of the image; for each such slice the original slice
    it was derived from is found by pixel-wise comparison against every CT
    slice of the scan.

    Parameters
    ----------
    input_path : str
        Path of the patient folder with the DICOM files (patient with nodules).
    CT_files : list of str
        Names of all CT files in the folder (output of step1 above).
    output_path : str
        Prefix for the '<output_path>_step2.txt' log file.

    Returns
    -------
    tuple
        (original_CTs, annotated_CT_files, empty_CT_files,
        possible_nodules_excluded) where the first two lists are parallel
        (index i of one corresponds to index i of the other), the third holds
        CT files whose pixel data was empty, and the fourth holds
        [annotated, original] pairs with possible nodules missed because of
        the low matching threshold.
    """
    start=time.time() #To count the time for this function to run
    sys.stdout = open(output_path+'_step2.txt', 'w') #Save output to txt file
    try:
        original_CTs=[] #Names of original CT slices matched to an annotation
        annotated_CT_files=[] #Names of the corresponding annotated CT slices
        empty_CT_files=[] #Files whose CT pixel data was empty (errors)
        possible_nodules_excluded=[] #Possible nodules missed due to the low threshold
        #4th dot-separated field of the name encodes the file kind:
        #short number (eg. 4) = original slice, long number (eg. 1042) = annotated slice
        file_num=[x.split('.')[3] for x in CT_files]
        annot_files_total=len([name for name in file_num if len(name)>1]) #Total annotation files for this patient
        for index,annotated_CT in enumerate(tqdm(CT_files,desc='step2',ascii=True)): #Loop over CT_files to get annotated CT images
            annotated_CT_path=input_path+'/'+annotated_CT #Path of annotated CT file
            dicom_annotated_CT=dicom.dcmread(annotated_CT_path) #Read DICOM file
            image_annotated_CT=dicom_annotated_CT.pixel_array #Load CT slice
            print('Working on CT image number {}'.format(index))
            #Annotated slices carry an 'F' symbol at the bottom-right corner:
            #more than 100 non-zero pixels in that corner region identifies them
            if len(np.where(image_annotated_CT[410:,468:]!=0)[0])>100:
                #Loop over all CT slices to find correspondence with the annotated image
                for file in os.listdir(input_path):
                    file_path=input_path+'/'+file #specific file path
                    if os.path.isfile(file_path): #If we have a file and not a subdir
                        dicom_file=dicom.dcmread(file_path) #Read file
                        if dicom_file.Modality=='CT': #Confirm that we indeed have CT slices here
                            image_CT=dicom_file.pixel_array #Load CT slice
                            if len(np.unique(image_CT))==1: #Problem if only 0 values - should not happen
                                print("CT image slice of file {} is empty".format(file))
                                empty_CT_files.append(file)
                                continue #to go to next file - or 'break' to exit loop
                            #Compare only 512*512 CT slices - 1024*1024 images are AI outputs
                            if image_CT.shape[0]==image_annotated_CT.shape[0] and len(file.split('.')[3])<3:
                                #Pixel positions that differ between the two images
                                differences=np.where(image_annotated_CT!=image_CT)
                                #Too many differing pixels to be a match, but few enough that
                                #a nodule may have been missed by the low threshold below
                                if len(differences[0])>=1000 and len(differences[0])<60000:
                                    print("Number of different pixels are {} for files {} and {}".format(len(differences[0]),file,annotated_CT))
                                    possible_nodules_excluded.append([annotated_CT,file])
                                #A match: some pixels differ (the drawn annotation) but fewer
                                #than the 1000-pixel threshold. BUGFIX: the original expression
                                #was 'len(differences[0]!=1)' - a misplaced parenthesis that
                                #took the len() of a boolean ndarray; it only accidentally
                                #matched the intended "not 0 different pixels" check.
                                if 0 < len(differences[0]) < 1000:
                                    original_CTs.append(file)
                                    annotated_CT_files.append(annotated_CT)
                                    print("Annotated CT slice of file {} is the same as of CT file {}".format(annotated_CT,file))
                                    print("\n")
                                    break #To save time otherwise keep looping - Stops 'correspondence' loop
                            else:
                                continue #Go to next file
            if len(annotated_CT_files)<annot_files_total: #Keep looping until all available annotation files are matched
                continue
            else:
                break #All annotations matched - avoid extra looping
        end=time.time()
        print("Total time to run annotated_CT_to_normal_slices (step2) was {} secs".format(end-start))
        print("\n")
    finally:
        #Restore stdout even on error, so later prints are not swallowed
        #by the (closed) log file of this step
        sys.stdout.close()
        sys.stdout = sys.__stdout__
    return original_CTs, annotated_CT_files, empty_CT_files, possible_nodules_excluded
def mask_seg_slices(original_CTs,input_path,output_path,annotated_CT_files,dpi=1200):
    'Plots the nodules in the SEG files and returns correspondance between SEG files with original CT slices'
    'and annotated ones, as well SEG slices with errors. It also returns images with and without bounding boxes around nodules'
    'At last, it returns'
    "original_CTs: Original CT slices with nodules (output of step2 above)"
    "input_path: Path of patient with nodules"
    "output_path: Path where information and images will be saved"
    "annotated_CT_files: Annotated CT slices (output of step2 above)"
    "dpi: Specifies resolution of output images"
    #NOTE(review): 'dpi' is only referenced by the commented-out plt.savefig calls below
    #original_CTs and annotated_CT_files are parallel lists from step2:
    #index i of one corresponds to index i of the other
    start=time.time() #To count the time for this function to run
    #Save output to txt file
    sys.stdout = open(output_path+'_step3.txt', 'w')
    #Empty lists to be filled with the final names of the files
    #(kept in corresponding order: SEG mask i <-> original CT i <-> annotated CT i)
    SEG_masks=[]
    original_CTs_final=[]
    annotated_CTs_final=[]
    SEG_masks_errors=[]
    for file in tqdm(os.listdir(input_path),desc='step3',ascii=True):
        file_path=input_path+'/'+file #specific file path
        if os.path.isfile(file_path): #If we have a file and not a subdir
            dicom_SEG=dicom.dcmread(file_path) #Read file
            if dicom_SEG.Modality=='SEG': #Only interested in SEG files
                SEG_image_3D=dicom_SEG.pixel_array #Load 3D segmentation file
                if np.sum(SEG_image_3D)<5000: #Some SEG files with sum>5000 give yellow lines - avoid that error
                    print("SEG file {} is being processed. The sum of SEG pixels is {}".format(file,np.sum(SEG_image_3D)))
                    print("\n")
                    SEG_3D=np.where(SEG_image_3D!=0) #Here are the pixels that belong to nodules
                    #Get slices in which nodules exist
                    nodule_slices=np.unique(SEG_3D[0])
                    print("{} are slices with nodules".format(nodule_slices))
                    #Get slices with maximum number of pixels having a nodule
                    nodule_sum=[0] #Initialize list of maximum numbers of pixels with 0 to keep track of that max
                    slicenum=[] #List to be filled with slices of max pixels
                    for slice in nodule_slices: #Loop over slices with nodules
                        if np.sum(SEG_image_3D[slice,:,:])>=nodule_sum[-1]: #If the maximum number of pixels in that slice is bigger or equal to the last max
                            nodule_sum.append(np.sum(SEG_image_3D[slice,:,:])) #Add that maximum to our list
                            slicenum.append(slice) #Add that slice to our list
                    nodule_sum=np.array(nodule_sum) #Convert list to array
                    max_sum=np.max(nodule_sum) #Get maximum of all slices
                    index_max_sum=np.where(nodule_sum==max_sum) #Get index of that maximum
                    num_maxs=len(index_max_sum[0]) #Get how many slices exist with that maximum - sometimes more than 1
                    slicenum=np.asarray(slicenum) #Convert list with slices to array
                    print("Slices with the most nodule pixels: {}".format(slicenum[-num_maxs:]))
                    print("Sum of nodule pixels in those slices is/are {}".format(max_sum))
                    print("\n")
                    #Save segmentation slice with nodule that corresponds to the same slice as original_CT slice
                    #We check and compare only with CT slices with nodules that were given
                    for num_slice in slicenum: #Loop over slices with max pixels - not just in slicenum[-num_maxs] since some images are outside of that range
                        for index,CTfile in enumerate(original_CTs): #Loop over original CT slices that have nodules
                            slice_number=CTfile.split('.')[4] #Get slice number from original CT file name
                            #Get Slice number from dicom header instead of image name
                            get_slice=dicom.dcmread(input_path+'/'+CTfile) #Read file
                            if int(get_slice.InstanceNumber)==int(slice_number):#Just confirmation - should always be true
                                if num_slice==SEG_image_3D.shape[0]-int(slice_number):#Since slices here are in reverse order
                                    SEG_image=SEG_image_3D[num_slice,:,:].copy() #Get a copy of the SEG slice with a nodule in the corresponding original CT slice
                                    thresh = cv2.threshold(SEG_image,0.5,255,cv2.THRESH_BINARY)[1] #Since a binary image (0s and 1s) set a threshold in the middle
                                    # cv2.imwrite(output_path+'/'+str(file[:-4])+'_SEG_no_box'+'.png',thresh) #Nodules without bounding box around them - values only 0 or 255
                                    SEG_image_color=cv2.cvtColor(thresh,cv2.COLOR_GRAY2RGB) #Convert back to RGB - values again only 0 or 255
                                    #Find contours of objects in that image
                                    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                                    #OpenCV 3.x returns (image,contours,hierarchy), 4.x returns (contours,hierarchy) - handle both
                                    contours = contours[0] if len(contours) == 2 else contours[1]
                                    for cntr in contours: #Loop over contours and plot boxes in the colored image
                                        x,y,w,h = cv2.boundingRect(cntr)
                                        cv2.rectangle(SEG_image_color, (x, y), (x+w, y+h), (0, 0, 255), 1)
                                    #Plot image below and not here since we may have many nodules and many slices that we want to plot
                                    #Save segmentation mask slice
                                    # #For colored-like version
                                    # plt.ioff() #Do not open figure window
                                    # plt.figure()
                                    # plt.imshow(SEG_image)
                                    # plt.title("Slice {}".format(num_slice))
                                    #For segmentation masks activate imwrite below
                                    # if num_slice in slicenum[-num_maxs:]: #Loop over slices with maximum number of nodule pixels only
                                    #     cv2.imwrite(output_path+'/'+str(file[:-4])+'_slice_'+str(num_slice)+'_max'+'.png',SEG_image_color)
                                    # #     # #For a colored-like version - Binary image with box
                                    # #     # plt.savefig(output_path+'/'+str(file[:-4])+'_'+'slice_'+str(num_slice)+'_max_colored'+'.png',dpi=dpi) #Colored nodule
                                    # else: #Also save the slices not with a maximum number of pixels. Some of them may be needed
                                    #     cv2.imwrite(output_path+'/'+str(file[:-4])+'_slice_'+str(num_slice)+'_not_max'+'.png',SEG_image_color)
                                    # #     # #For a colored-like version
                                    # #     # plt.savefig(output_path+'/'+str(file[:-4])+'_slice_'+str(num_slice)+'not_max_colored'+'.png',dpi=dpi)
                                    # plt.close()
                                    annotation_CT=dicom.dcmread(input_path+'/'+annotated_CT_files[index]) #Read annotated_CT file paired (by index) with CTfile
                                    annotation_CT_image=annotation_CT.pixel_array #Get its image
                                    CT_image=get_slice.pixel_array #Get image of original CT
                                    nodule=np.where(annotation_CT_image!=CT_image) #Find different pixels - nodule locations
                                    if np.sum(SEG_image_color[nodule])!=0 and len(contours)<2: #If nodule locations of annotated_CT exist in SEG file
                                        #and excluding SEG files if they have 2 or more nodules
                                        #Save SEG masks, original CT slices and annotated slices in corresponding order
                                        SEG_masks.append(file)
                                        original_CTs_final.append(CTfile)
                                        annotated_CTs_final.append(annotated_CT_files[index])
                    if file not in SEG_masks: #Add file in errors if not added to the list above
                        SEG_masks_errors.append(file)
    #Recovery: if exactly one SEG file failed and exactly one annotation is still
    #unmatched, they must correspond to each other - pair them up
    if len(annotated_CT_files)-len(annotated_CTs_final)==1 and len(SEG_masks_errors)==1: #If only one file in errors then it should exist correspondance - add it to lists
        SEG_masks.append(SEG_masks_errors[0])
        original_CTs_final.append(list(set(original_CTs) - set(original_CTs_final))[0])
        annotated_CTs_final.append(list(set(annotated_CT_files) - set(annotated_CTs_final))[0])
    #If nothing was matched at all, fall back to the step2 lists unchanged
    if annotated_CTs_final==[] or original_CTs_final==[]:
        annotated_CTs_final=annotated_CT_files
        original_CTs_final=original_CTs
    end=time.time()
    print("Total time to find correspondance between SEG files to annotated and original CT slices (step3) was {} secs".format(end-start))
    print("\n")
    #Returns a 4-tuple: (SEG_masks, original_CTs_final, annotated_CTs_final, SEG_masks_errors);
    #on failure paths the SEG lists are empty and the step2 inputs are passed through
    if SEG_masks!=[]:
        if len(np.unique(annotated_CT_files))>len(np.unique(annotated_CTs_final)):
            print('REMARK: SEG Files Ignored since they are less or equal to annotated_CT files - {} vs {}'.format(len(np.unique(SEG_masks)),len(np.unique(annotated_CT_files))))
            sys.stdout.close()
            sys.stdout = sys.__stdout__
            return [],original_CTs,annotated_CT_files,[]
        else:
            sys.stdout.close()
            sys.stdout = sys.__stdout__
            return SEG_masks,original_CTs_final,annotated_CTs_final, SEG_masks_errors
    else: #In case no SEG files exist
        sys.stdout.close()
        sys.stdout = sys.__stdout__
        return [],original_CTs,annotated_CT_files,[]
#Run the three pipeline steps, one joblib job per patient folder in 'inputs'.
#Each Parallel call returns a list of per-patient result tuples; zip(*...)
#transposes that into one tuple per output variable, indexed like 'inputs'.
#1st function - extract CT, AI files, number of nodules, size of AI, CT and SEG files, and SEG errors
CTfiles, size_SEG, errors_SEG, AI_num_nods, size_CTs,size_AI, AI_slice_names =zip(*Parallel(n_jobs=-1)(delayed(CT_files_extract)(path,outputs[index],dpi=200) for index,path in enumerate(inputs)))
#2nd function - correspondance between annotated and original scans
original_CTs, annotated_CT_files, empty_CT_files, possible_nodules_excluded=zip(*Parallel(n_jobs=-1)(delayed(annotated_CTs_to_normal_scan)(path,CTfiles[index],outputs[index]) for index,path in enumerate(inputs)))
#3rd function - correspondance between SEG files, annotated_CT images, and original scan slices
SEG_masks,original_CTs_final,annotated_CTs_final,SEG_masks_errors=zip(*Parallel(n_jobs=-1)(delayed(mask_seg_slices)(original_CTs[index],path,outputs[index],annotated_CT_files[index],dpi=200) for index,path in enumerate(inputs)))
#Create dataframe with all participants and nodules
#Column layout: participant ID, ten AI nodule columns, ten volume columns,
#then TP/FP/FN counts split by volume range (0-100, 100-300, 300+ mm3)
column_names=['participant_id']
column_names+=['AI_nod{}'.format(n) for n in range(1,11)]
column_names+=['V{}'.format(n) for n in range(1,11)]
column_names+=[rng+suffix for suffix in ('tp','fp','fn') for rng in ('0-100','100-300','300+ ')]
df_all=pd.DataFrame(columns=column_names) #Empty frame - rows are filled per patient below
#Accumulators for the final per-patient computations (output file)
patient_names=[] #Patient IDs
all_volumes=[] #Final list of nodule volumes (TP and FP)
FP_num=[] #Number of FP findings for each patient
AI_pats={} #AI slices with nodules
AI_pats_vols={} #Volumes of AI slices with nodules
AI_pats_slices={} #All AI slices - original names
RedCap_pats={} #Slices with RedCap annotations
RedCap_pats_vols={} #Volumes of RedCap annotated slices
RedCap_ids={} #IDs used only for the excel export at the end
for index_init,path in enumerate(inputs): #Loop over each patient
error_flag=0 #For cases with any errors that should be checked manually
error_flag_images=0 #For cases with errors only in output images - only images should be checked manually
distance_error=0 #Flag for cases with nearby TP nodules in which ID are confused.
sys.stdout = open(outputs[index_init]+'output.txt', 'w') #Save output here
patient_names.append(path.split('/')[-1]) #Add patient ID to this list
#Load ground truth nodules in a dataframe and get an array of integers with the slices of the ground truth nodules
try: #To ensure that manual annotations from REDCap exist
ground_truth_nodules=pd.read_csv(ground_truth_path+path.split('/')[-1]+'.csv') #Read CSV file with REDCap annotations
slice_cols=[col for col in ground_truth_nodules.columns if 'slice' in col] #Get list of column names with nodules
slice_vals=ground_truth_nodules[slice_cols].values #Get list of slices with nodules
slice_vals=slice_vals[~np.isnan(slice_vals)] #Exclude NaN values
slice_vals=slice_vals.astype(int) #Convert them to integers
print("Slices with nodules from REDCap are {}".format(slice_vals))
RedCap_pats[path.split('/')[-1]]=slice_vals
#Check first and last 10 slices of patients to see if GT slices with nodules exist there
#If so, our algorithm may fail - In 137966 nodule in first 10 slices. The algorithm worked fine in that
for j in slice_vals:
if j<10 or j>size_CTs[index_init]-11: #between 20-30 slices exist
print("In patient {} first 10 or last 10 slices contain nodule - Slice {}".format(path.split('/')[-1],j))
ids=[col for col in ground_truth_nodules.columns if 'nodule_id' in col] #Get list of column names with nodule IDs
nodule_ids=ground_truth_nodules[ids].values #Get list of nodule IDs
nodule_ids=nodule_ids[~np.isnan(nodule_ids)] #Exclude NaN values
nodule_ids=nodule_ids.astype(int) #Convert them to integers
print("Nodule IDs are: {}".format(nodule_ids))
RedCap_ids[path.split('/')[-1]]=nodule_ids
vol_ids=[col for col in ground_truth_nodules.columns if 'volume_solid' in col] #Get list of column names with volume of solid nodules
volume_ids=ground_truth_nodules[vol_ids].values #Get values of volumes of solid nodules
temp_solid=volume_ids
missing=np.where(np.isnan(volume_ids)) #Find where we don't have a value for volumes of solid nodules
vol_ids_sub=[col for col in ground_truth_nodules.columns if 'volume_subsolid' in col] #Get list of column names with volume of subsolid nodules
volume_ids_sub=ground_truth_nodules[vol_ids_sub].values #Get values of volumes of subsolid nodules
#Since they are a list of list and we just want the inner list for volumes
volume_ids=volume_ids[0]
volume_ids_sub=volume_ids_sub[0]
#If the solid component is <30mm3 we also keep the subsolid one. If it's >30mm3 we ignore subsolid, if exists.
#If we split based on groups (30-100 and/or 100-300mm3) this might result in having a few nodules belong to wrong volume subgroups.
for ind, vol in enumerate(volume_ids):
if vol<30 or np.isnan(vol)==True: #If volume of solid component <30mm3 or non-existent
try:
if volume_ids_sub[ind]>=30 and np.isnan(vol)==False: #If solid component exists (is smaller than 30mm3) and subsolid >30mm3
volume_ids[ind]=vol+volume_ids_sub[ind]
print('Combined solid and subsolid components since volume of solid <30mm3')
elif volume_ids_sub[ind]>=30 and np.isnan(vol)==True: #If solid component doesn't exist and subsolid >30mm3
volume_ids[ind]=volume_ids_sub[ind]
print("Kept only subsolid component since there was no solid component")
except:
pass
if vol>=30: #If solid component >30mm3
try: #To avoid error in 130781 without any subsolid components
if volume_ids_sub[ind]>0 and temp_solid[ind]>0: #Just to print information of subsolid component as well
print("Nodule with ID {} and volume {} has 2 components but only solid considered".format(nodule_ids[ind],temp_solid[ind]))
print('Volume of subsolid component is {}'.format(volume_ids_sub[ind]))
except:
pass
print("Volumes of the above nodules are: {}".format(volume_ids))
print("\n")
RedCap_pats_vols[path.split('/')[-1]]=volume_ids #Add volumes to the respective dictionary
except: #If manual annotations file not found
print("ERROR: No manual Annotations file from REDCap available")
slice_vals=[]
continue
#Extract information from AI file
try:
df=pd.read_excel(AI_path,index_col=0) #Read Dataframe with AI nodules, using first column as indices
for file in os.listdir(path): #Loop over all AI files and get information from first slice (0)
#Last condition added to ensure that we get AI slice and not 'Results', as happened in 105179
if len(file.split('.')[3])>1 and int(file.split('.')[4])==0 and int(file.split('.')[3])>=2000:
dcm_file=dicom.dcmread(path+'/'+file)
slice_location=float(dcm_file.SliceLocation)
spacing=float(dcm_file.SpacingBetweenSlices)
slice_number=int(dcm_file.InstanceNumber)
assert slice_number==0
break
total_slices=size_AI[index_init] #total number of AI slices
nod_locations=df.loc[int(path.split('/')[-1])][10:-1].values[df.loc[int(path.split('/')[-1])][10:-1].values!='-']
#nod locations in the order of L01, L02 etc of AI detections
actual_slice=((-1/spacing)*nod_locations)+(slice_location/spacing)+total_slices #slice_number
actual_slice=actual_slice.astype(float).round() #rounding
AI_pats[path.split('/')[-1]]=actual_slice #Add slice number to AI_pats
#Volumes of AI detections are extracted from a df created in aorta_calcium_lungnodules.ipynb
volumes_AI=df.loc[int(path.split('/')[-1])][:10].values[df.loc[int(path.split('/')[-1])][:10].values!='-']
AI_pats_vols[path.split('/')[-1]]=volumes_AI #Add volumes in AI_pats_vols
print("Slices with nodules from AI are {}".format(AI_pats[path.split('/')[-1]]))
print("Their volumes are: {}".format(volumes_AI))
except: #If any error print it
print(traceback.format_exc())
pass
AI_pats_slices[path.split('/')[-1]]=AI_slice_names[index_init] #Add to AI_pats_slices all AI slice names
print('\n')
#Ensure that we have the same number of AI and original CT slices
try:
assert size_CTs[index_init]==len(AI_slice_names[index_init])==size_AI[index_init]
print("Size of AI output scan is the same as size of original scan and equals to {}".format(size_AI[index_init]))
except:
print("ERROR! Num of AI slices is {} and num of original CT slices is {}".format(len(AI_slice_names[index_init]),size_CTs[index_init]))
error_flag=1
#Initialize empty lists to be filled below
tp_final=[]
fp_final=[]
fn_final=[]
tp_AI_final=[]
vols_tp=[]
vols_fp=[]
vols_fn=[]
vols_tp_AI=[]
ids_tp=[] #IDs of GT
ids_fp=[] #IDs of AI
ids_fn=[] #IDs of GT
ids_tp_AI=[] #IDs of AI
same_slice_remove=[] #Add slices added incorrectly as TP from looping in nearby slices
consecutive_slices=[] #Add slices here to avoid confusing nearby slices in which one might be TP and the other FN
avoid_FP=[] #Avoid adding wrong slices to FP
avoid_FP_volumes=[] #Add volumes of the above wrong slices
error_FP=[] #For error in which FPs considered as TPs (check below)
orig_check_sl=[ctname.split('.')[4] for ctname in original_CTs[index_init]] #Lists of strings containing possible slices with nodules
print("Possible candidates containing nodules: {}".format(orig_check_sl)) #Some of them different from RedCap slices with +- a few
print('\n')
#It helps to have sorted list. We will avoid errors by checking in order. It changes eg. slice 40 to 040 and adds it in the beginning of list
try:
only_nums=[ctname.split('.')[4] for ctname in original_CTs[index_init]] #Get number of slice from full name
sort_ind=[j for i in sorted(only_nums,key=int) for j in range(len(only_nums)) if only_nums[j]==i] #Get indices of sorted slices
#Use the above indices and replace order of scan slices and annotations
repl_original_CTs=[original_CTs[index_init][ind] for ind in sort_ind]
repl_annotated_CT_files=[annotated_CT_files[index_init][ind] for ind in sort_ind]
#Convert to lists, add them to the original lists (in the right index) and then convert to tuples to be used below in the loops
or_list=list(original_CTs)
an_list=list(annotated_CT_files)
or_list[index_init]=list(repl_original_CTs)
an_list[index_init]=list(repl_annotated_CT_files)
original_CTs=tuple(or_list)
annotated_CT_files=tuple(an_list)
print('Same but sorted',[ctname.split('.')[4] for ctname in original_CTs[index_init]])
except: #If above gives errors, then lists are empty - change them to empty
or_list=list(original_CTs)
an_list=list(annotated_CT_files)
or_list[index_init]=[]
an_list[index_init]=[]
original_CTs=tuple(or_list)
annotated_CT_files=tuple(an_list)
#Sort both GT and AI slices with nodules for consistency
try:
slice_vals,volume_ids,nodule_ids=zip(*sorted(zip(slice_vals,volume_ids,nodule_ids)))
print("GT slices sorted ",slice_vals)
except:
pass
try:
AI_nodule_ids=[x+1 for x in range(len(AI_pats[path.split('/')[-1]]))] #Get AI nodule IDs
AI_pats[path.split('/')[-1]],volumes_AI,AI_nodule_ids=zip(*sorted(zip(AI_pats[path.split('/')[-1]],volumes_AI,AI_nodule_ids)))
print("AI slices sorted",AI_pats[path.split('/')[-1]])
except:
pass
#To deal with error in eg. 748658 - confuses ID of nearby TP nodules - Now an error will be raised to check those cases manually
distance_all=1000 #Set an initial distance between slices to a big number
for ai_check in AI_pats[path.split('/')[-1]]: #Loop over AI slices
for gt_check in slice_vals: #Loop over GT slices
if np.abs(ai_check-gt_check)<=distance_all: #If the AI slice and the GT slice are close to each other (less than the defined distance)
if np.abs(ai_check-gt_check)==distance_all: #If we have two times the same distance (eg. slices 373 and 379 from 376) then we might have error since we don't know which is the correct slice for that distance
error_flag=1 #Set error flag when there are two same AI or GT slices
distance_error=1 #Also set the error distance since even if error_flag set to 1 we will still have '!!!' in the created excel file
else:
pass
distance_all=np.abs(ai_check-gt_check) #Set the distance to the current one
distance_all=1000 #When looping to next AI slice set it again to a big number to ensure that we also check the new AI slice with all the GT ones
#Extraction of annotations (images) for radiologists to review
#Initialize empty lists to be filled below with image data
AI_images=[] #All AI images, TP and FP - Used just for counting
AI_images_tp=[] #TP AI images - used for counting them
AI_images_fp=[] #FP AI images
AI_images_avoid_FP=[] #Keep track of slices since we may have many matches for same GT slice due to looping in a lot of nearby slices.
CT_scans=[] #All original scans images, TP and FN
CT_scans_tp=[] #TP nodule on radiologists' annotations
CT_scans_fn=[] #FN nodule on radiologists' annotations
CT_scans_same_slice=[] #To address cases of nodules considered as TP while they are FN - Look below
AI_images_fp_slice=[] #These will contain the final FP images
CT_scans_fn_slices=[] #These will contain the final FN images
AI_images_tp_slice=[] #These will contain the final TP images
#Only used to count if we have the correct numbers of images compared to findings - Not optimized to also extract TP images for now
AI_images_tp_box=[]
AI_images_avoid_tp_box=[]
for GT_num,GT_slice in enumerate(slice_vals): #Loop over GT slices
found=0 #An index to check if found=1 (TP) or not=0 (FN)
for indexct,ctname in enumerate(original_CTs[index_init]): #Loop over possible CT files having a nodule
for slice_CT_range in range(GT_slice-5,GT_slice+6): #Loop over +-5 CT files that may contain nodules since we may not have the same slice as in GT/REDCap
#For 136470 with FP not getting in the loop below - there are FP not taken into account - this is why condition below added
#'slice_CT_range>=0 and slice_CT_range<=size_CTs[index_init]' ensure that we don't have errors when nodule is on the first or last 10 slices of scan
if int(ctname.split('.')[4])==slice_CT_range and slice_CT_range>=0 and slice_CT_range<=size_CTs[index_init]:
for AI_num, slice_AI in enumerate(AI_pats[path.split('/')[-1]]): #Loop over slices in which AI detected a nodule
for slice_AI_range in range(int(slice_AI)-15,int(slice_AI)+16): #Loop over +-15 slices (changed from +-5 due to failure in 591162) of the nodule detected ones since we may have nodules in GT nearby
if int(slice_AI_range)==int(ctname.split('.')[4]): #If the AI slice is the same as one in CT file names
manual_CT=dicom.dcmread(path+'/'+ctname) #Load original CT DICOM slice to check for nodules
image_manual_CT=manual_CT.pixel_array #Load CT
manual_annotation=dicom.dcmread(path+'/'+annotated_CT_files[index_init][indexct]) #Load corresponding annotated slice
image_manual_annotation=manual_annotation.pixel_array #Load annotated CT
#Find locations of different pixels between original slice and annotations
differences_CT_annot=np.where(image_manual_CT!=image_manual_annotation)
im_CT_annot=np.zeros((512,512)) #Initialize empty array with same dimensions as CT to be filled with only the annotated nodule
im_CT_annot[differences_CT_annot]=image_manual_annotation[differences_CT_annot] #Keep only the region in which nodule exists in manual annotations
im_CT_annot[410:,468:]=0 #Set to zero bottom right region with 'F'
# cv2.imwrite(outputs[index_init]+'/'+ctname+'_manual_annotations.png',im_CT_annot) #Save annotated nodule only
im_CT_annot_thresh=cv2.threshold(im_CT_annot,1,255,cv2.THRESH_BINARY)[1] #Get thresholded version with 0 and 255 only
#####Extraction of annotations for radiologists to review
image_manual_CT_new=cv2.normalize(image_manual_CT,None,alpha=0,beta=255,norm_type=cv2.NORM_MINMAX,dtype=cv2.CV_32F) #Normalize image
new_type=image_manual_CT_new.astype(np.uint16) #Convert image to int16 to be used below - Below conversion to int16 may be omitted
all_new_type_fn=cv2.cvtColor((new_type).astype(np.uint16),cv2.COLOR_GRAY2RGB) #Convert grayscale image to colored
contours=cv2.findContours(im_CT_annot_thresh.astype(np.uint8),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) #Find contours
contours=contours[0] if len(contours)==2 else contours[1]
for cntr in contours: #Add rectangle around nodules
x,y,w,h=cv2.boundingRect(cntr)
cv2.rectangle(all_new_type_fn,(x,y),(x+w,y+h),(0,0,255),2)
#####Extraction of annotations for radiologists finishes here
for AI_slice_num in AI_pats_slices[path.split('/')[-1]]: #Loop over all AI slices of that participant
if int(slice_AI_range)==size_AI[index_init]-int(AI_slice_num.split('.')[4]): #If the AI slice is the same as one AI slice with nodule
AI_dicom=dicom.dcmread(path+'/'+AI_slice_num) #Load AI DICOM slice
image_AI=AI_dicom.pixel_array #Load AI CT slice
#Resize AI image to (512,512) - same size as SEG and CT files below, convert to HSV and get mask for red and yellow
AI_image=image_AI.copy() #As a good practice - to ensure that we don't change the original image
AI_512=cv2.resize(AI_image, dsize=(512, 512), interpolation=cv2.INTER_CUBIC) #Resize to (512,512)
AI_hsv=cv2.cvtColor(AI_512,cv2.COLOR_BGR2HSV) #Convert from BGR to HSV colorspace
mask_im_red=cv2.bitwise_and(AI_hsv,AI_hsv, mask=cv2.inRange(AI_hsv, (100,0,0), (200, 255, 255))) #Red mask - lower and upper HSV values
mask_im_red[0:50,:,:]=0 #Set top pixels that mention 'Not for clinical use' to zero - ignore them and keep only nodules
#Until here mask_im_red is a color image with range of values from 0-255
#Now we convert from BGR (emerged with bitwise operation) to grayscale to shape (512,512). This is used below to take a thresholded image
mask_im_red_gray=cv2.cvtColor(mask_im_red,cv2.COLOR_BGR2GRAY) #Also changes range of values
#Get a smoothed (thresholded) image with only nodules
#If 1 instead of 128 we have more pixels in the nodule contour and not well shaped
mask_red_thresh = cv2.threshold(mask_im_red_gray,128,255,cv2.THRESH_BINARY)[1]
# cv2.imwrite(outputs[index_init]+'/'+ctname+'_AI_detections.png',mask_red_thresh) #Save AI nodules
#####Extraction of annotations for radiologists to review
image_manual_CT_new=cv2.normalize(image_manual_CT,None,alpha=0,beta=255,norm_type=cv2.NORM_MINMAX)
new_type=image_manual_CT_new.astype(np.uint16) #Convert image to int16 - Below conversion may be omitted
all_new_type=cv2.cvtColor((new_type).astype(np.uint16),cv2.COLOR_GRAY2RGB) #Conversion to be easy to view
contours=cv2.findContours(mask_red_thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) #Find contours
contours=contours[0] if len(contours)==2 else contours[1]
for con_ind,cntr in enumerate(contours): #Add rectangles around nodules
one_box=all_new_type.copy() #Get a copy of the image to add box around it without modifying the original one
x,y,w,h=cv2.boundingRect(cntr)
cv2.rectangle(one_box,(x,y),(x+w,y+h),(0,0,255),2)
image_manual_CT=np.zeros((512,512))
#####Extraction of annotations for radiologists finishes here
if np.where(mask_red_thresh!=0)[0].size==0: #If there are no AI detected nodules
#For failure like in 369762, 985215, 184429, 335382 where TP while it should be FN
#Works only when GT slice in orig_CT - It will fail in other cases
if int(slice_CT_range)==int(GT_slice): #If the CT slice with possible nodules is the same as the GT nodule slice
same_slice_remove.append(GT_slice) #This is a FN
CT_scans_same_slice.append(all_new_type_fn) #Add to list of images considered as TP while they are FN
if np.where(mask_red_thresh!=0)[0].size>0: #If there are AI detected nodules
differences_AI_annot=np.where(im_CT_annot_thresh==mask_red_thresh) #Find where AI detected nodule overlaps with manual annotations
#The above refers to the same nodule but with different contours from AI and manual annotations! Not to different nodule locations.
im_overlap=np.zeros((512,512)) #Initialize empty array with same dimensions as CT to be filled with the overlap of AI and manually annotated nodule
im_overlap[differences_AI_annot]=mask_red_thresh[differences_AI_annot] #Add overlap area of AI and manually detected nodule
# cv2.imwrite(outputs[index_init]+'/'+ctname+str(slice_AI)+'_overlap.png',im_overlap)
#For debugging
# print("GT_slice is {}".format(GT_slice))
# print("slice_CT_range=ctname is {}".format(slice_CT_range))
# print("slice_AI_range is {}".format(slice_AI_range))
# print("AI_slice_num is {}".format(AI_slice_num))
# print("slice_AI is {}".format(slice_AI))
if len(np.unique(im_overlap))>1: #If there is overlap between AI and manually annotated nodules
#Since we may have many matches for same GT slice due to looping in a lot of nearby slices.
#Keep track of them and at the end keep only the one with the less distance from GT slice
avoid_FP.append(slice_AI) #Add to list to avoid adding a wrong FP - Failures in cases like 971099
avoid_FP_volumes.append(volumes_AI[AI_num]) #Keep track of the volume of these nodules - needed below
AI_images_avoid_FP.append(one_box) #Same for images
AI_images_avoid_tp_box.append([x,y,x+w,y+h]) #Add box location to list of TP to avoid
#--- Match the current AI detection against the current GT slice and do the TP bookkeeping. ---
#Each branch keeps tp_final / tp_AI_final / vols_tp / vols_tp_AI index-aligned (entry i of each list describes the same matched nodule).
#For failure in 673634 - AI detects two nodules on the same slice
#If we haven't added GT in TP, AI slice in TP_AI and GT in consecutive_slices (see below)
if GT_slice not in tp_final and slice_AI not in tp_AI_final and GT_slice not in consecutive_slices:
    if slice_CT_range in slice_vals and GT_slice!=slice_CT_range:
        #If GT_slice is not the same as the CT_slice and CT_slice exists in GT don't do anything since that's another nodule
        #Get in here for 944714, 985215, 673634, 585377, 591162, 670208
        pass
    else: #Add GT to TP, AI_slice to TP_AI, same for their volumes and declare that nodule was found
        #Below compare AI volume with those of GT (that have the same slice) and keep the one that is closest to the AI volume. We assume that is the correct one
        occurence2=np.where(np.array(slice_vals)==GT_slice) #Find where this slice occurs in GT slices
        vol_AI_nod=volumes_AI[AI_num] #Get volume of AI nodules
        big_dif=10000 #Initialize a value to a big number to keep track of difference between volumes
        for i in occurence2[0]: #Loop over indices where slice can be found
            dif=np.abs(vol_AI_nod-volume_ids[i]) #Difference between volumes
            if dif<=big_dif: #If difference smaller than the one defined
                big_dif=dif #Set this difference as the new difference
                vol_gt_nod=volume_ids[i] #Get the volume of TP from REDCap
        tp_final.append(GT_slice) #Add that slice to TP
        vols_tp.append(vol_gt_nod) #Add volume to TP volumes
        tp_AI_final.append(slice_AI) #Same for AI TP slice
        vols_tp_AI.append(volumes_AI[AI_num]) #Same for AI TP volume
        found=1 #Nodule found - not a FN
        #Add corresponding images, slices and boxes to the respective lists
        AI_images_tp.append(one_box) #TP image AI
        CT_scans_tp.append(all_new_type) #TP image
        AI_images_tp_slice.append(slice_AI) #AI TP slice
        CT_scans.append(all_new_type) #List of all images
        AI_images.append(one_box) #List of all AI images
        AI_images_tp_box.append([x,y,x+w,y+h]) #AI TP box
elif GT_slice not in tp_final and slice_AI in tp_AI_final and GT_slice not in consecutive_slices:
    #If we haven't added GT in TP, in consecutive_slices (see below), but AI_slice was added to TP_AI
    if slice_CT_range in slice_vals and GT_slice!=slice_CT_range: #Failure in 673634
        #If CT_slice in GT slices and GT_slice!=CT_slice don't do anything since it's another nodule
        pass #Here for 944714,810826,985215,591162, 585377, 673634, 670208, 754238, 320656 - no effect so ok
    else: #Add GT to TP, AI_slice to TP_AI, same for their volumes, declare that nodule was found, and add to FP_errors (see below)
        tp_final.append(GT_slice) #Add TP slice to the TP list
        #Same process as above. Compare AI volume with those of REDCap for the same slice and keep the one closest to AI (assumed to be the correct one)
        occurence2=np.where(np.array(slice_vals)==GT_slice) #Find where this slice occurs in GT slices
        vol_AI_nod=volumes_AI[AI_num] #Get AI volumes
        big_dif=10000 #initialize difference between volumes to a big value
        for i in occurence2[0]: #Loop over indices where slice can be found
            dif=np.abs(vol_AI_nod-volume_ids[i]) #Get difference between GT and AI volume
            if dif<=big_dif: #If that difference smaller than the one set initially
                big_dif=dif #Replace the biggest difference with the current one
                vol_gt_nod=volume_ids[i] #Here we get the volume in REDCap closest to the one of AI
        vols_tp.append(vol_gt_nod) #Add REDCap volume found in the above loop to the TP volumes
        #Below added for a second time but will be corrected below - need to have a correspondence/same number of slices as TP and that's why this is added
        #Get here for 521779, 585377, 810826
        tp_AI_final.append(slice_AI) #Add TP AI slice to list (duplicate entry - fixed by the error_FP handling later)
        vols_tp_AI.append(volumes_AI[AI_num]) #Same for TP AI volume
        #Here for 163557, 585377, 985215, 810826, 944714, 892519, 670208 but no errors created
        error_FP.append(slice_AI) #Add AI slice to possible errors where FP considered as TP
        found=1 #Nodule found - not a FN
        #Add images to the respective lists - Same as above
        AI_images_tp.append(one_box)
        CT_scans_tp.append(all_new_type)
        AI_images_tp_slice.append(slice_AI)
        CT_scans.append(all_new_type)
        AI_images.append(one_box)
        AI_images_tp_box.append([x,y,x+w,y+h])
#For failures in 670208 and 845594 with 2 same slices in GT
#If GT_slice in TP, GT_slice exists more than once in GT slices, and slice_AI not added in TP_AI
#Add them as above
elif GT_slice in tp_final and len(np.where(np.array(slice_vals)==GT_slice)[0])>1 and slice_AI not in tp_AI_final:
    tp_final.append(GT_slice) #Add GT slice to TP slices
    #Same volume-matching process as above
    occurence2=np.where(np.array(slice_vals)==GT_slice) #Find where this slice occurs in GT slices
    vol_AI_nod=volumes_AI[AI_num] #This is the AI volume
    big_dif=10000 #Start from a value larger than any realistic volume difference
    for i in occurence2[0]: #Loop over indices where slice can be found
        dif=np.abs(vol_AI_nod-volume_ids[i])
        if dif<=big_dif:
            big_dif=dif
            vol_gt_nod=volume_ids[i]
    vols_tp.append(vol_gt_nod)
    tp_AI_final.append(slice_AI)
    vols_tp_AI.append(volumes_AI[AI_num])
    found=1 #Nodule found - not a FN
    #Add images to the respective lists
    AI_images_tp.append(one_box)
    CT_scans_tp.append(all_new_type)
    AI_images_tp_slice.append(slice_AI)
    CT_scans.append(all_new_type)
    AI_images.append(one_box)
    AI_images_tp_box.append([x,y,x+w,y+h])
#Last condition len(np.where) to address failure in 429789 - Here when there is no overlap => no TP
#Cases with TP and two same GT slices addressed exactly above
elif GT_slice in tp_final and int(slice_CT_range)==int(GT_slice) and len(np.where(np.array(orig_check_sl)==str(GT_slice))[0])<2:
    #If there is no overlap between AI and manual annotation (no TP) but GT_slice already in TP (added wrongly in another loop), CT_slice==GT_slice and we only have GT_slice once in possible CT slices with nodules
    #We get in here only for 136154 (two different annotations in two consecutive slices (one in AI detections (FP) and one in GT (FN))), 673634
    #Undo the wrong TP: remove the matched entry from every index-aligned TP list, then reclassify as FP (AI side) + FN (GT side)
    ind_remove=np.where(np.array(tp_final)==GT_slice) #Find index of tp_final to remove
    tp_final.remove(GT_slice) #Remove this slice from TP
    del tp_AI_final[ind_remove[0][0]] #Remove it from TP_AI as well
    del vols_tp[ind_remove[0][0]] #Remove the volume from TP volumes
    del vols_tp_AI[ind_remove[0][0]] #Remove the volume from TP_AI volumes
    fp_final.append(slice_AI) #Add AI_slice to FP
    fn_final.append(GT_slice) #Add GT_slice to FN
    vols_fp.append(volumes_AI[AI_num]) #Add volume AI to volumes with FPs
    vols_fn.append(volume_ids[GT_num]) #Add volume of GT to volumes with FNs
    #For failure in 673634 - two same slices in AI detections
    consecutive_slices.append(GT_slice) #Add it in list since there is also a nearby slice in GT
    #Remove image considered as TP and add the respective images as FP, FN
    del AI_images_tp[ind_remove[0][0]]
    del AI_images_tp_box[ind_remove[0][0]]
    del CT_scans_tp[ind_remove[0][0]]
    AI_images_fp.append(one_box)
    CT_scans_fn.append(all_new_type_fn)
    AI_images_fp_slice.append(slice_AI)
    CT_scans_fn_slices.append(GT_slice)
    del AI_images_tp_slice[ind_remove[0][0]]
#--- Disambiguate multiple AI matches for the same GT slice. ---
#For failures in eg. 971099, 944714, 985215, 892519, 163557, 670208, 278319
#Since we may have many matches for same GT slice due to looping in a lot of nearby slices.
#Keep track of them all and at the end keep only the one with the least distance from the GT slice
if len(avoid_FP)>1: #If we have more than one match for the same slice
    try: #Error since in 720754 no tp_AI_final[-1] exists yet - empty list - will be created when we loop over next slices
        if [x for val_check in avoid_FP for x in range(int(val_check)-15,int(val_check)+15) if x==tp_AI_final[-1]]!=[]:
            #Above condition added since we may add slices to avoid_FP without actually adding a TP, meaning that we would replace a wrong FP slice - example is the extra FP in 673634
            # #For debugging
            # print("GT_slice is",GT_slice)
            # print('Possible slices with a TP_AI are {}'.format(avoid_FP))
            # print('Their volumes are {}'.format(avoid_FP_volumes))
            distance=1000 #Initialize this value to be much bigger than the maximum difference of slices squared (here maximum is 15^2=225)
            keep=-1 #initialize an index to -1
            for ind_sel,select in enumerate(avoid_FP): #Loop over indices and values of wrong FP
                #<= set to correct error in 585377
                if np.abs(GT_slice-select)<=distance: #If the difference between our GT_slice and the possible wrong FP is less than the above distance
                    #In case with 10 nodules (eg. 585377) we have GT in 131 and possible TP_AI in 129,133. == below added to find the correct one (133)
                    if np.abs(GT_slice-select)==distance: #In case that we have two AI slices with the same distance from GT slice - break the tie by volume similarity
                        ind_vol_GT=np.where(np.array(slice_vals)==GT_slice) #Get index where GT slice occurs to find GT volume
                        for real_slice in ind_vol_GT[0]: #Loop over the GT occurrences
                            vol_ind_sel_dif=np.abs(avoid_FP_volumes[ind_sel]-volume_ids[real_slice]) #Volume difference of current AI slice with GT volume
                            vol_keep=np.abs(avoid_FP_volumes[keep]-volume_ids[real_slice]) #Volume difference of previous AI slice of same distance with GT volume
                            if vol_ind_sel_dif>vol_keep: #If the current AI slice is further away in terms of volume size, ignore it
                                pass
                            elif vol_ind_sel_dif<=vol_keep: #If the current AI slice is closer in terms of volume size with GT volume, keep this instead
                                keep=ind_sel #Keep the index in that case
                    else: #If np.abs(GT_slice-select)<distance
                        distance=np.abs(GT_slice-select) #Replace distance with the new one
                        keep=ind_sel #Keep the index
            #Get in here also for the following: 335382, 384136, 395464, 427498, 440453,
            #591162, 673634, 944714, 971099, 985215, but no issue created
            #In 985215 we get the correct results since we don't get in the if statement below and no replacement takes place
            #AI slice 167 (AI5 nodule) matched with 169 of GT (L20) and then, AI slice 168 (AI6 nodule) matched with slice 170 of GT (L19) - since no replacement
            print("Kept slice which is closest to GT slice - here {} with volume {}".format(avoid_FP[keep],avoid_FP_volumes[keep]))
            #Below to address failure in 985215 after fixing 971099 and to avoid errors with TP_AI nodules and their volumes/IDs
            #If the slice with the minimum distance not already added to TP_AI (whereas the corresponding GT was added to TP), add it
            #This might happen if eg. we were not in any of the if/elif statements above (where avoid_FP was defined)
            if avoid_FP[keep] not in tp_AI_final and GT_slice in tp_final:
                #We get in here at least for: 944714 985215 971099 335382 427498 440453 585377 591162 892519 163557 670208
                tp_AI_final[-1]=avoid_FP[keep] #Replace last TP AI slice
                #####Extraction of annotations for radiologists to review
                AI_images_tp[-1]=AI_images_avoid_FP[keep] #Replace TP image
                AI_images_tp_box[-1]=AI_images_avoid_tp_box[keep] #Change TP box coordinates
                AI_images_tp_slice[-1]=avoid_FP[keep] #Replace last slice with the kept slice
                #####Extraction of annotations for radiologists finishes here
                for FP_ind,FP_slice_check in enumerate(AI_pats[path.split('/')[-1]]):
                    if avoid_FP[keep]==FP_slice_check and volumes_AI[FP_ind]==avoid_FP_volumes[keep]:
                        vols_tp_AI[-1]=volumes_AI[FP_ind] #Replace AI volume with the correct one
    except: #NOTE(review): bare except deliberately swallows the empty-tp_AI_final case (720754); it also hides any other error here
        pass
avoid_FP=[] #Set the possible FP to empty for next GT slice loop
avoid_FP_volumes=[] #Same for their volumes
print('\n')
AI_images_avoid_FP=[] #And for the TP AI image
#--- FN bookkeeping: GT nodule with no matching AI detection. ---
if found==0: #If there is no correspondence for the nodule in the GT_slice with an AI nodule then it's a FN
    fn_final.append(GT_slice) #Add that to list of FN slices
    occurence2=np.where(np.array(slice_vals)==GT_slice) #Find where this slice occurs in GT slices
    #This will work only when we have two times the same slice. If more than two then it will fail
    for i in occurence2[0]: #Loop over indices where slice can be found
        if len(occurence2[0])==2 and GT_slice in tp_final: #If there are two same slices and one already added in TPs
            if volume_ids[i] in vols_tp: #If the current volume added in TP volume then don't do anything
                pass
            else: #Otherwise add to FN
                vols_fn.append(volume_ids[i])
        else: #If not, add the volume to FNs
            vols_fn.append(volume_ids[GT_num])
    #####Extraction of annotations for radiologists to review
    flag=0 #Flag to check if we have added nodule to FN
    perfect_slice=0 #To denote that FN slice exists in CT slices with possible nodules
    #Sort to fix error in 998310 - better since it's more generalizable
    temp_original_CTs,temp_annot_CTs=original_CTs[index_init],annotated_CT_files[index_init]
    #We can replace 'temp' with original ones - Already sorted above and they should work here - Does not make any difference if left untouched
    #Get index and slice number in which we have the FN in the above sorted list
    #This will not work if two FN in the same slice!
    if int(GT_slice) in np.array([int(ctname.split('.')[4]) for ctname in original_CTs[index_init]]):
        occurence=np.where(np.array([int(ctname.split('.')[4]) for ctname in temp_original_CTs])==int(GT_slice))[0][0] #Get indices to be accessed below
        perfect_slice=1 #indicate that slice with FN exists in CT slices with possible nodules
    for indexct,ctname in enumerate(temp_original_CTs): #Loop over possible CT files having a nodule
        for slice_CT_range in range(GT_slice-5,GT_slice+6): #Loop over +-5 CT files that may contain nodules since we may not have the same slice as in GT/REDCap
            if slice_CT_range>=0 and slice_CT_range<=size_CTs[index_init]: #To avoid errors if nodules in last/first 10 slices
                if perfect_slice==1: #If the slice with FN exists in CT slices with possible nodules, then check only this slice and not the rest
                    slice_CT_range=GT_slice #So that we won't loop in other slices
                    ctname=temp_original_CTs[occurence] #Change the name to be checked below to the current one
                    indexct=occurence #Same for the index
                else:
                    pass
                if flag==1: #If we have already added the FN in the list in a previous iteration break from the loop
                    break
                #For 136470 with FP not getting in the loop below - there are FP not taken into account - this is why condition below added
                if int(ctname.split('.')[4])==slice_CT_range:
                    manual_CT=dicom.dcmread(path+'/'+ctname) #Load original CT DICOM slice to check for nodules
                    image_manual_CT=manual_CT.pixel_array #Load CT
                    manual_annotation=dicom.dcmread(path+'/'+temp_annot_CTs[indexct]) #Load corresponding annotated slice
                    image_manual_annotation=manual_annotation.pixel_array #Load annotated CT
                    #Find locations of different pixels between original slice and annotations
                    differences_CT_annot=np.where(image_manual_CT!=image_manual_annotation)
                    im_CT_annot=np.zeros((512,512)) #Initialize empty array with same dimensions as CT to be filled with only the annotated nodule
                    im_CT_annot[differences_CT_annot]=image_manual_annotation[differences_CT_annot]
                    #Keep only the region in which nodule exists in manual annotations
                    im_CT_annot[410:,468:]=0 #Set to zero bottom right region with 'F'
                    # cv2.imwrite(outputs[index_init]+'/'+ctname+'_manual_annotations.png',im_CT_annot) #Save annotated nodule only
                    im_CT_annot_thresh=cv2.threshold(im_CT_annot,1,255,cv2.THRESH_BINARY)[1] #Get thresholded version with 0 and 255 only
                    image_manual_CT_new=cv2.normalize(image_manual_CT,None,alpha=0,beta=255,norm_type=cv2.NORM_MINMAX,dtype=cv2.CV_32F) #Normalize image
                    new_type=image_manual_CT_new.astype(np.uint16) #Convert to uint16 - Below not needed to convert to uint16 again
                    all_new_type=cv2.cvtColor((new_type).astype(np.uint16),cv2.COLOR_GRAY2RGB) #Convert to color
                    contours=cv2.findContours(im_CT_annot_thresh.astype(np.uint8),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) #Find contours
                    contours=contours[0] if len(contours)==2 else contours[1] #findContours returns 2 or 3 values depending on OpenCV version
                    for cntr in contours: #Add nodule contours to image
                        x,y,w,h=cv2.boundingRect(cntr)
                        cv2.rectangle(all_new_type,(x,y),(x+w,y+h),(0,0,255),2)
                    if len(np.unique(new_type))>1:#Add images to corresponding lists (skip blank slices)
                        CT_scans_fn.append(all_new_type) #Original CT slice with nodule contour around the FN
                        CT_scans.append(all_new_type) #Add image to the list of all images
                        CT_scans_fn_slices.append(GT_slice) #Add slice to FN slice list
                        flag=1 #Set flag to 1 to denote that nodule was added in FN findings
    #Set these to 0 again for the next GT slice
    perfect_slice=0
    flag=0
#####Extraction of annotations for radiologists finishes here
print('\n')
#--- Setup for the FP pass below: fresh flags/lists and a listing of this participant's files. ---
#Not all FP were taken into account above - We have mostly focused until now on TP, FN
#In cases like 136470 where FP not taken into account/added in the above lists we have the code below
flag=0 #New flag here set to 0
#Below to keep track of coordinates of FP nodule and slices
blanks=[] #To keep track of temporary coordinates - modified below
blanks_final=[] #Here the final coordinates
blank_slices=[] #Here the final slices
files_all=[] #Get a list of all the files in the folder of a particular participant
for file in os.listdir(path):
    files_all.append(file)
#--- Main FP pass: every AI slice not already classified becomes a FP; extract its image for review. ---
for AI_fp_inds,slice_with_FP in enumerate(AI_pats[path.split('/')[-1]]): #loop over all AI slices with nodules
    if slice_with_FP not in fp_final and slice_with_FP not in tp_AI_final: #If this slice not in FP and not in TP_AI
        fp_final.append(slice_with_FP) #Add it to FPs
        vols_fp.append(volumes_AI[AI_fp_inds]) #Add its volume to volumes of FPs
        #Failure in 892519 where the same two AI slices exist, one of them being TP and the other FP - Extracted images below will not be correct either
        #Similarly, in 673634 these two AI slices are both FP
        counting=0 #Initialize a counter to 0
        total_AI=len(np.where(np.array(AI_pats[path.split('/')[-1]])==slice_with_FP)[0]) #Find how many occurrences of this slice
        #If this slice does not exist in TP_AI, it either exists in FP or does not exist at all
        counting=counting+len(np.where(fp_final==slice_with_FP)[0]) #Similar as above but only with FP occurrences now
        fp_position=[] #If we have the same FP slice more than once
        if counting<total_AI: #Get in here for 673634, 128443, 129311
            #Similarly as above
            occurences=np.where(np.array(AI_pats[path.split('/')[-1]])==slice_with_FP)[0]
            for occur_ind,indexing in enumerate(occurences): #We may have the same FP slice more than once
                if volumes_AI[indexing] not in vols_fp: #Add volume and index
                    vols_fp.append(volumes_AI[indexing])
                    fp_position.append(occur_ind)
            for i in range(total_AI-counting): #Add slice to FP list many times (as many as it occurs as FP)
                fp_final.append(slice_with_FP)
        #####Extraction of annotations for radiologists to review
        for indexct_file,filename in enumerate(files_all): #Loop over list of all CT files
            if len(filename.split('.')[3])==1 and int(filename.split('.')[4])==int(slice_with_FP): #If we have an original CT slice and its slice number is in 'slice_with_FP'
                manual_CT=dicom.dcmread(path+'/'+filename) #Load original CT DICOM slice to check for nodules
                image_manual_CT=manual_CT.pixel_array #Load CT
                for AI_slice_num in AI_pats_slices[path.split('/')[-1]]: #Loop over all AI slices of that participant
                    if int(slice_with_FP)==size_AI[index_init]-int(AI_slice_num.split('.')[4]) and flag==0: #If the AI slice is the same as one AI slice with nodule - NOTE: AI slice numbering is reversed w.r.t. CT, hence size_AI minus slice number
                        # print("AI slice num {}".format(AI_slice_num))
                        # print("slice_with_FP {}".format(slice_with_FP))
                        # print("size_AI is {}".format(size_AI[index_init]))
                        AI_dicom=dicom.dcmread(path+'/'+AI_slice_num) #Load AI DICOM slice
                        image_AI=AI_dicom.pixel_array #Load AI CT slice
                        #Resize AI image to (512,512) - same size as SEG and CT files below, convert to HSV and get mask for red and yellow
                        AI_image=image_AI.copy() #As a good practice - to ensure that we don't change the original image
                        AI_512=cv2.resize(AI_image, dsize=(512, 512), interpolation=cv2.INTER_CUBIC) #Resize to (512,512)
                        AI_hsv=cv2.cvtColor(AI_512,cv2.COLOR_BGR2HSV) #Convert from BGR to HSV colorspace
                        mask_im_red=cv2.bitwise_and(AI_hsv,AI_hsv, mask=cv2.inRange(AI_hsv, (100,0,0), (200, 255, 255))) #Red mask - lower and upper HSV values
                        mask_im_red[0:50,:,:]=0 #Set top pixels that mention 'Not for clinical use' to zero - ignore them and keep only nodules
                        #Until here mask_im_red is a color image with range of values from 0-255
                        #Now we convert from BGR (emerged with bitwise operation) to grayscale to shape (512,512)
                        #It is used below to take a thresholded image
                        mask_im_red_gray=cv2.cvtColor(mask_im_red,cv2.COLOR_BGR2GRAY) #Also changes range of values
                        #Get a smoothed (thresholded) image with only nodules
                        #If 1 instead of 128 we have more pixels in the nodule contour and not well shaped
                        mask_red_thresh = cv2.threshold(mask_im_red_gray,128,255,cv2.THRESH_BINARY)[1]
                        # cv2.imwrite(outputs[index_init]+'/'+ctname+'_AI_detections.png',mask_red_thresh) #Save AI nodules
                        #Normalize image and change its type to uint16
                        image_manual_CT_new=cv2.normalize(image_manual_CT,None,alpha=0,beta=255,norm_type=cv2.NORM_MINMAX)#,dtype=cv2.CV_32F)
                        new_type=image_manual_CT_new.astype(np.uint16)
                        all_new_type=cv2.cvtColor((new_type).astype(np.uint16),cv2.COLOR_GRAY2RGB)
                        contours=cv2.findContours(mask_red_thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
                        contours=contours[0] if len(contours)==2 else contours[1] #Handle both OpenCV return conventions
                        #Below loop added to ensure that we catch all nodules and don't have missing slices
                        #This might happen for eg. 916856 where AI nodule 2 and 6 are on the same location (coordinates) but on different slices
                        for check_AI_slice in AI_pats[path.split('/')[-1]]: #Loop over AI slices
                            close_slice_found=0 #Initialize a flag to 0
                            check_AI_slice=int(check_AI_slice) #Get AI slice as integer
                            for check_AI_slice_range in range(check_AI_slice-10,check_AI_slice+10): #Loop over the previous and next 10 slices
                                #If close_slice_found=0 then it's definitely a FP since there is no closeby GT slice there
                                #If there is another AI slice within 10 slices from the one that is being checked or if the one being checked is in GT slices then we might have a TP as well
                                if (check_AI_slice_range in [int(x) for x in AI_pats[path.split('/')[-1]]] and check_AI_slice_range!=check_AI_slice) or check_AI_slice_range in [int(x) for x in slice_vals]:
                                    close_slice_found=1
                                elif [int(x) for x in AI_pats[path.split('/')[-1]]].count(check_AI_slice)>1: #If this slice exists more than once in AI slices then we might also have a TP in them
                                    close_slice_found=1
                            for con_ind,cntr in enumerate(contours): #Plot nodule contours around it
                                one_box=all_new_type.copy()
                                x,y,w,h=cv2.boundingRect(cntr)
                                cv2.rectangle(one_box,(x,y),(x+w,y+h),(0,0,255),2)
                                # cv2.imwrite(outputs[index_init]+'/'+str(x)+','+str(y)+','+str(x+w)+','+str(y+h)+'_AI_detections.png',one_box)
                                if close_slice_found==0: #it's definitely a FP
                                    if len(np.unique(mask_red_thresh))>1 and flag==0: #If not already added, add it to FP slices
                                        blanks.append([x,y,x+w,y+h]) #Append coordinates of FP nodule
                                        if len(blanks)==2: #If we have two nodules being FP
                                            box_1=torch.tensor([blanks[-2]],dtype=torch.float) #Get box around first nodule
                                            box_2=torch.tensor([blanks[-1]],dtype=torch.float) #Same for second
                                            iou=float(bops.box_iou(box_1,box_2)) #calculate overlap between them
                                            if iou<0.05: #If there is almost no overlap then the new nodule is also FP
                                                AI_images.append(one_box) #Add image to list of images
                                                AI_images_fp.append(one_box) #Add image to list of FP images
                                                AI_images_fp_slice.append(slice_with_FP) #Add FP slice to list of FP
                                                blanks_final.append([x,y,x+w,y+h]) #Add also its coordinates to final ones
                                                blank_slices.append(slice_with_FP) #Add slice of it as well
                                                #If the slice being examined exists only once in AI slices and the next and the previous of it is not in the AI slice list then set flag to 1 to denote that nodule added in FP
                                                if len(np.where(np.array(AI_pats[path.split('/')[-1]])==slice_with_FP)[0])<=1 and (slice_with_FP+1 not in AI_pats[path.split('/')[-1]] and slice_with_FP-1 not in AI_pats[path.split('/')[-1]]):
                                                    flag=1
                                        elif len(blanks)==1: #If this is the first box being examined add it to list of FPs
                                            AI_images.append(one_box)
                                            AI_images_fp.append(one_box)
                                            AI_images_fp_slice.append(slice_with_FP)
                                            blanks_final.append([x,y,x+w,y+h])
                                            blank_slices.append(slice_with_FP)
                                            #Same condition as above: If slice_with_FP+-1 not in AI slices with nodules (for error in 873698) - set flag to 1
                                            if len(np.where(np.array(AI_pats[path.split('/')[-1]])==slice_with_FP)[0])<=1 and (slice_with_FP+1 not in AI_pats[path.split('/')[-1]] and slice_with_FP-1 not in AI_pats[path.split('/')[-1]]):
                                                flag=1
                                        else: #For 3 or more boxes:
                                            box_1=torch.tensor([blanks[-1]],dtype=torch.float) #Last nodule box
                                            flag_blanks_fin=0 #Set a flag to 0 (if remains 0 that would mean that this box/FP was not added yet)
                                            for fin_blank in blanks_final: #Loop over the boxes for which we are confident that they are FP
                                                box_check=torch.tensor([fin_blank],dtype=torch.float) #Get box of one of the confident FPs
                                                iou_check=float(bops.box_iou(box_1,box_check)) #Calculate overlap of it with the latest added box
                                                if iou_check>0.05: #here the opposite since if there is one match that means that it exists
                                                    flag_blanks_fin=1 #Set flag to 1 to denote that box (and so FP) already added in list
                                            if flag_blanks_fin==0:#If this box not added yet then add it to FPs
                                                AI_images.append(one_box)
                                                AI_images_fp.append(one_box)
                                                AI_images_fp_slice.append(slice_with_FP)
                                                blanks_final.append([x,y,x+w,y+h])
                                                blank_slices.append(slice_with_FP)
                                                #Same condition as above to denote that box added
                                                if len(np.where(np.array(AI_pats[path.split('/')[-1]])==slice_with_FP)[0])<=1 and (slice_with_FP+1 not in AI_pats[path.split('/')[-1]] and slice_with_FP-1 not in AI_pats[path.split('/')[-1]]):
                                                    flag=1
                                else: #In case the slice might or might not be a FP
                                    if len(AI_images_tp_box)>0: #If we have already added some TP boxes of similar nodules in the corresponding variable
                                        #Everything below will only work for cases that we have added at least one TP image
                                        for tp_box in AI_images_tp_box: #loop over TP boxes
                                            box_tp=torch.tensor([tp_box],dtype=torch.float) #Get TP box
                                            box_new=torch.tensor([[x,y,x+w,y+h]],dtype=torch.float) #get current box
                                            iou_tp=float(bops.box_iou(box_tp,box_new)) #calculate overlap
                                            if iou_tp<0.01: #Means no overlap and so, two separate boxes - Now we are confident it's a FP
                                                #Now use same code as above where we were confident it was FP
                                                if len(np.unique(mask_red_thresh))>1 and flag==0: #If not already added, add it to FP slices
                                                    blanks.append([x,y,x+w,y+h]) #add box coordinates to list
                                                    if len(blanks)==2: # If two boxes added in list with boxes
                                                        box_1=torch.tensor([blanks[-2]],dtype=torch.float)
                                                        box_2=torch.tensor([blanks[-1]],dtype=torch.float)
                                                        iou=float(bops.box_iou(box_1,box_2))
                                                        if iou<0.05:
                                                            AI_images.append(one_box)
                                                            AI_images_fp.append(one_box)
                                                            AI_images_fp_slice.append(slice_with_FP)
                                                            blanks_final.append([x,y,x+w,y+h])
                                                            blank_slices.append(slice_with_FP)
                                                            if len(np.where(np.array(AI_pats[path.split('/')[-1]])==slice_with_FP)[0])<=1 and (slice_with_FP+1 not in AI_pats[path.split('/')[-1]] and slice_with_FP-1 not in AI_pats[path.split('/')[-1]]):
                                                                flag=1
                                                    elif len(blanks)==1: #If only one box added in list with boxes
                                                        AI_images.append(one_box)
                                                        AI_images_fp.append(one_box)
                                                        AI_images_fp_slice.append(slice_with_FP)
                                                        blanks_final.append([x,y,x+w,y+h])
                                                        blank_slices.append(slice_with_FP)
                                                        #If slice_with_FP+-1 not in AI slices with nodules (for error in 873698) - set flag to 1
                                                        if len(np.where(np.array(AI_pats[path.split('/')[-1]])==slice_with_FP)[0])<=1 and (slice_with_FP+1 not in AI_pats[path.split('/')[-1]] and slice_with_FP-1 not in AI_pats[path.split('/')[-1]]):
                                                            flag=1
                                                    else: #If more than 2 boxes
                                                        box_1=torch.tensor([blanks[-1]],dtype=torch.float) #Get box 1
                                                        flag_blanks_fin=0 #Set a flag to 0 (if remains 0 that would mean that this box/FP was not added yet)
                                                        for fin_blank in blanks_final:
                                                            box_check=torch.tensor([fin_blank],dtype=torch.float)
                                                            iou_check=float(bops.box_iou(box_1,box_check))
                                                            if iou_check>0.05: #here the opposite since if there is one match that means that it exists
                                                                flag_blanks_fin=1
                                                            else:
                                                                pass
                                                        if flag_blanks_fin==0:
                                                            AI_images.append(one_box)
                                                            AI_images_fp.append(one_box)
                                                            AI_images_fp_slice.append(slice_with_FP)
                                                            blanks_final.append([x,y,x+w,y+h])
                                                            blank_slices.append(slice_with_FP)
                                                            if len(np.where(np.array(AI_pats[path.split('/')[-1]])==slice_with_FP)[0])<=1 and (slice_with_FP+1 not in AI_pats[path.split('/')[-1]] and slice_with_FP-1 not in AI_pats[path.split('/')[-1]]):
                                                                flag=1
                                    else: #Again we have exactly the same code as above for cases without TP boxes added
                                        if len(np.unique(mask_red_thresh))>1 and flag==0: #If not already added, add it to FP slices
                                            blanks.append([x,y,x+w,y+h])
                                            if len(blanks)==2:
                                                box_1=torch.tensor([blanks[-2]],dtype=torch.float)
                                                box_2=torch.tensor([blanks[-1]],dtype=torch.float)
                                                iou=float(bops.box_iou(box_1,box_2))
                                                if iou<0.05:
                                                    AI_images.append(one_box)
                                                    AI_images_fp.append(one_box)
                                                    AI_images_fp_slice.append(slice_with_FP)
                                                    blanks_final.append([x,y,x+w,y+h])
                                                    blank_slices.append(slice_with_FP)
                                                    if len(np.where(np.array(AI_pats[path.split('/')[-1]])==slice_with_FP)[0])<=1 and (slice_with_FP+1 not in AI_pats[path.split('/')[-1]] and slice_with_FP-1 not in AI_pats[path.split('/')[-1]]):
                                                        flag=1
                                            elif len(blanks)==1:
                                                AI_images.append(one_box)
                                                AI_images_fp.append(one_box)
                                                AI_images_fp_slice.append(slice_with_FP)
                                                blanks_final.append([x,y,x+w,y+h])
                                                blank_slices.append(slice_with_FP)
                                                #If slice_with_FP+-1 not in AI slices with nodules (for error in 873698) - set flag to 1
                                                if len(np.where(np.array(AI_pats[path.split('/')[-1]])==slice_with_FP)[0])<=1 and (slice_with_FP+1 not in AI_pats[path.split('/')[-1]] and slice_with_FP-1 not in AI_pats[path.split('/')[-1]]):
                                                    flag=1
                                            else:
                                                box_1=torch.tensor([blanks[-1]],dtype=torch.float)
                                                flag_blanks_fin=0
                                                for fin_blank in blanks_final:
                                                    box_check=torch.tensor([fin_blank],dtype=torch.float)
                                                    iou_check=float(bops.box_iou(box_1,box_check))
                                                    if iou_check>0.05: #here the opposite since if there is one match that means that it exists
                                                        flag_blanks_fin=1
                                                    else:
                                                        pass
                                                if flag_blanks_fin==0:
                                                    AI_images.append(one_box)
                                                    AI_images_fp.append(one_box)
                                                    AI_images_fp_slice.append(slice_with_FP)
                                                    blanks_final.append([x,y,x+w,y+h])
                                                    blank_slices.append(slice_with_FP)
                                                    if len(np.where(np.array(AI_pats[path.split('/')[-1]])==slice_with_FP)[0])<=1 and (slice_with_FP+1 not in AI_pats[path.split('/')[-1]] and slice_with_FP-1 not in AI_pats[path.split('/')[-1]]):
                                                        flag=1
        #Set image and flag to zeros for the next candidate FP slice
        image_manual_CT=np.zeros((512,512))
        flag=0
#--- Prune duplicate FP images (same slice extracted twice). ---
#Avoid cases in which same slice added to FP images twice - We don't cover cases with three consecutive FP slices!
#Got in here for 136307,184429,335382, 873698
these_keep=[] #list of indices to keep
for ind,img in enumerate(AI_images_fp): #Loop over FP images
    num_dif=0 #Index to keep track of the same images contained in AI FP image list
    indices=[] #Keep track of indices of FP images
    for ind_last,img_last in enumerate(AI_images_fp): #Second loop on FP images
        if len(np.where(img!=img_last)[0])>5000: #For different FP images (>5000 differing pixels means genuinely different images)
            num_dif=num_dif+1 #Keep track of the number of different images in above list
            indices.append(ind_last) #and of their indices
    tot=len(AI_images_fp)-num_dif #Number of same images in the above list
    if tot==2: #If we have the same 2 FP images
        for i in range(len(AI_images_fp)):
            if i not in indices:
                these_keep.append(i) #Indices to keep for that case - Get in here for 673634
    elif tot==1: #+1 since we don't take into account ind_last=ind
        these_keep.append(ind)
#Loop over the unique indices with FP, add FP to the corresponding list, and keep track of their slices
AI_images_newfp=[]
AI_images_newfp_slice=[]
for kept in np.unique(these_keep):
    AI_images_newfp.append(AI_images_fp[kept])
    AI_images_newfp_slice.append(AI_images_fp_slice[kept])
AI_images_fp=AI_images_newfp
AI_images_fp_slice=AI_images_newfp_slice
#####Extraction of annotations for radiologists finishes here
#For failure in 369762, 184429, 225969, 585377 where we get a TP while it should be an FN
for ind_wrong,wrong_annot in enumerate(same_slice_remove): #Loop over slices that were accidentaly added in this list (slices added incorrectly as TP from looping in nearby slices)
#We may have the same slice multiple times in same_slice_remove (and only once in TP)=> Not an issue since we will only get once inside the 'if' statement below
#This list has slices with no overlap between GT and AI slices and for which slice_CT_range==GT_slice, meaning there are FN - Example 429703
if wrong_annot in tp_final: #If this slice also in TP
ind_remove=np.where(np.array(tp_final)==wrong_annot) #Find index of occurence
tp_final.remove(wrong_annot) #Remove from TP
#For failure in 810826 (defined the loop below for it), 985215, 369762
#If got below it means that we had TP and TP_AI. The latter should also be removed and added to FP instead
if tp_AI_final[ind_remove[0][0]] not in error_FP: #If the corresponding TP_AI slice not in error_FP
fp_final.append(tp_AI_final[ind_remove[0][0]]) #Add it to FP
fp_vol_check=np.where(np.array(AI_pats[path.split('/')[-1]])==tp_AI_final[ind_remove[0][0]])
vols_fp.append(volumes_AI[fp_vol_check[0][0]])
##########Extraction of annotations for radiologists to review
for indexct_file,filename in enumerate(files_all): #Loop over all files for that participant and get slice in which TP AI nodule can be found
if len(filename.split('.')[3])==1 and int(filename.split('.')[4])==int(tp_AI_final[ind_remove[0][0]]):
manual_CT=dicom.dcmread(path+'/'+filename) #Load original CT DICOM slice to check for nodules
image_manual_CT=manual_CT.pixel_array #Load CT
for indexct_file,filename in enumerate(files_all):
if int(size_AI[index_init]-tp_AI_final[ind_remove[0][0]])==int(filename.split('.')[4]) and len(filename.split('.')[3])!=1 and flag==0: #If the AI slice is the same as one AI slice with nodule
AI_dicom=dicom.dcmread(path+'/'+filename) #Load AI DICOM slice
image_AI=AI_dicom.pixel_array #Load AI CT slice
#Resize AI image to (512,512) - same size as SEG and CT files below, convert to HSV and get mask for red and yellow
AI_image=image_AI.copy() #As a good practice - to ensure that we don't change the original image
AI_512=cv2.resize(AI_image, dsize=(512, 512), interpolation=cv2.INTER_CUBIC) #Resize to (512,512)
AI_hsv=cv2.cvtColor(AI_512,cv2.COLOR_BGR2HSV) #Convert from BGR to HSV colorspace
mask_im_red=cv2.bitwise_and(AI_hsv,AI_hsv, mask=cv2.inRange(AI_hsv, (100,0,0), (200, 255, 255))) #Red mask - lower and upper HSV values
mask_im_red[0:50,:,:]=0 #Set top pixels that mention 'Not for clinical use' to zero - ignore them and keep only nodules
#Until here mask_im_red is a color image with range of values from 0-255
#Now we convert from BGR (emerged with bitwise operation) to grayscale to shape (512,512)
#It is used below to take a thresholded image
mask_im_red_gray=cv2.cvtColor(mask_im_red,cv2.COLOR_BGR2GRAY) #Also changes range of values
#Get a smoothed (thresholded) image with only nodules
#If 1 instead of 128 we have more pixels in the nodule contour and not well shaped
mask_red_thresh = cv2.threshold(mask_im_red_gray,128,255,cv2.THRESH_BINARY)[1]
# cv2.imwrite(outputs[index_init]+'/'+ctname+'_AI_detections.png',mask_red_thresh) #Save AI nodules
image_manual_CT_new=cv2.normalize(image_manual_CT,None,alpha=0,beta=255,norm_type=cv2.NORM_MINMAX)#,dtype=cv2.CV_32F)
new_type=image_manual_CT_new.astype(np.uint16)
all_new_type=cv2.cvtColor((new_type).astype(np.uint16),cv2.COLOR_GRAY2RGB)
contours=cv2.findContours(mask_red_thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
contours=contours[0] if len(contours)==2 else contours[1]
for con_ind,cntr in enumerate(contours): #Plot contour around possible FP nodule
one_box=all_new_type.copy()
x,y,w,h=cv2.boundingRect(cntr)
cv2.rectangle(one_box,(x,y),(x+w,y+h),(0,0,255),2)
if len(np.unique(mask_red_thresh))>1 and flag==0: #Add image to FP images list
AI_images.append(one_box)
AI_images_fp.append(one_box)
AI_images_fp_slice.append(tp_AI_final[ind_remove[0][0]])
#Set flag to 1 if this was the only nodule on that slice which was classified as TP whereas it was FP
if len(np.where(np.array(AI_pats[path.split('/')[-1]])==tp_AI_final[ind_remove[0][0]])[0])<=1:
flag=1
image_manual_CT=np.zeros((512,512))
flag=0
#####Extraction of annotations for radiologists finishes here
del tp_AI_final[ind_remove[0][0]] #Then remove it from TP_AI
del vols_tp_AI[ind_remove[0][0]] #Same for AI volume
del AI_images[ind_remove[0][0]] #Delete a random of the same occuring images here just for same number below
del AI_images_tp[ind_remove[0][0]] #Delete it from TP AI images
del AI_images_tp_slice[ind_remove[0][0]] #Delete TP AI slice too
del AI_images_tp_box[ind_remove[0][0]] #And TP AI box coordinates
fn_final.append(wrong_annot) #Add it to FN
vols_fn.append(vols_tp[ind_remove[0][0]]) #Add FN volume to list
del vols_tp[ind_remove[0][0]] #Delete it from TP volumes
CT_scans_fn.append(CT_scans_same_slice[ind_wrong]) #Add GT scan and slice to FN lists
CT_scans_fn_slices.append(wrong_annot) #Add slice to FN slices
del CT_scans_tp[ind_remove[0][0]] #Delete it from TPs
try: #Confirm that we have the expected number of TPs, FPs and FNs
assert len(tp_final)+len(fn_final)==len(slice_vals)
assert len(tp_final)+len(fp_final)==len(AI_pats[path.split('/')[-1]])
except: #If not print error - Should be checked by someone in that case
print("ERROR!! File should be checked manually")
print(traceback.format_exc())
error_flag=1
pass
try: #Confirm that we have the expected number of TPs, FPs and FNs
assert len(CT_scans_tp)+len(CT_scans_fn)==len(CT_scans)
assert len(AI_images_tp)+len(AI_images_fp)==len(AI_images) #873698 gives error
except: #If not print error - Should be checked by someone in that case
print("ERROR in images!! File should be checked manually")
print(traceback.format_exc())
error_flag_images=1
pass
try:
assert len(CT_scans_tp)+len(CT_scans_fn)==len(tp_final)+len(fn_final)
assert len(AI_images_tp)+len(AI_images_fp)==len(tp_AI_final)+len(fp_final)
except:
print("Should have error above if in here! File should be checked manually")
error_flag_images=1
print('TP list is {}'.format(tp_final))
print('FN list is {}'.format(fn_final))
print('FP list is {}'.format(fp_final))
print('TP_AI list is {}'.format(tp_AI_final))
print('Volumes of TP are: {}'.format(vols_tp))
print('Volumes of FN are: {}'.format(vols_fn))
print('Volumes of FP are: {}'.format(vols_fp))
print('Volumes of TP_AI are: {}'.format(vols_tp_AI))
print('Num of CT_scans',len(CT_scans))
print('Num of CT_scans_tp',len(CT_scans_tp))
print('Num of CT_scans_fn',len(CT_scans_fn))
print('Num of AI_images',len(AI_images))
print('Num of AI_images_tp',len(AI_images_tp))
print('Num of AI_images_fp',len(AI_images_fp))
#Initialize empty lists to keep track of indices of TP, FP, FN and TP_AI
ids_tp_final=[]
ids_fp_final=[]
ids_fn_final=[]
ids_tp_AI_final=[]
#Vols and loops added for cases like 892519 with same AI slice two times - Should work for same GT slice as well
#Loop over all volumes of nodules and find their IDs
#Below flags are used since not possible to use 'break'
for tp_vol in vols_tp: #loop over TP volumes
flag=0
for tp in tp_final: #Loop over TP slices
occurence=np.where(np.array(volume_ids)==tp_vol) #Find where this volume occurs in in volume_ids
occurence2=np.where(np.array(slice_vals)==tp) #Find where this slice occurs in GT slices
for i in occurence[0]: #Loop over indices where volume can be found
if np.where(np.array(occurence2)==i)[1].size: #If the slice exists that contains that volume
for j in range(len(occurence2[0])): #Loop over occurences of that slice
if occurence2[0][j]==i:
if nodule_ids[occurence2[0][j]] not in ids_tp_final: #Find slice of the volume and confirm that is not in TP slices
if nodule_ids[occurence2[0][j]] not in ids_fn_final: #And not in FN slices
if flag==0: #If not already added
ids_tp_final.append(nodule_ids[occurence2[0][j]]) #Add it to TP slices
flag=1 #Set flag to 1 to avoid adding it again
for fp_vol in vols_fp: #Similar as above for FPs
flag=0
for fp in fp_final:
occurence=np.where(np.array(volumes_AI)==fp_vol)
occurence2=np.where(np.array(AI_pats[path.split('/')[-1]])==fp)
for i in occurence[0]:
if np.where(np.array(occurence2)==i)[0].size:
for j in range(len(occurence2[0])):
if occurence2[0][j]==i:
if AI_nodule_ids[occurence2[0][j]] not in ids_fp_final:
if AI_nodule_ids[occurence2[0][j]] not in ids_tp_AI_final:
if flag==0:
ids_fp_final.append(AI_nodule_ids[occurence2[0][j]])
flag=1
for fn_vol in vols_fn: #Similar as above for FNs
flag=0
for fn in fn_final:
occurence=np.where(np.array(volume_ids)==fn_vol)
occurence2=np.where(np.array(slice_vals)==fn)
for i in occurence[0]:
if np.where(np.array(occurence2)==i)[0].size:
for j in range(len(occurence2[0])):
if occurence2[0][j]==i:
if nodule_ids[occurence2[0][j]] not in ids_tp_final:
if nodule_ids[occurence2[0][j]] not in ids_fn_final:
if flag==0:
ids_fn_final.append(nodule_ids[occurence2[0][j]])
flag=1
for tp_vol_AI in vols_tp_AI: #Similar as above for TP_AI
flag=0
for tp_AI in tp_AI_final:
occurence=np.where(np.array(volumes_AI)==tp_vol_AI)
occurence2=np.where(np.array(AI_pats[path.split('/')[-1]])==tp_AI)
for i in occurence[0]:
if np.where(np.array(occurence2)==i)[0].size:
for j in range(len(occurence2[0])):
if occurence2[0][j]==i:
if AI_nodule_ids[occurence2[0][j]] not in ids_fp_final:
if AI_nodule_ids[occurence2[0][j]] not in ids_tp_AI_final:
if flag==0:
ids_tp_AI_final.append(AI_nodule_ids[occurence2[0][j]])
flag=1
print("IDs of TP are {}".format(ids_tp_final))
print("IDs of FN are {}".format(ids_fn_final))
print("IDs of FP are {}".format(ids_fp_final))
print("IDs of TP_AI are {}".format(ids_tp_AI_final))
try: #Confirm that we count all nodules
assert len(ids_tp_final)==len(vols_tp)==len(tp_final)
assert len(ids_fp_final)==len(vols_fp)==len(fp_final)
assert len(ids_fn_final)==len(vols_fn)==len(fn_final)
assert len(ids_tp_AI_final)==len(vols_tp_AI)==len(tp_AI_final)
except:
print("ERROR! Some nodule were missed or double counted! File should be checked manually")
error_flag=1
try:
assert(len(tp_AI_final)+len(fp_final)==len(ids_tp_AI_final)+len(ids_fp_final))
except:
print("Error! Some errors in the above loop that tries to fix issue of same AI slice two times")
print('\n')
if len(tp_AI_final)!=len(tp_final):
print("Error! List tp_AI_final does not contain only matching TP of radiologists or contains fewer than them! File should be checked manually")
error_flag=1
print('\n')
print('-----------------------------------------------------------------------------------')
print("\n")
#Plot FP and FN slices with nodule contour around them
#Cases 440453 confuse FP and TP in consecutive slices and 278319 similarly
for ind_fn,one_box in enumerate(CT_scans_fn): #Loop over FN images
plt.ioff()
plt.figure()
plt.imshow(one_box)
plt.title(str(int(CT_scans_fn_slices[ind_fn])))
ind=0 #Here for same slices to avoid overlapping previously saved image
while os.path.exists(outputs[index_init]+'/'+str(int(CT_scans_fn_slices[ind_fn]))+'fnlastfinal_gray'+str(ind)+'.png'):
ind=ind+1 #If more than one FN in same slice then name them 1,2 etc.
while os.path.exists(outputs[index_init]+'/0'+str(int(CT_scans_fn_slices[ind_fn]))+'fnlastfinal_gray'+str(ind)+'.png'):
ind=ind+1 #If more than one FN in same slice then name them 1,2 etc.
while os.path.exists(outputs[index_init]+'/00'+str(int(CT_scans_fn_slices[ind_fn]))+'fnlastfinal_gray'+str(ind)+'.png'):
ind=ind+1 #If more than one FN in same slice then name them 1,2 etc.
try:
if vols_fn[ind_fn]>=30 and len(CT_scans_fn)==len(vols_fn): #If vol>30 and we don't have any image errors
if int(CT_scans_fn_slices[ind_fn])<100: #To have them ordered to be reviewed faster
if int(CT_scans_fn_slices[ind_fn])<10:
plt.savefig(outputs[index_init]+'/00'+str(int(CT_scans_fn_slices[ind_fn]))+'fnlastfinal_gray'+str(ind)+'.png',dpi=200)
else:
plt.savefig(outputs[index_init]+'/0'+str(int(CT_scans_fn_slices[ind_fn]))+'fnlastfinal_gray'+str(ind)+'.png',dpi=200)
else:
plt.savefig(outputs[index_init]+'/'+str(int(CT_scans_fn_slices[ind_fn]))+'fnlastfinal_gray'+str(ind)+'.png',dpi=200)
except:
if len(CT_scans_fn)!=len(vols_fn): #For error cases save all images since they will be checked manually
if int(CT_scans_fn_slices[ind_fn])<100: #To have them ordered to be reviewed faster
if int(CT_scans_fn_slices[ind_fn])<10:
plt.savefig(outputs[index_init]+'/00'+str(int(CT_scans_fn_slices[ind_fn]))+'fnlastfinal_gray'+str(ind)+'.png',dpi=200)
else:
plt.savefig(outputs[index_init]+'/0'+str(int(CT_scans_fn_slices[ind_fn]))+'fnlastfinal_gray'+str(ind)+'.png',dpi=200)
else:
plt.savefig(outputs[index_init]+'/'+str(int(CT_scans_fn_slices[ind_fn]))+'fnlastfinal_gray'+str(ind)+'.png',dpi=200)
plt.close()
for ind_fp,one_box in enumerate(AI_images_fp): #Same for FP images
plt.ioff()
plt.figure()
plt.imshow(one_box)
plt.title(str(int(AI_images_fp_slice[ind_fp])))
ind=0
while os.path.exists(outputs[index_init]+'/'+str(int(AI_images_fp_slice[ind_fp]))+'fpAIlastfinal_gray'+str(ind)+'.png'):
ind=ind+1
while os.path.exists(outputs[index_init]+'/0'+str(int(AI_images_fp_slice[ind_fp]))+'fpAIlastfinal_gray'+str(ind)+'.png'):
ind=ind+1
while os.path.exists(outputs[index_init]+'/00'+str(int(AI_images_fp_slice[ind_fp]))+'fpAIlastfinal_gray'+str(ind)+'.png'):
ind=ind+1
try: #Since when we have errors we might have more FP images, and therefore we won't be able to access ind_fp in vols
if vols_fp[ind_fp]>=30 and len(AI_images_fp)==len(vols_fp): #If vol>30 and we don't have any image errors
if int(AI_images_fp_slice[ind_fp])<100: #To have them ordered to be reviewed faster
if int(AI_images_fp_slice[ind_fp])<10:
plt.savefig(outputs[index_init]+'/00'+str(int(AI_images_fp_slice[ind_fp]))+'fpAIlastfinal_gray'+str(ind)+'.png',dpi=200)
else:
plt.savefig(outputs[index_init]+'/0'+str(int(AI_images_fp_slice[ind_fp]))+'fpAIlastfinal_gray'+str(ind)+'.png',dpi=200)
else:
plt.savefig(outputs[index_init]+'/'+str(int(AI_images_fp_slice[ind_fp]))+'fpAIlastfinal_gray'+str(ind)+'.png',dpi=200)
except:
if len(AI_images_fp)!=len(vols_fp): #For error cases save all images since they will be checked manually
if int(AI_images_fp_slice[ind_fp])<100: #To have them ordered to be reviewed faster
if int(AI_images_fp_slice[ind_fp])<10:
plt.savefig(outputs[index_init]+'/00'+str(int(AI_images_fp_slice[ind_fp]))+'fpAIlastfinal_gray'+str(ind)+'.png',dpi=200)
else:
plt.savefig(outputs[index_init]+'/0'+str(int(AI_images_fp_slice[ind_fp]))+'fpAIlastfinal_gray'+str(ind)+'.png',dpi=200)
else:
plt.savefig(outputs[index_init]+'/'+str(int(AI_images_fp_slice[ind_fp]))+'fpAIlastfinal_gray'+str(ind)+'.png',dpi=200)
plt.close()
#Add information about nodules (id and volume) to dataframe
dict_add=dict.fromkeys(column_names) #Get column names
dict_add['participant_id']=path.split('/')[-1] #get participant_id
small_nodule_flag=0 #To not take into account findings smaller than 30mm3 when calculating number of TP, FP, and FN
if error_flag==0 or distance_error==1: #If no errors
try: #Since we might have some errors if above process didn't work for that participant
#Initialize total number of findings in each volume subgroup for each of TP, FP, and FN to 0
tp_100=0
tp_100_300=0
tp_300plus=0
fp_100=0
fp_100_300=0
fp_300plus=0
fn_100=0
fn_100_300=0
fn_300plus=0
for ai_ind,ai_id in enumerate(ids_tp_AI_final): #Loop over TP and fill corresponding fields
dict_add['AI_nod'+str(ai_id)]=int(tp_AI_final[ai_ind]) #AI nodule ID
dict_add['V'+str(ai_id)]=vols_tp_AI[ai_ind] #Volume of tha nodule
#Those below could also added in the 'ids_tp_final' loop below
if float(vols_tp[ai_ind])<=100 and float(vols_tp[ai_ind])>=30:
tp_100=tp_100+1
elif float(vols_tp[ai_ind])>100 and float(vols_tp[ai_ind])<=300:
tp_100_300=tp_100_300+1
elif float(vols_tp[ai_ind])>300:
tp_300plus=tp_300plus+1
else:
print("Error! For TP, Volume in GT <30mm3 and equal to {}mm3. Might have subsolid component as well that wasn't considered".format(float(vols_tp[ai_ind])))
print('\n')
for ai_ind_fp,ai_id_fp in enumerate(ids_fp_final): #Same as above for FPs
dict_add['AI_nod'+str(ai_id_fp)]=int(fp_final[ai_ind_fp])
dict_add['V'+str(ai_id_fp)]=vols_fp[ai_ind_fp]
if float(vols_fp[ai_ind_fp])<=100 and float(vols_fp[ai_ind_fp])>=30:
fp_100=fp_100+1
elif float(vols_fp[ai_ind_fp])>100 and float(vols_fp[ai_ind_fp])<=300:
fp_100_300=fp_100_300+1
elif float(vols_fp[ai_ind_fp])>300:
fp_300plus=fp_300plus+1
else:
print("FP nodule with volume {}mm3 not taken into account".format(float(vols_fp[ai_ind_fp])))
print('\n')
small_nodule_flag=1
for ai_ind_tp,ai_id_tp in enumerate(ids_tp_final): #Here only to get TP_AI ID and fill corresponding column
ai_ind=ids_tp_AI_final[ai_ind_tp]
dict_add['AI_nod'+str(ids_tp_AI_final[ai_ind_tp])]=str(dict_add['AI_nod'+str(ids_tp_AI_final[ai_ind_tp])])+' - L'+str(ai_id_tp)
for ind_fn,id_fn in enumerate(ids_fn_final): #Same for FNs
if float(vols_fn[ind_fn])<=100 and float(vols_fn[ind_fn])>=30:
fn_100=fn_100+1
elif float(vols_fn[ind_fn])>100 and float(vols_fn[ind_fn])<=300:
fn_100_300=fn_100_300+1
elif float(vols_fn[ind_fn])>300:
fn_300plus=fn_300plus+1
else:
print("Error! For FN, Volume in GT <30mm3 and equal to {}mm3".format(float(vols_fn[ind_fn])))
print('\n')
#Add all the above calculated values to the proper names in dictionary
dict_add['0-100tp']=tp_100
dict_add['100-300tp']=tp_100_300
dict_add['300+ tp']=tp_300plus
dict_add['0-100fp']=fp_100
dict_add['100-300fp']=fp_100_300
dict_add['300+ fp']=fp_300plus
dict_add['0-100fn']=fn_100
dict_add['100-300fn']=fn_100_300
dict_add['300+ fn']=fn_300plus
#If no TP, FP, and FNs then fill in position 'AI_nod1' that there were no nodules
if tp_100==0 and tp_100_300==0 and tp_300plus==0 and fp_100==0 and fp_100_300==0 and fp_300plus==0 and small_nodule_flag==0:
dict_add['AI_nod1']='nonods'
except: #Don't fill anything if any errors for that participant
pass
#If error in images only (or error with two same AI or GT images) then add '!!!' - Might also confuse TP ids when distance error
if (error_flag_images==1 and error_flag==0) or distance_error==1:
dict_add['AI_nod1']='!!!'+str(dict_add['AI_nod1'])
#if any error with TP, FP, or FN then add 'xxx'
if error_flag==1 and error_flag_images==1:
dict_add['AI_nod1']='xxx'+str(dict_add['AI_nod1'])
df_all=df_all.append(dict_add,ignore_index=True) #Add the dictionary to dataframe
print('-----------------------------------------------------------------------------------')
print("\n")
#All below just for print AI_slices, original_CT_slices, annotations and SEG files
#Some prints were commented since they were incorrect, like 'Ground truth is'...
slice_vals_found=[] #Empty lists to be filled with the slice values from REDCap that were detected by AI below
slice_vals=np.array(slice_vals) #Since for case 757591 we get a tuple and so, an error below
#Get which detections of AI correspond to which slices of the original CT scan
#There are also FP detections from AI that are not taken into account here
detectionsCT=[]
detectionsAI=[]
if list(AI_num_nods[index_init].keys())!=[]: #If there are slices with nodules in AI outputs
for AI_file in AI_num_nods[index_init].keys(): #Loop over AI slices with nodules
for orig_CT_slice in original_CTs[index_init]: #Loop over original CT slices with nodules
if int(AI_file.split('.')[4])==size_CTs[index_init]-int(orig_CT_slice.split('.')[4]): #If there is correspondance between slices of original CT and AI output (reverse order) then we have a match
detectionsCT.append(orig_CT_slice) #Add original CT slice to list
detectionsAI.append(AI_file) #Add AI output slice to list
else: #If there are no AI detections
if slice_vals.size!=0: #And if there are manual annotations
print('IMPORTANT!: There no AI detections but there are manual annotations')
else:
print("IMPORTANT!: There are no AI detections and there are no manual annotations")
print('\n')
#If we have AI outputs, print the correspondance for all (SEG_files, AI outputs, Original CT slices and Annotated CT slices)
if list(AI_pats[path.split('/')[-1]])!=[]:
for ind1,CT in enumerate(detectionsCT): #Loop over original CT slices for which we have AI detections
for ind2, orig_CT in enumerate(original_CTs_final[index_init]): #Loop over a similar list of original slices for which we have AI detections but with a different order (index is used below so that there is correspondance)
if CT==orig_CT: #If we have the same CT slice in both lists
if int(original_CTs_final[index_init][ind2].split('.')[4]) in slice_vals: #Check if this is an actual nodule
try:
for i in np.where(slice_vals==int(original_CTs_final[index_init][ind2].split('.')[4]))[0]: #Loop over indices of REDCap slices that exist in annotated CTs as well
nod_ids=int(nodule_ids[i]) #Get IDs of these nodules from REDCap (L01 etc. in Syngo.via manual annotations)
volumes=float(volume_ids[i]) #Get volumes of these nodules
# print("Ground truth: The volume of this is {}, the ID in manual annotations is {}, and the slice is {}".format(volumes,nod_ids,np.unique(slice_vals[np.where(slice_vals==int(original_CTs_final[index_init][ind2].split('.')[4]))])))
except:
pass
slice_vals_found.append(int(original_CTs_final[index_init][ind2].split('.')[4])) #True nodule found
try: #If SEG file exists print correspondance
# print(colored('Segmentation mask is: {}, slice {}', 'green').format(SEG_masks[index_init][ind2],detectionsAI[ind1].split('.')[4]))
print('Segmentation mask is: {}, slice {}'.format(SEG_masks[index_init][ind2],detectionsAI[ind1].split('.')[4]))
except IndexError:
print("No SEG mask available")
pass
# print(colored('Annotated CT is: {}', 'green').format(annotated_CTs_final[index_init][ind2]))
# print(colored('Original CT image is: {}', 'green').format(original_CTs_final[index_init][ind2]))
# print(colored('AI output image with nodules is: {}', 'green').format(detectionsAI[ind1]))
print('Annotated CT is: {}'.format(annotated_CTs_final[index_init][ind2]))
print('Original CT image is: {}'.format(original_CTs_final[index_init][ind2]))
print('AI output image with nodules is: {}'.format(detectionsAI[ind1]))
print("\n")
else: #possible FP - maybe also be TP that extends from the slice that exists in slice_vals
for i in slice_vals: #Check slices close to slice_vals to see if we have annotations - maybe also be TP but for now added in FP
if int(original_CTs_final[index_init][ind2].split('.')[4]) in range(i-1,i-5,-1) or int(original_CTs_final[index_init][ind2].split('.')[4]) in range(i+1,i+6):
# print("High chances of having TP (even though not same slice as in REDCap or no annotation file available) for the following:")
try:
nod_ids=int(nodule_ids[np.where(slice_vals==int(original_CTs_final[index_init][ind2].split('.')[4]))])
volumes=float(volume_ids[np.where(slice_vals==int(original_CTs_final[index_init][ind2].split('.')[4]))])
# print("If so, the volume of this is {} and the ID in manual annotations is {} and slice is {}".format(volumes,nod_ids,slice_vals[np.where(slice_vals==int(original_CTs_final[index_init][ind2].split('.')[4]))]))
except:
pass
try:
# print(colored('Segmentation mask is: {}, slice {}', 'green').format(SEG_masks[index_init][ind2],detectionsAI[ind1].split('.')[4]))
print('Segmentation mask is: {}, slice {}'.format(SEG_masks[index_init][ind2],detectionsAI[ind1].split('.')[4]))
except IndexError:
print("No SEG mask available")
pass
# print(colored('Annotated CT is: {}', 'green').format(annotated_CTs_final[index_init][ind2]))
# print(colored('Original CT image is: {}', 'green').format(original_CTs_final[index_init][ind2]))
# print(colored('AI output image with nodules is: {}', 'green').format(detectionsAI[ind1]))
print('Annotated CT is: {}'.format(annotated_CTs_final[index_init][ind2]))
print('Original CT image is: {}'.format(original_CTs_final[index_init][ind2]))
print('AI output image with nodules is: {}'.format(detectionsAI[ind1]))
print("\n")
for ind3, CT_file in enumerate(original_CTs_final[index_init]): #Print files with nodules not found by the AI
if CT_file not in detectionsCT: #Since it contains only slices that were also found by the AI
if int(original_CTs_final[index_init][ind3].split('.')[4]) in slice_vals: #Check if this is an actual nodule
slice_vals_found.append(int(original_CTs_final[index_init][ind3].split('.')[4])) #To make sure that we have taken it into account
# print('True nodule not detected by AI:')
try:
# print(colored('Segmentation mask is: {}', 'green').format(SEG_masks[index_init][ind3]))
print('Segmentation mask is: {}'.format(SEG_masks[index_init][ind3]))
except IndexError:
pass
# print(colored('Annotated CT is: {}', 'green').format(annotated_CTs_final[index_init][ind3]))
# print(colored('Original CT image is: {}', 'green').format(original_CTs_final[index_init][ind3]))
print('Annotated CT is: {}'.format(annotated_CTs_final[index_init][ind3]))
print('Original CT image is: {}'.format(original_CTs_final[index_init][ind3]))
print("\n")
else:
# print("No true nodule and not detected by AI or no annotation file available")
try:
# print(colored('Segmentation mask is: {}', 'green').format(SEG_masks[index_init][ind3]))
print('Segmentation mask is: {}'.format(SEG_masks[index_init][ind3]))
except:
pass
# print(colored('Annotated CT is: {}', 'green').format(annotated_CTs_final[index_init][ind3]))
# print(colored('Original CT image is: {}', 'green').format(original_CTs_final[index_init][ind3]))
print('Annotated CT is: {}'.format(annotated_CTs_final[index_init][ind3]))
print('Original CT image is: {}'.format(original_CTs_final[index_init][ind3]))
print("\n")
#We never get in the loop below - Confirmed
for ind4, CT_file_rest in enumerate(original_CTs[index_init]): #Print files with nodules not found by the AI and not having SEG file
if CT_file_rest not in detectionsCT and CT_file_rest in slice_vals: #CT_file_rest not in original_CTs_final[index_init] and
print("ERROR!! We didn't expect to be here!") #Confirmed
# print(colored('Annotated CT is: {}', 'green').format(annotated_CT_files[index_init][ind4]))
# print(colored('Original CT image is: {}', 'green').format(original_CTs[index_init][ind4]))
print('Annotated CT is: {}'.format(annotated_CT_files[index_init][ind4]))
print('Original CT image is: {}'.format(original_CTs[index_init][ind4]))
print("\n")
#We should only be in here when there are manual annotations but no AI detections (FN)
else: #otherwise print only SEG files, annotated CT images and original CT slices (if no AI outputs) - only for SEG files that exist
for index in range(len(original_CTs_final[index_init])):
try:
# print(colored('Segmentation mask is: {}', 'green').format(SEG_masks[index_init][index]))
print('Segmentation mask is: {}'.format(SEG_masks[index_init][index]))
except IndexError:
print('SEG mask was not available')
print("We got in here because we didn't have any AI files. This could be because no nodules detected by AI or because no files provided by us.")
# print(colored('Annotated CT is: {}', 'green').format(annotated_CTs_final[index_init][index]))
# print(colored('Original CT image is: {}', 'green').format(original_CTs_final[index_init][index]))
print('Annotated CT is: {}'.format(annotated_CTs_final[index_init][index]))
print('Original CT image is: {}'.format(original_CTs_final[index_init][index]))
print("\n")
print('-----------------------------------------------------------------------------------')
print("\n")
#Print all errors that may exist
if errors_SEG[index_init]!=[] and errors_SEG[index_init]!=['No Segmentation Files available']:
print('There were errors in the following SEG files (step1): {}'.format(errors_SEG[index_init]))
if empty_CT_files[index_init]!=[]:
print('There were errors in the following CT files (step2): {}'.format(empty_CT_files[index_init]))
if possible_nodules_excluded[index_init]!=[]:
print('Possible nodules excluded due to low threshold value (step2): {}'.format(possible_nodules_excluded[index_init]))
try:
if SEG_masks_errors[index_init]!=[] and (len(SEG_masks_errors[index_init])>1 or (SEG_masks_errors[index_init][0] not in SEG_masks[index_init])): #If more than enough errors in SEG files,
# or 1 but which not added to SEG files list
print('Problem with SEG files {}'.format(SEG_masks_errors[index_init]))
except:
print("In the Except statement here")
print("Size of segmentation file is {}".format(size_SEG[index_init]))
print("\n")
end=time.time()
print("Time it took to run full code is {} secs".format(end-start))
sys.stdout.close()
sys.stdout = sys.__stdout__
#--- Final post-processing: assemble and save the summary spreadsheet (runs once, after all participants) ---
#Add two new columns to df, one with the IDs of nodules, and one with their slice numbers
IDs=[]
pat_names_ids=[]
for pat,ids in RedCap_ids.items(): #Loop over all IDs and add participants and their IDs to corresponding lists
    pat_names_ids.append(pat)
    IDs.append(ids)
slice_ids=[]
pat_names_slices=[]
for pat,slices in RedCap_pats.items(): #Same as above for participants and their slice numbers
    pat_names_slices.append(pat)
    slice_ids.append(slices)
#Insert only when the dataframe rows line up 1:1 with the REDCap dictionaries,
#otherwise the new column would be silently misaligned with participants
if list(df_all['participant_id'])==pat_names_ids: #Add IDs as second column (since below we add slices as first again)
    df_all.insert(0,'IDs',IDs)
if list(df_all['participant_id'])==pat_names_slices: #Add slices as first column
    df_all.insert(0,'Slice numbers',slice_ids)
#Taken from stackoverflow.com/questions/54109548/how-to-save-pandas-to-excel-with-different-colors
#Save the final df to an xlsx file named after the parent output folder
writer=pd.ExcelWriter(output_path.split('/')[-2]+'.xlsx',engine='xlsxwriter') #Create df with XlsxWriter as engine
df_all.to_excel(writer,sheet_name='Sheet1',index=False) #Convert dataframe to excel
#Get xlsxwriter workbook and worksheet objects
workbook=writer.book
worksheet=writer.sheets['Sheet1']
#Add a format - Light red fill with dark red text
format1=workbook.add_format({'font_color':'#9C0006','bg_color':'#FFC7CE'})
#Set conditional format range (header row is row 0, so data starts at row 1)
start_row=1
end_row=len(df_all)
start_col=2 #Start at index 2 since the first two columns are the IDs and the slices with nodules
end_col=3
#Taken from xlsxwriter.readthedocs.io/working_with_conditional_formats.html
#Apply conditions to cell range
worksheet.conditional_format(start_row,start_col,end_row,end_col, #For empty cells the above format
                             {'type':'blanks',
                              'format':format1})
worksheet.conditional_format(start_row,start_col,end_row,end_col, #Same red format for cells that need to be checked manually ('xxx' prefix)
                             {'type':'text',
                              'criteria':'begins with',
                              'value':'xxx',
                              'format':format1})
#https://xlsxwriter.readthedocs.io/working_with_colors.html#colors
format2=workbook.add_format({'bg_color':'orange'}) #New format with orange for cases with errors only in images - These should also be checked manually
worksheet.conditional_format(start_row,start_col,end_row,end_col, #Orange for the '!!!' image-error marker
                             {'type':'text',
                              'criteria':'begins with',
                              'value':'!!!',
                              'format':format2})
writer.close() #Save the writer - if .save() is used then excel file saved as 'locked'
#Below some failure cases extracted from comments in the code:
#Failure cases when same slice more than two times in GT slices
#TP images are not extracted properly for now
#Might fail if GT slice in original_CTs (not in 'Results') and not any AI nodules
#If two FN in the same slice it might fail
#Probably fails when one TP and one FP in the same slice (as in 673634)
#Fails when 3 consecutive FP slices
import numba as nb
import numpy as np
@nb.njit(fastmath=False, parallel=False)
def closest(point_assume_f, grid_points_list_f):
    """Return the index of the grid point angularly nearest to a direction.

    Parameters
    ----------
    point_assume_f : (3,) float array
        Query direction; assumed to be a unit vector — TODO confirm with callers.
    grid_points_list_f : (N, 3) float array
        Grid directions, one (assumed unit) vector per row.

    Returns
    -------
    int
        Row index of the nearest grid point; 0 if the grid is empty.
    """
    # arccos is strictly decreasing on [-1, 1], so the point with the smallest
    # angular distance arccos(dot) is exactly the one with the LARGEST dot
    # product.  Maximising the dot product directly avoids N arccos calls and,
    # more importantly, avoids arccos returning NaN when rounding pushes the
    # dot product slightly outside [-1, 1] — a NaN distance made the original
    # formulation silently skip the true nearest candidate.
    N = grid_points_list_f.shape[0]
    best_index = 0
    best_dot = -2.0  # below the minimum possible dot product of unit vectors
    for i in range(N):
        d = (
            grid_points_list_f[i, 0] * point_assume_f[0]
            + grid_points_list_f[i, 1] * point_assume_f[1]
            + grid_points_list_f[i, 2] * point_assume_f[2]
        )
        if d > best_dot:  # strict '>' keeps the first of tied candidates, as before
            best_index = i
            best_dot = d
    return best_index
@nb.njit(fastmath=False, parallel=False)
def trfind(point_assume_f, grid_points_list_f):
    """Find the three grid points angularly closest to a query direction.

    Parameters
    ----------
    point_assume_f : (3,) float array
        Query direction; assumed to be a unit vector — TODO confirm with callers.
    grid_points_list_f : (N, 3) float array
        Grid directions, one (assumed unit) vector per row.

    Returns
    -------
    tuple
        (w_max, w_mid, w_min, i1, i2, i3): the interpolation weights in
        descending order and the indices of the closest, second-closest and
        third-closest grid points.
    """
    N = grid_points_list_f.shape[0]
    # Indices of the 1st-, 2nd- and 3rd-closest grid points found so far.
    i1 = 0
    i2 = 0
    i3 = 0
    # Running smallest / middle / largest of the three best angular distances.
    # Initialised above pi (the maximum possible arccos) so any real distance wins.
    mini = 9.0
    midi = 10.0
    maxi = 11.0
    for i in range(N):
        # Angular distance between the query and grid point i.
        # NOTE(review): arccos yields NaN if rounding pushes the dot product
        # outside [-1, 1]; such a point is then silently skipped — confirm inputs
        # are normalised.
        distance = np.arccos(
            grid_points_list_f[i, 0] * point_assume_f[0]
            + grid_points_list_f[i, 1] * point_assume_f[1]
            + grid_points_list_f[i, 2] * point_assume_f[2]
        )
        if distance < maxi:  # beats at least the current 3rd-best candidate
            if distance > mini:
                if distance < midi:
                    # New 2nd best: demote the old 2nd to 3rd place.
                    maxi = midi
                    i3 = i2
                    midi = distance
                    i2 = i
                else:
                    # New 3rd best only.
                    maxi = distance
                    i3 = i
            else:
                # New overall best: shift previous 1st and 2nd down one slot.
                maxi = midi
                midi = mini
                mini = distance
                i3 = i2
                i2 = i1
                i1 = i
    # Barycentric-style weights of the query w.r.t. the triangle (i1, i2, i3).
    weights = calc_weights_numba(
        grid_points_list_f[i1],
        grid_points_list_f[i2],
        grid_points_list_f[i3],
        point_assume_f,
    )
    # NOTE(review): sorting discards the pairing between each weight and its
    # grid point; callers receive weights in descending order alongside the
    # distance-ordered indices (i1, i2, i3) — confirm this is intended.
    weights.sort()
    return weights[2], weights[1], weights[0], i1, i2, i3
@nb.njit(fastmath=True)
def calc_weights_numba(p1, p2, p3, p_find):
    """Return the absolute scalar triple products of p_find with the triangle edges.

    Each entry is |p_find . (a x b)| for the vertex pairs (p1, p2), (p2, p3)
    and (p3, p1) respectively — the (unnormalised) barycentric-style weights
    of p_find with respect to the spherical triangle (p1, p2, p3), following
    the original Fortran code.  Note the weights are NOT zeroed when the
    underlying triple product is negative; the absolute value is taken instead.

    Parameters
    ----------
    p1, p2, p3 : (3,) float arrays
        Triangle vertices (direction vectors).
    p_find : (3,) float array
        Query direction.

    Returns
    -------
    (3,) float array of non-negative weights.
    """
    w = np.empty(3)
    # p_find . (p1 x p2)
    w[0] = np.dot(p_find, np.cross(p1, p2))
    # p_find . (p2 x p3)
    w[1] = np.dot(p_find, np.cross(p2, p3))
    # p_find . (p3 x p1)
    w[2] = np.dot(p_find, np.cross(p3, p1))
    return np.abs(w)
@nb.njit(fastmath=True)
def geocoords(theta_geo, phi_geo, theta_source, phi_source):
    """Build an orthonormal frame (gx, gy, gz) from two sky directions.

    gz points along (theta_geo, phi_geo) and sl along (theta_source,
    phi_source); gy is the normalised cross product gz x sl, and gx = gy x gz
    completes the right-handed triad.  Angles feed sin/cos directly, so they
    appear to be in radians (unlike geo_to_space, which converts from
    degrees) — TODO confirm with callers.

    Returns
    -------
    gx, gy, gz, sl : (3,) float arrays
        Unit vectors of the frame plus the unit source direction.
    """
    # Unit vector along the 'geo' axis.
    gz = np.empty(3)
    gz[0] = np.sin(theta_geo) * np.cos(phi_geo)
    gz[1] = np.sin(theta_geo) * np.sin(phi_geo)
    gz[2] = np.cos(theta_geo)
    gz = gz / np.sqrt(gz[0] * gz[0] + gz[1] * gz[1] + gz[2] * gz[2])
    # Unit vector toward the source.
    sl = np.empty(3)
    sl[0] = np.sin(theta_source) * np.cos(phi_source)
    sl[1] = np.sin(theta_source) * np.sin(phi_source)
    sl[2] = np.cos(theta_source)
    sl = sl / np.sqrt(sl[0] * sl[0] + sl[1] * sl[1] + sl[2] * sl[2])
    # gy is perpendicular to both gz and sl.
    gy = np.cross(gz, sl)
    gy = gy / np.sqrt(gy[0] * gy[0] + gy[1] * gy[1] + gy[2] * gy[2])
    # gx completes the right-handed frame.
    gx = np.cross(gy, gz)
    gx = gx / np.sqrt(gx[0] * gx[0] + gx[1] * gx[1] + gx[2] * gx[2])
    return gx, gy, gz, sl
@nb.njit(fastmath=True)
def geo_to_space(theta_u, phi_u, gx, gy, gz):
    """Rotate a direction given in the (gx, gy, gz) frame into space
    coordinates.

    ``theta_u`` and ``phi_u`` are in degrees.  Returns the unit
    direction components plus the azimuth (0-360 deg) and elevation
    (deg above the x-y plane) in space coordinates.
    """
    deg2rad = np.pi / 180.0
    t = theta_u * deg2rad
    p = phi_u * deg2rad
    # components of the direction expressed in the local frame
    u = np.sin(t) * np.cos(p)
    v = np.sin(t) * np.sin(p)
    w = np.cos(t)
    # rotate into space coordinates: the rotation columns are gx, gy, gz
    dirx = u * gx[0] + v * gy[0] + w * gz[0]
    diry = u * gx[1] + v * gy[1] + w * gz[1]
    dirz = u * gx[2] + v * gy[2] + w * gz[2]
    norm = np.sqrt(dirx * dirx + diry * diry + dirz * dirz)
    dirx = dirx / norm
    diry = diry / norm
    dirz = dirz / norm
    az = np.arctan2(diry, dirx) / deg2rad
    if az < 0.0:
        az = az + 360.0
    el = 90.0 - np.arccos(dirz) / deg2rad
    return dirx, diry, dirz, az, el
@nb.njit(fastmath=True)
def calc_sphere_dist(ra1, dec1, ra2, dec2, dtr):
    """Great-circle separation between (ra1, dec1) and (ra2, dec2).

    All coordinates are in degrees and ``dtr`` is the degree-to-radian
    factor; the returned separation is in degrees.  Uses the atan2 form
    of the Vincenty formula, which is numerically stable for both very
    small and near-antipodal separations.
    """
    dra = (ra1 - ra2) * dtr
    sd1 = np.sin(dec1 * dtr)
    cd1 = np.cos(dec1 * dtr)
    sd2 = np.sin(dec2 * dtr)
    cd2 = np.cos(dec2 * dtr)
    num = np.sqrt(
        (cd2 * np.sin(dra)) ** 2
        + (cd1 * sd2 - sd1 * cd2 * np.cos(dra)) ** 2
    )
    den = sd1 * sd2 + cd1 * cd2 * np.cos(dra)
    return np.arctan2(num, den) / dtr
@nb.njit(fastmath=True)
def highres_ephoton_interpolator(
    ebin_edge_in, ein, matrix, edif_edge_lo, edif_edge_hi, nhbins
):
    """Interpolate the differential response onto new photon energies.

    For each requested photon energy ``ebin_edge_in[i]`` the bracketing
    pair of tabulated energies in ``ein`` is found, and the response row
    plus its channel-edge arrays are log-linearly interpolated between
    the two tabulated rows.

    Returns
    -------
    new_epx_lo, new_epx_hi, diff_matrix : ndarray, shape (len(ebin_edge_in), 64)
        Interpolated low/high channel edges and response values.
    """
    nobins_in = len(ebin_edge_in)
    # NOTE(review): nvbins is computed but never used, and the search loop
    # below runs to a hard-coded 70 — presumably len(ein); confirm that the
    # tabulated grid always has 70 entries.
    nvbins = len(ein) - 1
    # NOTE(review): 64 output channels are hard-coded (nhbins <= 64 assumed).
    new_epx_lo = np.zeros((nobins_in, 64))
    new_epx_hi = np.zeros((nobins_in, 64))
    diff_matrix = np.zeros((nobins_in, 64))
    # ivfind tracks the last bracketing index; grids are monotonic so the
    # search never needs to restart from the beginning.
    ivfind = 1
    for i in range(nobins_in):
        for j in range(ivfind, 70):
            if (ebin_edge_in[i] >= ein[j - 1]) and ebin_edge_in[i] < ein[j]:
                ivfind = j
                # interpolation weight in log-energy space
                mu = (np.log(ebin_edge_in[i]) - np.log(ein[ivfind - 1])) / (
                    np.log(ein[ivfind]) - np.log(ein[ivfind - 1])
                )
                # clamp tiny numerical over/undershoot to the [0, 1] range
                if (mu < 0.0) and (mu > -1e-5):
                    mu = 0.0
                elif (mu > 1.0) and (mu < 1.00001):
                    mu = 1.0
                for k in range(nhbins):
                    # print(ivfind, k)
                    # edges are interpolated as fractions of the photon
                    # energy, then rescaled to the requested energy
                    new_epx_lo[i, k] = (
                        edif_edge_lo[ivfind - 1, k] /
                        ein[ivfind - 1] * (1 - mu)
                        + edif_edge_lo[ivfind, k] / ein[ivfind] * mu
                    ) * ebin_edge_in[i]
                    new_epx_hi[i, k] = (
                        edif_edge_hi[ivfind - 1, k] /
                        ein[ivfind - 1] * (1 - mu)
                        + edif_edge_hi[ivfind, k] / ein[ivfind] * mu
                    ) * ebin_edge_in[i]
                    diff_matrix[i, k] = (
                        matrix[ivfind - 1, k] *
                        (1 - mu) + matrix[ivfind, k] * mu
                    )
    return new_epx_lo, new_epx_hi, diff_matrix
@nb.njit(fastmath=True)
def atscat_highres_ephoton_interpolator(ebin_edge_in, ein, matrix):
    """Log-linearly interpolate the atmospheric-scattering matrix rows.

    Like :func:`highres_ephoton_interpolator` but the output channel
    grid is taken from the matrix itself, so only the response values
    (not channel edges) are interpolated.  Rows of ``ebin_edge_in``
    outside the tabulated ``ein`` range are left as zeros.
    """
    nobins_in = len(ebin_edge_in) - 1
    nvbins = len(ein) - 1
    nobins_out = matrix.shape[1]
    new_matrix = np.zeros((nobins_in, nobins_out))
    # running lower bound for the bracket search (grids are monotonic)
    ivfind = 0
    # max_i = np.searchsorted(ebin_edge_in, ein[-1])
    # min_i = np.searchsorted(ebin_edge_in, ein[0])
    for i in range(nobins_in):
        for j in range(ivfind, nvbins):
            if (ebin_edge_in[i] >= ein[j]) and (ebin_edge_in[i] < ein[j + 1]):
                ivfind = j
                # interpolation weight in log-energy space
                mu = (np.log(ebin_edge_in[i]) - np.log(ein[ivfind])) / (
                    np.log(ein[ivfind + 1]) - np.log(ein[ivfind])
                )
                # clamp tiny numerical over/undershoot to [0, 1]
                if (mu < 0.0) and (mu > -1e-5):
                    mu = 0.0
                elif (mu > 1.0) and (mu < 1.00001):
                    mu = 1.0
                for k in range(nobins_out):
                    new_matrix[i, k] = (
                        matrix[ivfind, k] * (1 - mu) +
                        matrix[ivfind + 1, k] * mu
                    )
    return new_matrix
@nb.njit(fastmath=True)
def echan_integrator(diff_matrix, edif_edge_lo, edif_edge_hi, nhbins, ebin_edge_out):
    """Integrate a differential response onto the output channel grid.

    Each row of ``diff_matrix`` (counts per channel, on the per-row
    channel grid defined by ``edif_edge_lo``/``edif_edge_hi``) is first
    converted to counts per unit energy, then numerically integrated
    over each output bin of ``ebin_edge_out`` by sampling-and-summing.

    Ported from Fortran: loop indices are 1-based and shifted by -1 on
    every array access; the three branches handle output bins below,
    inside, and above the active response range.

    NOTE(review): ``ihover`` and ``row_entry`` are only assigned inside
    some branches before being read later — the Fortran original relied
    on that flow always being taken; confirm for all input grids.
    """
    nobins_in = diff_matrix.shape[0]
    nobins_out = len(ebin_edge_out) - 1
    # check that this is the right order from FORTRAN
    binned_matrix = np.zeros((nobins_in, nobins_out))
    # binned_matrix = np.zeros((nobins_out,nobins_in))
    row_tot = np.zeros(nobins_out + 1)
    diff_matrix_vec = np.zeros(nhbins)
    edif_edgeh = np.zeros(nhbins + 1)
    edif_cent = np.zeros(nhbins)
    # first is a loop over the photon energies
    # row_entry = 0.
    # ihover =0
    for jcdif in range(1, nobins_in + 1):
        total = 0
        # Build, for this photon energy: the channel centers, the
        # density (counts per unit energy), and the high edges.
        for ivh in range(1, nhbins + 1):
            edif_cent[ivh - 1] = (
                edif_edge_lo[jcdif - 1, ivh - 1] +
                edif_edge_hi[jcdif - 1, ivh - 1]
            ) / 2.0
            total += edif_cent[ivh - 1]
            if edif_cent[ivh - 1] > 0:
                diff_matrix_vec[ivh - 1] = diff_matrix[jcdif - 1, ivh - 1] / (
                    edif_edge_hi[jcdif - 1, ivh - 1] -
                    edif_edge_lo[jcdif - 1, ivh - 1]
                )
            edif_edgeh[ivh - 1] = edif_edge_hi[jcdif - 1, ivh - 1]
        # Extrapolate one extra edge past the last channel.
        edif_edgeh[nhbins] = edif_edge_hi[jcdif - 1, nhbins - 1] + (
            edif_edge_hi[
                jcdif - 1,
                nhbins - 1,
            ]
            - edif_edge_hi[jcdif - 1, nhbins - 2]
        )
        ihlow = 0
        ihhigh = 0
        # An all-zero row contributes nothing; skip it.
        if total == 0:
            continue
        for ihbin in range(1, nobins_out + 1):
            hlow = ebin_edge_out[ihbin - 1]
            hhigh = ebin_edge_out[ihbin]
            hwide = hhigh - hlow
            if ihlow == 0:
                ihlow = np.searchsorted(edif_cent, hlow)
            # --- output bin starts below the first channel center -----
            if hlow <= edif_cent[0]:
                # print('sec 1')
                if hhigh > edif_cent[0]:
                    ihhigh = np.searchsorted(edif_cent, hhigh)
                    nhpoints = (ihhigh) + 2
                    hchunk = hwide / float(nhpoints - 1)
                    for icbin in range(1, nhpoints + 1):
                        euse = hlow + hchunk * float(icbin - 1)
                        if euse <= edif_cent[0]:
                            # print('sec 1 a')
                            row_entry = diff_matrix_vec[0] * \
                                euse / edif_cent[0]
                        else:
                            # print('sec 1 b')
                            icdif = 1
                            icdif = np.searchsorted(edif_cent, euse)
                            if icdif < (ihhigh + 1):
                                row_entry = diff_matrix_vec[icdif - 1] + (
                                    diff_matrix_vec[icdif] -
                                    diff_matrix_vec[icdif - 1]
                                ) * (euse - edif_cent[icdif - 1]) / (
                                    edif_cent[icdif] - edif_cent[icdif - 1]
                                )
                        # sum up horizontal
                        row_tot[ihbin - 1] += row_entry
                        row_entry = 0.0
                    ##
                    row_tot[ihbin - 1] *= hwide / float(nhpoints)
                    # hwide = horizontal bin width
                    # nhpoints = # of samples used
                    # convert from counts/(unit energy) to counts/bin
                else:
                    # whole output bin is below the first channel center:
                    # linear ramp from zero at zero energy
                    row_tot[ihbin - 1] = (
                        diff_matrix_vec[0]
                        * ((hlow + hhigh) / 2.0)
                        / edif_cent[0]
                        * hwide
                    )
                    # if row_tot[ihbin] > 0: print(row_tot[ihbin], ihbin)
            # --- output bin starts at/above the last channel center ---
            if ihlow >= nhbins:
                # print('sec 2')
                if hlow > edif_edgeh[nhbins]:
                    # past the response range entirely: mark and stop
                    row_tot[ihbin - 1] = -1.0
                    ihover = ihbin
                else:
                    if hhigh <= edif_edgeh[nhbins]:
                        row_tot[ihbin - 1] = (
                            diff_matrix_vec[nhbins - 1]
                            * (edif_edgeh[nhbins] - (hlow + hhigh) / 2.0)
                            / (edif_edgeh[nhbins] - edif_cent[nhbins - 1])
                            * hwide
                        )
                    else:
                        # bin straddles the end of the response: triangular tail
                        row_tot[ihbin - 1] = (
                            ((edif_edgeh[nhbins] - hlow) ** 2)
                            * diff_matrix_vec[nhbins - 1]
                            / (2.0 * (edif_edgeh[nhbins] - edif_cent[nhbins - 1]))
                        )
            # --- output bin is inside the channel grid ----------------
            elif ihlow >= 1:  # could be zero??
                if hhigh > edif_edgeh[nhbins]:
                    hwide = (
                        edif_edgeh[nhbins] - hlow
                    )  # total width adjusted for active response range
                    nhpoints = (nhbins) - (ihlow) + 2
                    hchunk = hwide / float(nhpoints - 1)
                    for icbin in range(1, nhpoints + 1):
                        euse = hlow + hchunk * float(icbin - 1)
                        icdif = np.searchsorted(
                            edif_cent[ihlow:], euse) + ihlow
                        if icdif < nhbins:  # again check the index
                            # cosine-smoothed interpolation weight
                            mu = (euse - edif_cent[icdif - 1]) / (
                                edif_cent[icdif] - edif_cent[icdif - 1]
                            )
                            mu2 = (1 - np.cos(mu * np.pi)) / 2.0
                            row_entry = (
                                diff_matrix_vec[icdif - 1]
                                + (diff_matrix_vec[icdif] -
                                   diff_matrix_vec[icdif - 1])
                                * mu2
                            )
                            row_tot[ihbin - 1] += row_entry
                            row_entry = 0.0
                            # del row_entry
                    row_tot[ihbin - 1] *= hwide / float(nhpoints)
                    ihlow = nhbins
                else:
                    if hhigh > edif_cent[nhbins - 1]:
                        ihhigh = nhbins
                    else:
                        ihhigh = np.searchsorted(
                            edif_cent[ihlow:], hhigh) + ihlow
                    # use at least 9 sample points per output bin
                    nhpoints = ihhigh - ihlow + 2
                    if nhpoints < 9:
                        nhpoints = 9
                    hchunk = hwide / float(nhpoints - 1)
                    for icbin in range(1, nhpoints + 1):
                        euse = hlow + hchunk * float(icbin - 1)
                        icdif = ihlow
                        while icdif < ihhigh + 1:  # check
                            # print(icdif, nhbins -2, ihhigh)
                            if icdif <= nhbins - 1:
                                # print(edif_cent[icdif], euse, edif_cent[icdif+1] )
                                if (euse > edif_cent[icdif - 1]) and (
                                    euse <= edif_cent[icdif]
                                ):
                                    row_entry = diff_matrix_vec[icdif - 1] + (
                                        diff_matrix_vec[icdif]
                                        - diff_matrix_vec[icdif - 1]
                                    ) * (euse - edif_cent[icdif - 1]) / (
                                        edif_cent[icdif] - edif_cent[icdif - 1]
                                    )
                                    break
                            else:
                                row_entry = (
                                    diff_matrix_vec[icdif - 1]
                                    * (hhigh - edif_cent[nhbins - 1])
                                    / (edif_cent[nhbins - 1] - edif_cent[nhbins - 2])
                                )
                            icdif += 1
                        row_tot[ihbin - 1] += row_entry
                        row_entry = 0.0
                        # del row_entry
                    # print(row_tot[ihbin])
                    row_tot[ihbin - 1] *= hwide / float(
                        nhpoints
                    )  # BB ADDED [ihbin] #############
                    ihlow = ihhigh
            # -1 sentinel: remainder of the output grid is out of range
            if row_tot[ihbin - 1] == -1:
                break
        if ihbin == nobins_out:
            ihover = nobins_out + 1  # + 1
        # accumulate this row into the binned matrix and reset the scratch row
        for ivhsum in range(1, ihover):
            binned_matrix[jcdif - 1, ivhsum - 1] += row_tot[ivhsum - 1]
            row_tot[ivhsum - 1] = 0.0
        row_tot[ihover - 1] = 0.0
    return binned_matrix
| grburgess/gbm_drm_gen | gbm_drm_gen/matrix_functions.py | matrix_functions.py | py | 17,080 | python | en | code | 3 | github-code | 13 |
11030353105 | #!/usr/bin/env python3
import csv
with open('padron.txt','r') as padron:
    archivo = padron.readlines()
# Parse the electoral-roll text dump into a CSV: the input alternates
# "CIRCUITO:"/"MESA:" header lines with per-voter records that start after
# a "NRO. ORDEN" marker and end on a line whose tail reads "VOTÓ".
with open('padron_definitivo_2019.csv','w') as padron_csv:
    padron_escritor = csv.writer(padron_csv, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    padron_escritor.writerow(['NRO DNI', 'TIPO DNI', 'CLASE', 'APELLIDO', 'NOMBRES', 'DOMICILIO', 'CIRCUITO', 'MESA', 'ORDEN'])
    lista = iter(archivo)
    while True:
        try:
            linea = next(lista)
        except StopIteration:
            break
        if linea[0:9] == 'CIRCUITO:':
            # remember the current circuit; applies to all following records
            circuito = linea[10:(linea.find("-") - 1)]
            continue
        elif linea[0:5] == 'MESA:':
            # remember the current polling table number
            mesa = linea[6:-1]
            continue
        elif linea[0:10] == 'NRO. ORDEN':
            renglon = list()
            linea = next(lista)
            renglon.append(linea) # add the order number to the record
            # collect lines until the terminating "VOTÓ" line
            while True:
                linea = next(lista)
                renglon.append(linea)
                if linea[-5:-1] == 'VOTÓ':
                    break
            print(renglon)
            # last line holds "<...> <dni> <tipo> <clase> ..." fields
            lista_aux= renglon[-1][0:-1].split(' ')
            print(lista_aux)
            tipo_dni = lista_aux[2]
            nro_dni = lista_aux[1]
            clase = lista_aux[3]
            domicilio = renglon[-2][0:-1]
            orden = renglon[0][0:-1]
            # 4-line records have the full name on one line; longer ones
            # have the given names wrapped onto a second line
            if len(renglon) == 4:
                lista_aux = renglon[1].split(',')
                apellido = lista_aux[0]
                nombres = lista_aux[1][0:-1]
            else:
                lista_aux = renglon[1].split(',')
                apellido = lista_aux[0]
                nombres = lista_aux[1][0:-1] + renglon[2][0:-1]
            padron_escritor.writerow([nro_dni,tipo_dni,clase,apellido,nombres,domicilio,circuito,mesa,orden])
        else:
            continue
    # NOTE(review): the triple-quoted block below is dead code from an
    # earlier version — it is evaluated as a no-op string expression.
    """if linea[0:10] == 'NRO. ORDEN':
        flag = True
        linea = next(lista)
        orden = linea[0:-1]
        linea = next(lista)
        lista_aux = linea.split(',')
        apellido = lista_aux[0]
        nombres = linea[1][1:-1]
        linea = next(lista)
        domicilio = linea[0:-1]
        linea = next(lista)
        lista_aux = linea.split(' ')
        print(lista_aux)
        nro_dni = lista_aux[1]
        tipo_dni = lista_aux[2]
        clase = lista_aux[3]
    else:
        flag = False
    if flag:
        padron_escritor.writerow([nro_dni,tipo_dni,clase,apellido,nombres,domicilio,circuito,mesa,orden])"""
| jpgim/extraer-info-padron | padron.py | padron.py | py | 2,061 | python | es | code | 0 | github-code | 13 |
14880515237 | '''
Ввести небольшое натуральное число 2⩽N⩽1000000 и проверить,
является ли оно степенью натурального числа (>1).
Вывести YES или NO соответственно.
Input:
1024
Output:
YES
'''
import itertools
def PowTest(val):
    """Check whether ``val`` is a perfect power ``j ** i`` with j >= 2, i >= 2.

    Returns the pair ``(j, i)`` with the *largest* possible exponent ``i``
    (and therefore the smallest base ``j``) when ``val`` is such a power,
    and ``0`` otherwise — the same contract as the original implementation,
    which scanned every candidate base per exponent.  Here each exponent is
    checked in O(1) via a rounded floating-point root, so the whole test is
    O(log val) instead of O(val ** (1/2)).
    """
    if val < 4:
        # 2 and 3 cannot be written as j**i with j >= 2 and i >= 2
        return 0
    # Largest candidate exponent: 2**i <= val implies i <= log2(val),
    # i.e. i_max = ceil(log2(val)) = (val - 1).bit_length().
    i_max = (val - 1).bit_length()
    for i in range(i_max, 1, -1):
        # Floating-point root gives a candidate base; check its integer
        # neighbours to absorb any rounding error.
        guess = round(val ** (1.0 / i))
        for j in (guess - 1, guess, guess + 1):
            if j >= 2 and j ** i == val:
                return (j, i)
    return 0
digit = int(input("Введите целое число:"))
if 2<=digit<=100000000:
powindex = PowTest(digit)
print("Является степенью {}".format(powindex)
if powindex
else "Не является степенью натурального числа >1")
else:
print("Введенное число выходит за границы 2⩽N⩽100000000") | ekomissarov/edu | py-basics/uneex_homework/4.py | 4.py | py | 1,459 | python | ru | code | 0 | github-code | 13 |
71412256659 | #!/usr/bin/python3
"""Write out solutions waiting on mentoring."""
import datetime
import os
import pathlib
import time
import dotenv
import exercism
def dump_solutions_to_mentor():
    """Fetch solutions waiting for mentoring and write to file."""
    client = exercism.Exercism()
    has_unread = bool(client.notifications()["meta"]["unread_count"])
    # Assemble the page as a list of HTML fragments, joined at the end.
    parts = ["<head><title>Solutions</title></head>"]
    parts.append(f"Generated: {datetime.datetime.now().replace(microsecond=0)}<hr>")
    if has_unread:
        parts.append("<h1><a href='https://exercism.org/mentoring/inbox'>Notifications</a></h1><hr>")
    for track in ("awk", "jq", "bash", "python"):
        parts.append(f"<h2>{track}</h2>")
        parts.append("<ul>")
        for request in client.mentor_requests(track):
            # URL slug: lowercase title with spaces replaced by dashes.
            slug = request['exercise_title'].lower().replace(' ', '-')
            parts.append(
                "<li><a href='https://exercism.org/mentoring/queue?track_slug="
                f"{track}&exercise_slug={slug}'>"
                f"{request['exercise_title']}</a></li>"
            )
        parts.append("</ul><hr>")
    # Static footer links.
    parts.append("<ul>")
    parts.append("<li><a href='https://exercism-team.slack.com/'>Slack</a></li>")
    parts.append(
        "<li><a href='https://github.com/IsaacG/exercism/blob/master/mentoring/python.md'>"
        "Python Notes</a></li>"
    )
    parts.append("</ul>")
    pathlib.Path(os.getenv("html_path")).write_text("\n".join(parts), encoding="utf-8")
if __name__ == "__main__":
    # Load credentials/paths (e.g. html_path) from the local .env file.
    dotenv.load_dotenv()
    # Poll forever, regenerating the page every 3 minutes.
    while True:
        dump_solutions_to_mentor()
        time.sleep(3*60)
| IsaacG/exercism-py | dump_solutions.py | dump_solutions.py | py | 1,630 | python | en | code | 1 | github-code | 13 |
73734582096 | # Напишіть програму, яка для двох додатних цілих чисел знаходить НСД.
#
# Примітка: Для умови циклу в пункті 3 необхідно пам'ятати, що цикл while виконується за умови True, а наш цикл повинен
# закінчитися, тільки якщо gcd поділив обидва числа без залишку.
first = int(input("Enter the first integer: "))
second = int(input("Enter the second integer: "))
gcd = min(first, second)
while not (first % gcd == 0 and second % gcd == 0):
gcd = gcd - 1
print(gcd)
| Radzihowski/GoIT | module_2/ex-9.py | ex-9.py | py | 657 | python | uk | code | 0 | github-code | 13 |
36143184352 | # author : artemis lightman
# date created : feb 6, 2023
# last modified: feb 6, 2023
# command line arguments
# name - name of pdb structure
# dependencies:
# arty.py
# input: pdb structures to relax
# output: relaxed pdb structures
##############################################################################
# runs rosetta relax on pdb structures
##############################################################################
import os
from datetime import datetime
import argparse
import arty
# Record the start time so the total runtime can be reported at the end.
dt_start = datetime.now()
parser = argparse.ArgumentParser(description = "this script completes all structure calculations")
parser.add_argument("-name", help = "name of pdb structure", required = True)
args = parser.parse_args()
# Run Rosetta FastRelax on the input structure, constrained to the starting
# coordinates; stdout is captured in relax_output.txt.
# NOTE(review): args.name is interpolated unquoted into a shell command —
# a name containing spaces or shell metacharacters would break/inject;
# consider subprocess.run with an argument list.
os.system("/gpfs/loomis/apps/avx/software/Rosetta/3.12/main/source/bin/relax.static.linuxgccrelease -s " + args.name + " -relax:constrain_relax_to_start_coords > relax_output.txt")
# Strip the ".pdb" extension; Rosetta appends "_0001" to its output name.
pdb_name = args.name[:-4]
# Rename the Rosetta output ("<name>_0001.pdb") to "relaxed_<base>.pdb",
# where the base additionally drops the last 10 characters of the name.
os.system("mv " + pdb_name + "_0001.pdb relaxed_" + pdb_name[:-10] + ".pdb")
dt = datetime.now()
str_dt = dt.strftime("%H:%M:%S on %d %B, %Y")
str_runtime = arty.get_runtime(dt_start, dt)
print ("run_relax.py finished at " + str_dt + " (runtime " + str_runtime + ")")
| artylightman/ppi_docking | scripts/scoring/run_relax.py | run_relax.py | py | 1,224 | python | en | code | 1 | github-code | 13 |
7120873986 | import os
import sys
import torch
import yaml
import tqdm
import torch.nn as nn
from statistics import mean
import torch.optim as optim
from functools import partial
from joblib import cpu_count
from torch.utils.data import DataLoader
from metric import lineCat, draw, saveInfo, readBest
from model import get_model, mod_model
from dataset import brainDataset
class Trainer:
    """Drives training and validation of a classification network.

    ``config`` supplies the model name, optimizer/scheduler settings and
    epoch count; ``train``/``val`` are the two DataLoaders.  Per-epoch
    loss/accuracy histories are collected and plotted when training ends,
    and the best-validation-accuracy checkpoint is saved under checkpoint/.
    """
    def __init__(self, config, train: DataLoader, val: DataLoader):
        self.config = config
        self.trainLoader = train
        self.valLoader = val
        # Best validation accuracy seen so far (restored from result/ if any).
        self.best_score = readBest(self.config, "result/")
        # Per-epoch metric histories: train/val loss and accuracy.
        self.trainL = []
        self.trainA = []
        self.valL = []
        self.valA = []
        self._init_params()
    def train(self):
        """Run the full training loop, then plot the metric curves."""
        # Bug fix: the original read the module-level ``config`` here
        # instead of the instance's own configuration.
        for epoch in range(self.config["num_epochs"]):
            # Training
            self._run_epoch(epoch)
            # Validation
            self._eval(epoch)
            # Scheduler
            self.scheduler.step()
            torch.save(self.net.state_dict(), "checkpoint/last_{}.h5".format(self.config["model"]))
            # Early stopping: training loss plateaued over the last 10 epochs.
            if len(self.trainL) > 10 and mean(self.trainL[-10:]) - self.trainL[-1] < 0.001:
                break
        # Metric visualization
        # Loss
        lines, bests = lineCat("loss", self.trainL, self.valL)
        draw(lines, bests, self.config, "result/", "Loss", ["train", "val"])
        # Accuracy
        lines, bests = lineCat("acc", self.trainA, self.valA)
        draw(lines, bests, self.config, "result/", "Accuracy", ["train", "val"])
    def _run_epoch(self, epoch):
        """One training pass over the train loader; records loss/accuracy."""
        self.net.train()
        # Read the current learning rate (last param group) for the progress bar.
        for param_group in self.optimizer.param_groups:
            lr = param_group["lr"]
        tq = tqdm.tqdm(self.trainLoader, total=len(list(self.trainLoader)))
        tq.set_description("Epoch: {}, lr: {}".format(epoch, lr))
        i = 0
        total = 0
        correct = 0
        running_loss = 0
        for data in tq:
            # data is a list of [inputs, labels]
            inputs, targets = data
            inputs, targets = inputs.cuda(), targets.cuda()
            # zero the parameter gradients
            self.optimizer.zero_grad()
            # forward + backward + optimize
            outputs = self.net(inputs)
            loss = self.criterion(outputs, targets)
            loss.backward()
            self.optimizer.step()
            # running statistics
            i += 1
            running_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            total += targets.size(0)
            correct += (predicted == targets).sum().item()
            tq.set_postfix(loss="{:.4f}".format(running_loss/i)+"; Accuracy="+"{:.4f}".format(100*correct/total)+"%")
        tq.close()
        # Metric
        self.trainL.append(running_loss/i)
        self.trainA.append(100*correct/total)
    def _eval(self, epoch):
        """Validation pass; checkpoints the model on a new best accuracy."""
        self.net.eval()
        tq = tqdm.tqdm(self.valLoader, total=len(list(self.valLoader)))
        tq.set_description("Validation")
        i = 0
        total = 0
        correct = 0
        running_loss = 0
        # No gradients are needed for validation; this saves memory and
        # replaces the pointless per-batch optimizer.zero_grad() of the
        # original (there is no backward pass here).
        with torch.no_grad():
            for data in tq:
                inputs, targets = data
                inputs, targets = inputs.cuda(), targets.cuda()
                # forward only
                outputs = self.net(inputs)
                loss = self.criterion(outputs, targets)
                # running statistics
                i += 1
                running_loss += loss.item()
                _, predicted = torch.max(outputs, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print("Loss="+"{:.4f}".format(running_loss/i)+"; Accuracy="+"{:.4f}".format(100*correct/total)+"%")
        # Save a checkpoint when this is the best validation accuracy so far.
        if (100*correct/total) > self.best_score:
            self.best_score = (100*correct/total)
            saveInfo(self.config, self.best_score, running_loss/i, epoch, "result/")
            torch.save(self.net.state_dict(), "checkpoint/best_{}.h5".format(self.config["model"]))
        tq.close()
        # Metric
        self.valL.append(running_loss/i)
        self.valA.append(100*correct/total)
        return running_loss/i, 100*correct/total
    def _get_optimizer(self, optimizer, params):
        """Build the optimizer named in ``optimizer['name']`` over ``params``.

        Raises ValueError for an unrecognized optimizer name.
        """
        name = optimizer["name"]
        lr = optimizer["lr"]
        if name == "SGD":
            # momentum fixed at 0.9, as in the original configuration
            opt = optim.SGD(params, momentum=0.9, lr=lr)
        elif name == "Adam":
            opt = optim.Adam(params, lr=lr)
        elif name == "Adadelta":
            opt = optim.Adadelta(params, lr=lr)
        else:
            raise ValueError("Optimizer [%s] not recognized." % name)
        return opt
    def _get_scheduler(self):
        """Step-decay LR scheduler from the ``scheduler`` config section."""
        sched = optim.lr_scheduler.StepLR(self.optimizer, step_size=self.config["scheduler"]["step_size"], gamma=self.config["scheduler"]["gamma"])
        return sched
    def _init_params(self):
        """Instantiate model, loss, optimizer and scheduler on the GPU."""
        self.net = get_model(self.config["model"])
        self.net = mod_model(self.config["train"]["class"], self.config["train_last"], self.config["model"], self.net)
        if self.config["train"]["use_finetune"]:
            self.net = torch.load(self.config["train"]["model_path"])
        self.net.cuda()
        self.criterion = nn.CrossEntropyLoss()
        # only optimize parameters that are not frozen
        self.optimizer = self._get_optimizer(self.config["optimizer"], filter(lambda p: p.requires_grad, self.net.parameters()))
        self.scheduler = self._get_scheduler()
if __name__ == "__main__":
    # Pin the visible GPUs before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = "3, 4"
    torch.multiprocessing.set_sharing_strategy("file_system")
    torch.cuda.empty_cache()
    # Read config.  Bug fix: plain yaml.load(f) requires a Loader argument
    # on PyYAML >= 6 (it raises TypeError there); SafeLoader also avoids
    # executing arbitrary tags from the config file.
    with open('config.yaml', 'r') as f:
        config = yaml.load(f, Loader=yaml.SafeLoader)
    # Read dataset
    batch_size = config["batch_size"]
    get_dataloader = partial(DataLoader, batch_size=batch_size, num_workers=cpu_count(), shuffle=True, drop_last=True)
    # Load data
    datasets = map(config.get, ("train", "val"))
    datasets = map(brainDataset, datasets)
    train, val = map(get_dataloader, datasets)
    # Train
    trainer = Trainer(config, train=train, val=val)
    trainer.train()
| po-sheng/RSNA_ICH | model/train.py | train.py | py | 6,425 | python | en | code | 0 | github-code | 13 |
11639665006 | """work URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from myadmin import views
# Admin-site URL routes.
# NOTE(review): most patterns here are unanchored (no leading '^'), so e.g.
# r'help/$' also matches any path *ending* in "help/"; confirm this is
# intended.  The two 'verify/' patterns share the name 'admin_verify', so
# reverse() will always resolve to the later one.
urlpatterns = [
    url(r'^$', views.welcome,name='admin_welcome'),
    url(r'help/$',views.help,name='admin_hple'),
    url(r'afterset/$',views.afterset,name='admin_afterset'),
    # Login / authentication
    url(r'login/$',views.log,name='admin_login'),
    url(r'verify/$', views.verifycode, name='admin_verify'),
    url(r'verify/([0-9]+)',views.verifycode,name='admin_verify'),
    url(r'doLogin/$',views.dolog,name='admin_dolog'),
    url(r'logout/$',views.logout,name='admin_logout'),
    # Member (user) management
    url(r'user_manage/$', views.user_manage, name='admin_user_manage'),
    url(r'user_add/$',views.user_add,name='admin_user_add'),
    url(r'add_user/$', views.add_user, name='admin_add_user'),
    url(r'update_user/(?P<id>[0-9]+)$', views.update_user, name='admin_update_user'),
    url(r'update_user_conf/(?P<id>[0-9]+)$', views.update_user_conf, name='admin_update_user_conf'),
    url(r'del_user/(?P<id>[0-9]+)$', views.del_user, name='admin_del_user'),
    # Goods category management
    url(r'scan_goods_kind/(?P<pindex>[0-9]*)$',views.scan_goods_kind,name='admin_scan_goods_kind'),
    # url(r'add_goods_kind/$',views.add_goods_kind,name='admin_add_goods_kind'),
    url(r'add_goods_kind/(?P<tid>[0-9]+)$', views.add_goods_kind, name="admin_add_goods_kind"),
    url(r'goods_kind_insert/$', views.goods_kind_insert, name="admin_goods_kind_insert"),
    url(r'goods_kind_del/(?P<tid>[0-9]+)$', views.goods_kind_del, name="admin_goods_kind_del"),
    url(r'goods_kind_edit/(?P<tid>[0-9]+)$', views.goods_kind_edit, name="admin_goods_kind_edit"),
    url(r'goods_kind_update/(?P<tid>[0-9]+)$', views.goods_kind_update, name="admin_goods_kind_update"),
    # Goods information management
    url(r'goods_info/(?P<pindex>[0-9]*)$',views.goods_info,name='admin_goods_info'),
    url(r'goods_set/$',views.goods_set,name='admin_goods_set'),
    url(r'goods_info_insert/$', views.goods_info_insert, name="admin_goods_info_insert"),
    url(r'goods_info_del/(?P<gid>[0-9]+)$', views.goods_info_del, name="admin_goods_info_del"),
    url(r'goods_info_edit/(?P<gid>[0-9]+)$', views.goods_info_edit, name="admin_goods_info_edit"),
    url(r'goods_info_update/(?P<gid>[0-9]+)$', views.goods_info_update, name="admin_goods_info_update"),
    # Order management
    url(r'new_order/$',views.new_order,name='admin_new_order'),
    url(r'old_order/$', views.old_order, name='admin_old_order'),
    # Search
    url(r'goods_search/(?P<pindex>[0-9]*)$',views.goods_search,name='myadmin_goods_search')
]
| Jinnnnyyy/test-ssh-key | work/myadmin/urls.py | urls.py | py | 3,188 | python | en | code | 0 | github-code | 13 |
13440184415 | # -*- coding:utf-8 -*-
import logging
import os
import logging.config
import config
# Project-wide constants (provides DEFAULT_LOG_DIR, among others).
CONSTANTS = config.constant
# Default JSON logging-config file, looked up relative to the working dir.
DEFAULT_CONFIG_FILE = "logging.json"
# Environment variable that may override the config-file path.
DEFAULT_ENV_KEY = "LOG_CFG"
# Fallback log file/level used when no logging config can be loaded.
DEFAULT_LOG_FILE = CONSTANTS.DEFAULT_LOG_DIR + "eas.log"
DEFAULT_LEVEL = logging.INFO
def get_logger(name=None):
    """Return a logger configured from the project's JSON logging config.

    The config path comes from the LOG_CFG environment variable when set,
    otherwise from the default file.  If the config cannot be loaded,
    fall back to ``basicConfig`` with the default level and log file.
    """
    path = os.getenv(DEFAULT_ENV_KEY) or DEFAULT_CONFIG_FILE
    cfg = config.open_config_file(path)
    if cfg:
        logging.config.dictConfig(cfg)
    else:
        logging.basicConfig(level=DEFAULT_LEVEL, filename=DEFAULT_LOG_FILE)
    return logging.getLogger(name)
| wakakalu/Entity-alignment-system | entity_align_system/utils/Logging.py | Logging.py | py | 744 | python | en | code | 8 | github-code | 13 |
23863549553 | import requests
from bs4 import BeautifulSoup
def get_page(url):
    """Download ``url`` and return it parsed as a BeautifulSoup tree (lxml)."""
    html = requests.get(url).text
    return BeautifulSoup(html, 'lxml')
def get_links(url):
    """Return the listing URL of every rental item on a result page."""
    page = get_page(url)
    items = page.find_all('div', class_="content__list--item--main")
    return [item.a.get('href') for item in items]
# Fetch a single rental listing and pull out its title text.
house_url = "https://bj.lianjia.com/zufang/zufang/BJ2500105086433771520.html";
soup = get_page(house_url)
text = soup.find('div', class_='content__aside--title').text
| weizhang3678/pythonstudy | project_examination/WebCrawler.py | WebCrawler.py | py | 520 | python | en | code | 0 | github-code | 13 |
26141909613 | import os
from hammer_art import logo
print(logo)
# name -> highest amount bid by that person
bidders = {}


def adding_to_list(name, amount):
    """Record a bid for ``name``.

    Bug fixed: the original stored the amount as a string
    (``f"{amount}"``), which made the later ``max(bidders.values())``
    comparison lexicographic ("9" > "100").  Storing an int keeps the
    winner selection numeric.
    """
    bidders[name] = int(amount)
def clear_display():
    """Clear the terminal (Windows ``cls``) and redraw the banner so the
    next bidder cannot see the previous bid."""
    os.system('cls')
    print(logo)
# Collect bids until someone answers "no" to the another-bidder prompt.
bid_status_end = False
while not bid_status_end:
    bidders_name = input("What's your name?\n")
    bid_amount = input("How much you want to pay?\n")
    adding_to_list(name=bidders_name, amount=bid_amount)
    next_bid = input("Will there be another bidder? 'yes' or 'no'\n").lower()
    if next_bid == "yes":
        # hide the previous bid from the next bidder
        clear_display()
    elif next_bid == "no":
        bid_status_end = True
# NOTE(review): adding_to_list stores amounts as f-strings, so this max()
# compares strings lexicographically ("9" beats "100") — the winner can be
# wrong for amounts with different digit counts.
max_value = max(bidders.values())
who_won = []
# collect every bidder whose amount equals the maximum (ties possible)
for bidder in bidders:
    if (bidders.get(f"{bidder}")) == max_value:
        who_won.append(bidder)
    else:
        pass
print(f"{who_won[0]} won this bidding auction by bidding {max_value}$.")
| aytovicc/My_python_journey | Python/Secret_Aucation_Program/Secret_Auction_Program.py | Secret_Auction_Program.py | py | 930 | python | en | code | 0 | github-code | 13 |
3184919789 | #coding:utf-8
import sys
#导入相应的包
import smtplib
import pandas as pd
from email.mime.text import MIMEText
from email.utils import formataddr
def getEmailAd(filePath):
    """Extract every e-mail address written as ``<address>`` in the file.

    Returns the addresses (without the angle brackets) in order of
    appearance.  The two debug prints of the original are kept.

    Bugs fixed: the original hand-rolled scanner only advanced its index
    inside the ``'<'`` branch, so any character outside a ``<...>`` span
    left the while-loop spinning forever; it also shadowed the ``str``
    builtin and raised IndexError on an unclosed ``<``.
    """
    import re  # local import: the module header does not import re
    with open(filePath) as f:
        content = f.read()
    print(len(content))
    # Non-greedy span between angle brackets; tolerates unclosed '<'.
    addresses = re.findall(r'<([^>]*)>', content)
    print(addresses)
    return addresses
def readIntro(filePath):
    """Read the whole introduction text file (UTF-8) and return it.

    The original accumulated lines with ``+=`` and then called
    ``temp.replace("\\n", "%0A")`` without using the result — strings are
    immutable, so that line was a no-op; it has been dropped (behavior is
    unchanged).  A context manager now guarantees the file is closed.
    """
    with open(filePath, encoding='utf-8') as f:
        temp = f.read()
    print(temp)
    return temp
def excel_email(filePath):
    """Return column 7 of the spreadsheet as a plain list (the e-mail column)."""
    df = pd.read_excel(filePath, usecols=[7], names=None)
    result = [row[0] for row in df.values.tolist()]
    print(result)
    return result
# Load recipients and the message body from the data files.
# NOTE(review): emailList is built but never used below — sendmail() is
# called with excel_emailList; confirm which recipient source is intended.
emailList = getEmailAd("email.txt")
excel_emailList = excel_email("nameList2.xlsx")
intro = readIntro("introduce.txt")
# Sender address: the mailbox for which the authorization code was issued.
from_addr = "597234159@qq.com"
# This is the SMTP authorization code, NOT the QQ mailbox password.
# NOTE(review): a credential is hard-coded in source — move it to an
# environment variable or config file before sharing this script.
from_pwd = "zyddxdoxemyybbij"# placeholder value
# Recipient address(es) for testing.
to_addr = ["597234159@qq.com"]
# MIMEText takes three main arguments:
# 1. the message body
# 2. the MIME subtype ("plain" for text here)
# 3. the encoding — use "utf-8", since the body may contain non-ASCII
#    characters and other encodings can arrive garbled
msg = MIMEText(intro, "plain", "utf-8")
msg['From']=formataddr(["深圳大学贾森实验室",from_addr])
msg['Subject'] = "计算机学院贾森特聘教授团队诚邀各位研究生加盟"
# SMTP server address; provider-specific.  Third-party SMTP access
# generally requires enabling the authorization option in the mailbox
# settings.  Tencent QQ mail's SMTP host is "smtp.qq.com".
smtp_srv = "smtp.qq.com"
try:
# 不能直接使用smtplib.SMTP来实例化,第三方邮箱会认为它是不安全的而报错
# 使用加密过的SMTP_SSL来实例化,它负责让服务器做出具体操作,它有两个参数
# 第一个是服务器地址,但它是bytes格式,所以需要编码
# 第二个参数是服务器的接受访问端口,SMTP_SSL协议默认端口是465
srv = smtplib.SMTP_SSL(smtp_srv.encode(), 465)
# 使用授权码登录你的QQ邮箱
srv.login(from_addr, from_pwd)
# 使用sendmail方法来发送邮件,它有三个参数
# 第一个是发送地址
# 第二个是接受地址,是list格式,意在同时发送给多个邮箱
# 第三个是发送内容,作为字符串发送
srv.sendmail(from_addr, excel_emailList, msg.as_string())
print('发送成功')
except Exception as e:
print('发送失败')
finally:
#无论发送成功还是失败都要退出你的QQ邮箱
srv.quit() | misaki-taro/python_email | hello.py | hello.py | py | 3,231 | python | zh | code | 0 | github-code | 13 |
1902080303 | import random
from enum import Enum
from typing import Union
from game.models.components.cell import Cell, CellWithShip
from game.models.components.ship import Ship
class ResultAttack(Enum):
    """Outcome of firing at a board cell."""
    MISS = 1      # empty cell
    HIT = 2       # ship cell hit, ship still afloat
    SUNK = 3      # ship cell hit and the whole ship is now sunk
    ATTACKED = 4  # cell was already fired at
    ERROR = 5     # coordinates outside the board
class Player:
    """Battleship player: owns a board of Cells, a fleet of Ships, and a
    reconstructed view of the opponent's board built from attack results.

    Board convention: ``board[x][y]`` with x in [0, height_board) and
    y in [0, width_board); ships longer than the board edge are rejected
    by :meth:`place_ship`.
    """
    def __init__(self, board_size: (int, int) = (10, 10),
                 name: str = 'Player'):
        self.name = name
        self.ships = []  # ships still afloat
        self.width_board = board_size[0]
        self.height_board = board_size[1]
        # own board and the (initially unknown) opponent board
        self.board = [[Cell() for _ in range(self.width_board)]
                      for _ in range(self.height_board)]
        self.opponent_board = [[Cell() for _ in range(self.width_board)]
                               for _ in range(self.height_board)]
        # pending move, set by the UI/controller and consumed by get_move()
        self.__move = None
        # remaining clock budget; presumably seconds — TODO confirm units
        self.time = 120
    def set_move(self, pos: (int, int)):
        """Stage the next move; it stays pending until get_move() is called."""
        self.__move = pos
    def get_move(self) -> (int, int):
        """Pop and return the pending move (None if none is staged)."""
        move = self.__move
        self.__move = None
        return move
    def place_ships_randomly(self, ships: list[(int, int)]):
        """Place each (width, height) ship at a random valid position,
        retrying with new random positions/orientations until it fits."""
        for width, height in ships:
            placed = False
            while not placed:
                x = random.randint(0, self.width_board - 1)
                y = random.randint(0, self.height_board - 1)
                horizontal = random.choice([True, False])
                placed = self.place_ship(Ship((x, y), width,
                                              height, horizontal))
    def place_ship(self, ship: Ship) -> bool:
        """Place ``ship`` on the board if it fits and does not touch any
        existing ship; returns True on success.

        A one-cell margin around the ship must be free of other ships
        (the neighbor scan below is clamped to the board edges).
        """
        width = ship.width
        length = ship.length
        x = ship.pos[0]
        y = ship.pos[1]
        # horizontal ships swap their footprint dimensions
        if ship.horizontal:
            width, length = length, width
        # scan the footprint plus a one-cell border for existing ships
        for i in range(max(0, x - 1), min(x + length + 1, self.height_board)):
            for j in range(max(0, y - 1), min(y + width + 1,
                                              self.width_board)):
                if isinstance(self.board[i][j], CellWithShip):
                    return False
        # reject ships that would stick out past the board edge
        if y + width > self.width_board or x + length > self.height_board:
            return False
        for i in range(length):
            for j in range(width):
                self.board[x + i][y + j] = CellWithShip(ship)
        self.ships.append(ship)
        return True
    def get_ship(self, pos: (int, int)) -> Ship:
        """Return the ship occupying ``pos``, or None for an empty cell."""
        cell = self.board[pos[0]][pos[1]]
        if isinstance(cell, CellWithShip):
            return cell.get_ship()
        return None
    def fire(self, x: int, y: int) -> ResultAttack:
        """Resolve an incoming shot at (x, y) and return its outcome.

        NOTE(review): negative coordinates wrap around via Python's
        negative indexing instead of returning ERROR — confirm callers
        validate coordinates before firing.
        """
        try:
            cell = self.board[x][y]
        except IndexError:
            return ResultAttack.ERROR
        if cell.is_hit():
            return ResultAttack.ATTACKED
        if isinstance(cell, CellWithShip):
            ship = cell.get_ship()
            ship.hits += 1
            cell.set_hit(True)
            if ship.is_sunk():
                # sunk ships leave the active fleet (drives is_loser)
                self.ships.remove(ship)
                return ResultAttack.SUNK
            return ResultAttack.HIT
        cell.set_hit(True)
        return ResultAttack.MISS
    def is_loser(self) -> bool:
        """True when the fleet is wiped out or the clock has run out."""
        return not bool(len(self.ships)) or not self.time
    def remove_ship(self, pos: (int, int)) -> Union[None, Ship]:
        """Remove and return the ship at ``pos`` (e.g. during placement
        editing); returns None when the cell is empty or out of range."""
        try:
            cell = self.board[pos[0]][pos[1]]
        except IndexError:
            return None
        if isinstance(cell, CellWithShip):
            ship = cell.get_ship()
            self.ships.remove(ship)
            width = ship.width
            length = ship.length
            if ship.horizontal:
                width, length = length, width
            # restore plain cells over the ship's whole footprint
            for i in range(length):
                for j in range(width):
                    self.board[i + ship.pos[0]][j + ship.pos[1]] = Cell()
            return ship
    def update_opponent_board(self, x: int, y: int, result: ResultAttack):
        """Record the outcome of our shot at (x, y) on the opponent view:
        hits/sunk become a marked 1x1 ship cell, misses just get flagged."""
        if result != ResultAttack.MISS and result != ResultAttack.ATTACKED:
            cell = CellWithShip(Ship((x, y), 1, 1))
            cell.set_hit(True)
            self.opponent_board[x][y] = cell
        else:
            self.opponent_board[x][y].set_hit(True)
    def set_name(self, name: str):
        """Rename the player."""
        self.name = name
| Mensh1kov/Sea_battle | game/models/components/player.py | player.py | py | 4,149 | python | en | code | 0 | github-code | 13 |
20056720737 | import urllib
import re
def crawl_names_save_csv():
    """
    Crawl baby girl/boy name pages for several countries from
    babynamesdirect.com and write them to Baby_names.csv as
    "<name>,<gender>" rows (gender wins last-write for duplicate names).

    Bug fixed: the original reset the alphabet index ``k`` only once,
    before the country loop, so after the first country the inner
    ``while k < len(alpha)`` never ran again and every country after
    "indian" was silently skipped.  Plain nested for-loops cannot make
    that mistake.
    """
    countries = ["indian", "american", "french", "german", "australian",
                 "arabic", "christian", "english", "iranian", "irish"]
    alphabet = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l",
                "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x",
                "y", "z"]
    names_map = {}
    for country in countries:
        for letter in alphabet:
            gi_url = 'https://www.babynamesdirect.com/baby-names/' + country + '/' + 'girl' + '/' + letter + '/'
            b_url = 'https://www.babynamesdirect.com/baby-names/' + country + '/' + 'boy' + '/' + letter + '/'
            g_res = urllib.urlopen(gi_url).read()
            b_res = urllib.urlopen(b_url).read()
            # every double-quoted http(s)/ftp(s) URL on the page
            links = re.findall('"((http|ftp)s?://.*?)"', g_res)
            blinks = re.findall('"((http|ftp)s?://.*?)"', b_res)
            # keep only per-name detail links; the last path segment is the name
            for link in links:
                if "babynamesdirect.com/girl/" in link[0]:
                    names_map[str(link[0].split("/")[-1])] = "girl"
            for link in blinks:
                if "babynamesdirect.com/boy/" in link[0]:
                    names_map[str(link[0].split("/")[-1])] = "boy"
    f = open("Baby_names.csv", 'w')
    for key in names_map:
        f.write(key + "," + names_map[key])
        f.write("\n")
    print ("CSV Updated..")
    f.close()
# Entry point: run the crawl only when executed as a script.
if __name__ == "__main__":
    crawl_names_save_csv()
14133733472 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
FLAGS = tf.app.flags.FLAGS
# Batch normalization. Constant governing the exponential moving average of
# the 'global' mean and variance for all activations.
BATCHNORM_MOVING_AVERAGE_DECAY = 0.9997
def vgg_16(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_16'):
  """Oxford Net VGG 16-Layers version D Example.
  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224.
  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.
  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with tf.name_scope(scope, 'vgg_16', [inputs]) as sc:
    # Every layer below registers its output into this graph collection so it
    # can be harvested into the end_points dict at the end.
    end_points_collection = sc + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      # NOTE(review): the canonical slim VGG-16 uses a [7, 7] kernel for fc6;
      # [3, 3] here implies an input smaller than 224x224 -- confirm intended
      # input size.
      net = slim.conv2d(net, 4096, [3, 3], padding='VALID', scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout7')
      # Final classifier: linear activation, no batch norm (raw logits).
      net = slim.conv2d(net, num_classes, [1, 1],
                        activation_fn=None,
                        normalizer_fn=None,
                        scope='fc8')
      # Convert end_points_collection into a end_point dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        # Drop the 1x1 spatial dims so logits are [batch_size, num_classes].
        net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc + '/fc8'] = net
      return net, end_points
def inference(images, num_classes, is_training=True, scope='vgg_16'):
  """Build the VGG-16 inference graph.

  (Docstring corrected: it previously described Inception v3 and listed
  parameters -- `for_training`, `restore_logits` -- that this function
  does not have.)

  Args:
    images: Images returned from inputs() or distorted_inputs().
    num_classes: number of classes.
    is_training: if `True`, build the graph for training (enables dropout
      and batch-norm moving-average updates).
    scope: optional prefix string identifying the network tower.
  Returns:
    Logits. 2-D float Tensor of shape [batch_size, num_classes].
  """
  # Parameters for BatchNorm.
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # calculate moving average or using exist one
      'is_training': is_training
  }
  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_regularizer=slim.l2_regularizer(FLAGS.weight_decay)):
    # Conv layers additionally get He init, ReLU and batch norm by default;
    # vgg_16's final fc8 overrides activation/normalizer back to None.
    with slim.arg_scope([slim.conv2d],
                        weights_initializer=slim.variance_scaling_initializer(),
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
      logits, endpoints = vgg_16(
          images,
          num_classes=num_classes,
          dropout_keep_prob=0.8,
          is_training=is_training,
          scope=scope
      )
  # Add summaries for viewing model statistics on TensorBoard.
  _activation_summaries(endpoints)
  return logits
def loss(logits, labels):
    """Compute and return the total training loss (softmax + L2 regularization).

    (Docstring corrected: despite the original note, the summed loss *is*
    returned by this function.)

    Args:
      logits: 2-D float Tensor of logits from inference(),
        shape [batch_size, num_classes].
      labels: Labels from distorted_inputs or inputs(). 1-D tensor
        of shape [batch_size]
    Returns:
      Scalar tensor: softmax cross-entropy plus all regularization losses.
    """
    with tf.name_scope("train_loss"):
        # cal softmax loss
        # Infer num_classes from the last dimension of the logits tensor.
        num_classes = logits[0].get_shape()[-1].value
        onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=num_classes)
        softmax_loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels,
                                                       logits=logits,
                                                       label_smoothing=FLAGS.label_smoothing,
                                                       scope='softmax_loss')
        tf.summary.scalar('softmax_loss', softmax_loss)
        # cal regular loss -- sum of all weight-decay terms registered by slim.
        regularization_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        tf.summary.scalar('train_regular_loss', regularization_loss)
        # cal total loss
        total_loss = softmax_loss + regularization_loss
        tf.summary.scalar('train_total_loss', total_loss)
        return total_loss
def _activation_summary(x):
    """Attach a histogram and a sparsity (zero-fraction) summary to tensor `x`."""
    tensor_name = x.op.name
    tf.summary.histogram(tensor_name + '/activations', x)
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _activation_summaries(endpoints):
    """Summarise every endpoint activation under a shared 'summaries' scope."""
    with tf.name_scope('summaries'):
        for activation in endpoints.values():
            _activation_summary(activation)
| ZhihengCV/tensorflow_face | models/vgg_model.py | vgg_model.py | py | 7,272 | python | en | code | 3 | github-code | 13 |
16824664314 | """Service for SQL module."""
from typing import Optional, Tuple
from jsonschema import validate
from hyrisecockpit.api.app.connection_manager import ManagerSocket
from hyrisecockpit.api.app.exception import StatusCodeNotFoundException
from hyrisecockpit.message import response_schema
from hyrisecockpit.request import Header, Request
from hyrisecockpit.response import Response
from .interface import SqlQueryInterface
from .model import SqlResponse
class SqlService:
    """Services of the Control Controller."""

    @staticmethod
    def _send_message(message: Request) -> Response:
        """Send an IPC message with data to a database interface, return the response."""
        with ManagerSocket() as socket:
            reply = socket.send_message(message)
            # Fail fast on malformed replies before handing them upstream.
            validate(instance=reply, schema=response_schema)
            return reply

    @classmethod
    def execute_sql(
        cls, interface: SqlQueryInterface
    ) -> Tuple[Optional[SqlResponse], int]:
        """Execute sql query."""
        request = Request(
            header=Header(message="execute sql query"), body=dict(interface)
        )
        response = cls._send_message(request)
        status = response["header"]["status"]
        if status == 200:
            return SqlResponse(**response["body"]["results"]), 200
        if status == 404:
            return None, 404
        raise StatusCodeNotFoundException(
            f"Unknown status code: {response['header']['status']}"
        )
| hyrise/Cockpit | hyrisecockpit/api/app/sql/service.py | service.py | py | 1,589 | python | en | code | 14 | github-code | 13 |
31494210872 | # Altere o programa de cálculo dos números primos, informando,
# caso o número não seja primo, por quais número ele é divisível.
import math

# Collect every divisor of `a` (the exercise asks which numbers divide it).
# Fixes: the old code stored float *quotients* (a / i) instead of the
# divisors, and it reported 1 as prime.
l1 = []
a = int(input('Num: '))
for i in range(2, a + 1, 1):
    if a % i == 0:  # exact modulo test replaces the floor(a/i) float trick
        l1.append(i)
# A prime has exactly one divisor > 1 (itself); 0 and 1 are not prime.
if (len(l1) > 1) or (a == 0) or (a == 1):
    print(f'{a} não um número primo')
else:
    print(f'{a} é um número primo')
print(l1)
| GuilhermeMastelini/Exercicios_documentacao_Python | Estrutura de Repetição/Lição 22.py | Lição 22.py | py | 454 | python | pt | code | 0 | github-code | 13 |
class ListNode:
    """Singly linked list node: a payload `val` and a `next` pointer."""

    def __init__(self, x):
        self.val = x      # stored value
        self.next = None  # successor; None terminates the list
class Solution:
    def isPalindrome(self, head: ListNode) -> bool:
        """Return True iff the list reads the same forwards and backwards.

        O(1) extra space: find the middle with a two-speed walk, reverse
        the second half in place, then compare halves value by value.
        Note: the second half of the input list is left reversed.
        """
        def flip(node: ListNode) -> ListNode:
            # Reverse the chain starting at `node`; return the new head.
            prev = None
            while node:
                node.next, prev, node = prev, node, node.next
            return prev

        mid = head
        runner = head
        while runner and runner.next:
            mid = mid.next
            runner = runner.next.next

        left, right = head, flip(mid)
        while left != mid:
            if left.val != right.val:
                return False
            left = left.next
            right = right.next
        return True
| wylu/leetcodecn | src/python/explore/linkedlist/basic/回文链表.py | 回文链表.py | py | 729 | python | en | code | 3 | github-code | 13 |
# Puzzle input: one "X bags contain ..." rule per line in file "Day7".
all_rules = open("Day7", 'r').read().split("\n")
# Map "<adjective> <colour>" -> its "contain" clause; [:-5] strips " bags".
rule_map = {rule.split(" contain ")[0][:-5]: rule.split(" contain ")[1] for rule in all_rules}
def contains_sg(colour):
    """Return True if a `colour` bag (transitively) contains a shiny gold bag."""
    rule = rule_map[colour]
    if "no other bag" in rule:
        return False
    if "shiny gold" in rule:
        return True
    for part in rule.split(', '):
        words = part.split(' ')  # "<count> <adj> <colour> bag(s)..."
        if contains_sg(words[1] + ' ' + words[2]):
            return True
    return False
def count_bags(n):
    """Return how many bags one bag of colour `n` must contain in total."""
    rule = rule_map[n]
    if "no other bag" in rule:
        return 0
    total = 0
    for part in rule.split(", "):
        words = part.split(' ')
        count = int(words[0])
        # Each inner bag counts itself plus everything it contains.
        total += count + count * count_bags(words[1] + ' ' + words[2])
    return total
def main():
    """Solve and print both puzzle parts."""
    gold_carriers = sum(1 for colour in rule_map if contains_sg(colour))
    print("Part 1: ", gold_carriers)
    print("Part 2: ", count_bags("shiny gold"))
if __name__ == '__main__':
    main()  # solve both parts when run as a script
| aamnaam/AoC-2020 | Day07.py | Day07.py | py | 1,104 | python | en | code | 0 | github-code | 13 |
def are_occurrences_equal(s: str) -> bool:
    """
    Given a string s, return true if s is a good string, or false otherwise.
    A string s is good if all the characters that appear in s have the same number of
    occurrences (i.e., the same frequency).
    """
    # collections.Counter replaces the hand-rolled counting loop; behavior
    # is unchanged (an empty string still yields False, as before).
    from collections import Counter
    return len(set(Counter(s).values())) == 1
print(are_occurrences_equal("abacbc"))
print(are_occurrences_equal("aaabb")) | Dimaaap/Leetcode | Easy/1941.) Check if All Characters Have Equal Number of Occurences.py | 1941.) Check if All Characters Have Equal Number of Occurences.py | py | 537 | python | en | code | 0 | github-code | 13 |
25605943355 | import requests, csv, json, argparse
from argparse import ArgumentParser
#loads data from setup.json
# NOTE(review): this open() handle is never closed -- a `with` block would
# be safer.
setupData = json.load(open("setup.json"))
#base url to acess the Aeries API (default is the Aeries demo API)
baseURL = setupData["baseURL"]
#header for Aeries API
#place your Aeries cert here (this is not secure at all)
headers = {'content-type': setupData["content-type"],
           "AERIES-CERT": setupData["AERIES-CERT"]
           }
# Appears unused in this module -- TODO confirm before removing.
path = []
def makeAPICall(url):
    """Issue a GET against `url` using the module-level Aeries headers."""
    response = requests.get(url, headers=headers)
    return response
#Prints all school codes and school names
def getSchools():
    """Print "<code>  -  <name>" for every school the API returns."""
    for school in makeAPICall(baseURL).json():
        print(school["SchoolCode"], ' - ', school["Name"])
#displays information on a specfic school to the console
def getSchools_SC(schoolCode):
    """Pretty-print the API record of one school.

    Fix: the `schoolCode` parameter was previously overwritten with the
    global argparse result, so the argument passed in was ignored.
    """
    url = baseURL + str(schoolCode)
    resp = makeAPICall(url)
    data = resp.json()
    print(json.dumps(data, indent=4))
#displays the bell schedule on the console
def getSchoolBellSchedule(schoolCode):
    """Pretty-print the bell schedule of one school.

    Fix: the `schoolCode` parameter was previously overwritten with the
    global argparse result, so the argument passed in was ignored.
    """
    url = baseURL + str(schoolCode) + "/BellSchedule"
    resp = makeAPICall(url)
    data = resp.json()
    print(json.dumps(data, indent=4))
#generates a CSV file of all student info from a specfic school
def getStudentInfo_CSV(schoolCode):
    """Write every student record of `schoolCode` to students.csv.

    Fixes: the parameter is no longer shadowed by the global argparse
    result; the reported row count previously started at -1 and
    under-counted by one; the file is opened with newline='' (required
    by the csv module) and closed via a context manager.
    """
    url = baseURL + str(schoolCode) + "/students"
    resp = makeAPICall(url)
    data = resp.json()
    linecount = 0
    #https://gist.github.com/mabroor/2828962
    with open("students.csv", 'w', newline='') as f:
        fieldnames = data[0].keys()
        csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)
        csvwriter.writeheader()
        for row in data:
            csvwriter.writerow(row)
            linecount += 1
    print('Wrote ' + str(linecount) + ' students to "students.csv"')
#displays a specfic student's information on the console
def getStudentInfo_SID(schoolCode, studentID):
    """Pretty-print the extended record of one student.

    Fix: both parameters were previously overwritten with the global
    argparse result, so the arguments passed in were ignored.
    """
    url = baseURL + str(schoolCode) + "/students/" + str(studentID) + "/extended"
    resp = makeAPICall(url)
    data = resp.json()
    print(json.dumps(data, indent=4))
#generates a CSV file of retrived student info from a specfic school in a specific grade level
def getStudentInfo_GL_CSV(schoolCode, gradeLevel):
    """Write students of `gradeLevel` at `schoolCode` to students_grade_<N>.csv.

    Fixes: parameters are no longer shadowed by the global argparse
    result; the reported row count previously started at -1 and
    under-counted by one; the file is opened with newline='' (required
    by the csv module) and closed via a context manager.
    """
    url = baseURL + str(schoolCode) + "/students/grade/" + str(gradeLevel) + "/extended"
    resp = makeAPICall(url)
    data = resp.json()
    linecount = 0
    #https://gist.github.com/mabroor/2828962
    with open("students_grade_" + gradeLevel + ".csv", 'w', newline='') as f:
        fieldnames = data[0].keys()
        csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)
        csvwriter.writeheader()
        for row in data:
            csvwriter.writerow(row)
            linecount += 1
    print('Wrote ' + str(linecount) + ' students to "students_grade_' + gradeLevel + '.csv"')
#command line function calling achieved via argparse
#Reference: https://docs.python.org/3/library/argparse.html, https://stackoverflow.com/a/30669126, https://stackoverflow.com/a/24584876
parser = ArgumentParser()
parser.add_argument("function",
                    nargs="?",
                    choices=['getSchools', 'getSchools_SC', 'getSchoolBellSchedule', 'getStudentInfo_CSV', 'getStudentInfo_SID', 'getStudentInfo_GL_CSV'],
                    default='getSchools',
                    )
args, sub_args = parser.parse_known_args()
# Manually handle the default for "function" (argparse already defaults it;
# this is just a safety net).
function = "getSchools" if args.function is None else args.function
# Parse the remaining args as per the selected subcommand
if function == "getSchools":
    getSchools()
elif function == "getSchools_SC":  # bug fix: was "function2", which made this choice unreachable
    parser.add_argument('schoolCode')
    args = parser.parse_args(sub_args)
    getSchools_SC(args.schoolCode)
elif function == "getSchoolBellSchedule":
    parser.add_argument('schoolCode')
    args = parser.parse_args(sub_args)
    getSchoolBellSchedule(args.schoolCode)
elif function == "getStudentInfo_CSV":
    parser.add_argument('schoolCode')
    args = parser.parse_args(sub_args)
    getStudentInfo_CSV(args.schoolCode)
elif function == "getStudentInfo_SID":
    parser.add_argument('schoolCode')
    parser.add_argument('studentID')
    args = parser.parse_args(sub_args)
    getStudentInfo_SID(args.schoolCode, args.studentID)
elif function == "getStudentInfo_GL_CSV":
    parser.add_argument('schoolCode')
    parser.add_argument('gradeLevel')
    args = parser.parse_args(sub_args)
getStudentInfo_GL_CSV(args.schoolCode, args.gradeLevel) | bwernick/aeries-get-API-data | data.py | data.py | py | 4,610 | python | en | code | 1 | github-code | 13 |
72865799377 | # Di - 1 = 2 * (Di + 1)
def f(day: int, remain: int):
    """Work the peach count backwards: `remain` peaches are left after `day`
    days, and each previous day held D[i-1] = 2 * (D[i] + 1) peaches.

    Prints the running count for each day from `day` down to 1 and returns
    the count after the final backward step.
    """
    count = remain
    for d in range(day, 0, -1):
        print(f"day {d} eat ${count}")
        count = 2 * (count + 1)
    return count
# Demo: one peach remaining after day 10; prints each day's count.
print(f(10, 1))
| zsh2401/oblivion | lang/python/snnu/peach.py | peach.py | py | 296 | python | en | code | 1 | github-code | 13 |
7876521778 | import torch.nn as nn
from DepthwiseSeparableConvolution import depthwise_separable_conv
class bottle_screener(nn.Module):
    """CNN screener: three depthwise-separable conv blocks + a small classifier.

    NOTE(review): this constructor does not run as written (see inline notes);
    it looks like a partial PyTorch port of the Keras model kept in the string
    literal below, and it defines no forward() method.
    """
    def __init__(self, num_classes=3):
        super(bottle_screener, self).__init__()
        input_shape = () #input shape of the image (unused -- TODO confirm/remove)
        # NOTE(review): `nin`/`nout` are undefined names; each layer needs real
        # channel counts (intended filter sizes are in the trailing comments).
        self.dsc0 = depthwise_separable_conv(nin, nout, kernel_size = 3, padding = 1, bias=False) #filters 32
        self.pool0 = nn.MaxPool2d((2,2)) #pool size(2,2)
        self.dsc1 = depthwise_separable_conv(nin, nout, kernel_size = 3, padding = 1, bias=False) #filters 64
        self.pool1 = nn.MaxPool2d((2,2))
        self.dsc2 = depthwise_separable_conv(nin, nout, kernel_size = 3, padding = 1, bias=False) #filters 128
        self.pool2 = nn.MaxPool2d((2,2))
        self.flatten = nn.Flatten()
        # NOTE(review): nn.Linear requires (in_features, out_features); a single
        # argument raises TypeError.
        self.linear0 = nn.Linear(512)
        # NOTE(review): missing parentheses -- this stores the ReLU *class*,
        # not an instance.
        self.relu = nn.ReLU
        self.dropout = nn.Dropout(0.5)
        self.linear1 = nn.Linear(3)
        # NOTE(review): named `sigmoid` but holds Softmax -- confirm intent.
        self.sigmoid = nn.Softmax()
'''
model.add(Conv2D(filters=32, kernel_size=(3,3), strides=1, padding='same',input_shape=image_shape, activation='relu',))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=64, kernel_size=(3,3), strides=1, padding='same',input_shape=image_shape, activation='relu',))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=128, kernel_size=(3,3), strides=1, padding='same',input_shape=image_shape, activation='relu',))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(len(folders)))
model.add(Activation('sigmoid'))
''' | andyliu666/RD | DepthwiseSeparableConvolution/model_cnn.py | model_cnn.py | py | 1,637 | python | en | code | 0 | github-code | 13 |
42515194479 | """
:mod: `cli` -- Command line interface to ucalumni
=================================================
.. module: cli
For
(c) Joaquim Carvalho 2021.
MIT License, no warranties.
"""
# cli interface
# we use Typer https://typer.tiangolo.com
import typer
from ucalumni.importer import import_auc_alumni
from ucalumni.aluno import Aluno
from ucalumni import extractors
from ucalumni import mapping
app = typer.Typer()
@app.command()
def import_alumni(
        csv_file: str =
        typer.Argument(...,
                       help='path of the csv file exported from Archeevo'),
        dest_dir: str =
        typer.Argument(...,
                       help="Destination directory for generated files and databases"),
        db_connection: str=
        typer.Option("", prompt="Database connection string (blank for no direct import)?",
                     help="SQLAlchemy connection string if direct import is desired, leave blank if not. "
                          "If a connection string is provided no Kleio files are generated."),
        rows: int =
        typer.Option(0,
                     help='max number of rows to processed (0 for all)'),
        batch: int =
        typer.Option(500,
                     help='Number of records per file or database insert'),
        dryrun: bool =
        typer.Option(False,
                     help='output to terminal, do not create files'),
        echo: bool =
        typer.Option(False,
                     help='echo the information in each row of the export file'),
        skip_until_id: str =
        typer.Option(None,help="Skip until id (inclusive)")
):
    """
    Generate kleio source files for alumni records exported from
    AUC catalog (Archeevo)
    """
    # NOTE(review): `skip_until_id` is accepted here but not forwarded to
    # import_auc_alumni -- confirm whether skipping is implemented elsewhere.
    import_auc_alumni(csv_file,
                      dest_dir,
                      db_connection,
                      rows,
                      batch,
                      dryrun,
                      echo)
    typer.echo('Import finished.')
if __name__ == "__main__":
    app()  # dispatch Typer CLI commands when run as a script
| joaquimrcarvalho/fauc1537-1919 | notebooks/ucalumni/cli.py | cli.py | py | 2,119 | python | en | code | 2 | github-code | 13 |
18093776811 | from django.urls import path
from . import dbapi
urlpatterns = [
    # API endpoints that let the backend query the database.
    path('login', dbapi.login, name='login'), # database login endpoint
    path('dbapi',dbapi.dbapi,name='dbapi'), # main request endpoint: queries all fields (database names, table names, column names, comments, and field data)
    path('select', dbapi.select, name='select'), # query endpoint for exact, condition-based lookups
    path('mselect', dbapi.mselect, name='mselect'), # query endpoint for fuzzy, condition-based lookups
    path('delete', dbapi.delete, name='delete'), # delete endpoint: removes database records
    path('insert', dbapi.insert, name='insert'), # insert endpoint: stores data submitted by the frontend
    path('edit', dbapi.edit, name='edit'), # edit endpoint: updates records from data submitted by the frontend
    path('export', dbapi.export, name='export'), # legacy export endpoint serving file downloads (now deprecated)
]
| RickySakura/DBSYS | ulb_manager/backend/urls.py | urls.py | py | 990 | python | zh | code | 0 | github-code | 13 |
29671321526 | from django.db import models
from django.core import validators
class Item(models.Model):
    """A TODO item.

    NOTE(review): the field names (`age`, `sex`, `SEX_CHOICES`) look inherited
    from a person-model template, while the verbose names describe a TODO
    ("due month", "TODO category"); renaming them would require a migration.
    """
    # Category choices: 1 = work, 2 = non-work.
    SEX_CHOICES = (
        (1, '業務'),
        (2, '業務以外'),
    )
    name = models.CharField(
        verbose_name='TODO名',
        max_length=200,
    )
    # Due month; optional, must be >= 1 when given.
    age = models.IntegerField(
        verbose_name='期限月',
        validators=[validators.MinValueValidator(1)],
        blank=True,
        null=True,
    )
    # TODO category (see SEX_CHOICES above).
    sex = models.IntegerField(
        verbose_name='TODO区分',
        choices=SEX_CHOICES,
        default=1
    )
    memo = models.TextField(
        verbose_name='内容',
        max_length=300,
        blank=True,
        null=True,
    )
    created_at = models.DateTimeField(
        verbose_name='登録日',
        auto_now_add=True
    )
    # Display settings for the Django admin site.
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = 'アイテム'
verbose_name_plural = 'アイテム' | akimoto-ri/todoApp | app/models.py | models.py | py | 976 | python | en | code | 0 | github-code | 13 |
74564471378 | #!/usr/bin/env python
"""
_GetSiteInfo_
MySQL implementation of Locations.GetSiteInfo
"""
from WMCore.Database.DBFormatter import DBFormatter
class GetSiteInfo(DBFormatter):
    """
    Grab all the relevant information for a given site.
    Usually useful only in the submitter
    """
    sql = """SELECT wl.site_name, wpnn.pnn, wl.ce_name, wl.pending_slots,
                    wl.running_slots, wl.plugin, wl.cms_name, wlst.name AS state
             FROM wmbs_location wl
             INNER JOIN wmbs_location_pnns wls ON wls.location = wl.id
             INNER JOIN wmbs_pnns wpnn ON wpnn.id = wls.pnn
             INNER JOIN wmbs_location_state wlst ON wlst.id = wl.state
             """

    def execute(self, siteName=None, conn=None, transaction=False):
        """Run the query, optionally filtered to a single site name."""
        if siteName:
            sql = self.sql + " WHERE wl.site_name = :site"
            results = self.dbi.processData(sql, {'site': siteName},
                                           conn=conn, transaction=transaction)
        else:
            results = self.dbi.processData(self.sql, conn=conn,
                                           transaction=transaction)
        return self.format(results)

    def format(self, result):
        """
        Format the DB results in a plain list of dictionaries, with one
        dictionary for each site name, thus with a list of PNNs.
        :param result: DBResult object
        :return: a list of dictionaries
        """
        # Aggregate the 8-column rows by site name so each site ends up with
        # a single dict carrying a list of its PNNs.
        sites = {}
        for row in DBFormatter.format(self, result):
            name = row[0]
            info = sites.setdefault(name, {'site_name': name, 'pnn': []})
            if row[1] not in info['pnn']:
                info['pnn'].append(row[1])
            (info['ce_name'], info['pending_slots'], info['running_slots'],
             info['plugin'], info['cms_name'], info['state']) = row[2:8]
        # now return a flat list of dictionaries
        return list(sites.values())
| dmwm/WMCore | src/python/WMCore/WMBS/MySQL/Locations/GetSiteInfo.py | GetSiteInfo.py | py | 2,354 | python | en | code | 44 | github-code | 13 |
1681494826 | #!/usr/bin/env python3
#encoding=utf-8
#-------------------------------------------------
# Usage: python3 4-getattribute_to_compute_attribute.py
# Description: attribute management 4 of 4
# Same, but with generic __getattribute__ all attribute interception
#-------------------------------------------------
class Powers(object): # Need (object) in 2.X only
    """Expose `square`/`cube` as computed attributes backed by _square/_cube."""

    def __init__(self, square, cube):
        self._square = square
        self._cube = cube

    def __getattribute__(self, name):
        # Route all reads through the superclass to avoid recursive lookup.
        fetch = object.__getattribute__
        if name == 'square':
            return fetch(self, '_square') ** 2
        if name == 'cube':
            return fetch(self, '_cube') ** 3
        return fetch(self, name)

    def __setattr__(self, name, value):
        # Writes to the computed names update the backing attributes.
        store = object.__setattr__
        if name == 'square':
            store(self, '_square', value)
        elif name == 'cube':
            store(self, '_cube', value)
        else:
            store(self, name, value)
if __name__ == '__main__':
    X = Powers(3, 4)
    print(X.square) # 3 ** 2 = 9
    print(X.cube) # 4 ** 3 = 64
    X.square = 5
    print(X.square) # 5 ** 2 = 25
    X.cube = 7
    print(X.cube) # 7 ** 3 = 343
| mindnhand/Learning-Python-5th | Chapter38.ManagedAttributes/4-4-getattribute_to_compute_attribute.py | 4-4-getattribute_to_compute_attribute.py | py | 1,465 | python | en | code | 0 | github-code | 13 |
71876598097 | from src.nn.activations.hidden_activations import ReLu, Sigmoid, Tanh, Identity
from src.nn.activations.softmax import Softmax
# Canonical activation-name keys accepted by get_activation().
RELU = 'relu'
SIGMOID = 'sigmoid'
TANH = 'tanh'
IDENTITY = 'identity'
SOFTMAX = 'softmax'
def get_activation(name):
    """Return a fresh activation instance for the given canonical name.

    Raises ValueError for unknown names.
    """
    factories = {
        RELU: ReLu,
        SIGMOID: Sigmoid,
        TANH: Tanh,
        SOFTMAX: Softmax,
        IDENTITY: Identity,
    }
    factory = factories.get(name)
    if factory is None:
        raise ValueError(f'Invalid name of activation: {name}')
    return factory()
| binkjakub/neural-networks | src/nn/activations/__init__.py | __init__.py | py | 563 | python | en | code | 1 | github-code | 13 |
class addlist:
    """Collect `size` integers from the user and provide simple statistics."""

    def __init__(self):
        self.list1 = list()
        self.size = 0
        self.sum = 0
        self.accept()

    def accept(self):
        """Prompt only for the element count; values are read by lista()."""
        self.size = int(input("Enter the Number of values in List : "))

    def lista(self):
        """Prompt for `size` integers and append them to the list."""
        for _ in range(self.size):
            value = int(input("Enter the numbers : "))
            self.list1.append(value)

    def display(self):
        print(self.list1)

    def maxele(self):
        """Return the largest element (IndexError on an empty list)."""
        largest = self.list1[0]
        for item in self.list1:
            if item > largest:
                largest = item
        return largest

    def minele(self):
        """Return the smallest element (IndexError on an empty list)."""
        smallest = self.list1[0]
        for item in self.list1:
            if item < smallest:
                smallest = item
        return smallest

    def addlis(self):
        """Sum the elements into self.sum and return it."""
        self.sum = 0
        for item in self.list1:
            self.sum += item
        return self.sum

    def average(self):
        """Return self.sum / size; call addlis() first to populate self.sum."""
        return self.sum / self.size

    def __prime(self, no):
        # Trial division up to no // 2.
        for divisor in range(2, (no // 2) + 1):
            if no % divisor == 0:
                return False
        return True

    def checkprime(self):
        """Print every stored number that is prime."""
        for index in range(0, self.size):
            candidate = self.list1[index]
            if self.__prime(candidate):
                print(f"{candidate}Prime number")
print(f"{self.list1[i]}Prime number")
def main():
    """Interactive driver: read numbers, then print basic statistics."""
    # Construction prompts for the list size (input() runs in __init__).
    obj = addlist()
    obj.lista()
    obj.display()
    maxx = obj.maxele()
    print("Maximum number from list is : ",maxx)
    minn = obj.minele()
    # NOTE(review): label says "Maximum" but this prints the minimum.
    print("Maximum number from list is : ", minn)
    addList = obj.addlis()
    print("Addition of Element in List are : ", addList)
    avgg = obj.average()
    print("Average is : ",avgg)
    # checkprime() prints its matches itself and returns None, so this
    # line additionally prints "None".
    print(obj.checkprime())
if __name__=="__main__":
main() | Shantanu-gilbile/Python-Programs | oop3.py | oop3.py | py | 1,795 | python | en | code | 0 | github-code | 13 |
72093790739 | import objects
from get_grand_prix_name import GetGrandPrixNameFromCommandLineArguments
import load_config
import load_predictions
import load_race_results
def ProcessProgressionPerformance(grand_prix_name, active_year):
    """Print, per driver, qualifying position, race position, and net places gained."""
    results = load_race_results.ReadRaceResults(grand_prix_name, active_year)
    #predictions = load_predictions.ReadPredictions(grand_prix_name, active_year)
    drivers = results.GetDrivers()
    drivers.Sort()
    for driver in drivers:
        quali_pos = results.GetQualifyingResult(driver).position
        race_pos = results.GetRaceResult(driver).position
        # Net progression: positive means places gained during the race.
        gained = (20 - race_pos) - (20 - quali_pos)
        print(str(driver) + ", " + str(quali_pos) + ", " + str(race_pos) + ", " + str(gained))
if __name__== "__main__":
    # Load season config, resolve the GP name from argv, then report.
    config = load_config.read_config()
    grand_prix_name = GetGrandPrixNameFromCommandLineArguments(default=config.default_race)
    ProcessProgressionPerformance(grand_prix_name, config.current_year)
| JamesScanlan/f1ftw | f1ftw/calculate_progression_performance.py | calculate_progression_performance.py | py | 1,006 | python | en | code | 0 | github-code | 13 |
30138929692 | from flask import Blueprint
from zou.app.utils.api import configure_api_from_blueprint
from zou.app.blueprints.files.resources import (
WorkingFilePathResource,
LastWorkingFilesResource,
ModifiedFileResource,
CommentWorkingFileResource,
NewWorkingFileResource,
TaskWorkingFilesResource,
EntityWorkingFilesResource,
EntityOutputFilePathResource,
GetNextEntityOutputFileRevisionResource,
NewEntityOutputFileResource,
LastEntityOutputFilesResource,
EntityOutputTypesResource,
EntityOutputTypeOutputFilesResource,
InstanceOutputFilePathResource,
NewInstanceOutputFileResource,
GetNextInstanceOutputFileRevisionResource,
LastInstanceOutputFilesResource,
InstanceOutputTypesResource,
InstanceOutputTypeOutputFilesResource,
EntityOutputFilesResource,
InstanceOutputFilesResource,
SetTreeResource,
FileResource,
WorkingFileFileResource,
GuessFromPathResource,
)
# (route, resource) pairs registered on the "files" blueprint below.
routes = [
    # Plain file access by id.
    ("/data/files/<file_id>", FileResource),
    # Working files attached to a task.
    ("/data/tasks/<task_id>/working-files", TaskWorkingFilesResource),
    ("/data/tasks/<task_id>/working-files/new", NewWorkingFileResource),
    (
        "/data/tasks/<task_id>/working-files/last-revisions",
        LastWorkingFilesResource,
    ),
    ("/data/tasks/<task_id>/working-file-path", WorkingFilePathResource),
    # Output files of an asset instance inside a temporal entity.
    (
        "/data/asset-instances/<asset_instance_id>"
        "/entities/<temporal_entity_id>/output-files/new",
        NewInstanceOutputFileResource,
    ),
    (
        "/data/asset-instances/<asset_instance_id>"
        "/entities/<temporal_entity_id>/output-files/next-revision",
        GetNextInstanceOutputFileRevisionResource,
    ),
    (
        "/data/asset-instances/<asset_instance_id>"
        "/entities/<temporal_entity_id>/output-files/last-revisions",
        LastInstanceOutputFilesResource,
    ),
    (
        "/data/asset-instances/<asset_instance_id>"
        "/entities/<temporal_entity_id>/output-types",
        InstanceOutputTypesResource,
    ),
    (
        "/data/asset-instances/<asset_instance_id>"
        "/entities/<temporal_entity_id>/output-types"
        "/<output_type_id>/output-files",
        InstanceOutputTypeOutputFilesResource,
    ),
    (
        "/data/asset-instances/<asset_instance_id>"
        "/entities/<temporal_entity_id>/output-file-path",
        InstanceOutputFilePathResource,
    ),
    # Working/output files attached directly to an entity.
    ("/data/entities/<entity_id>/working-files", EntityWorkingFilesResource),
    (
        "/data/entities/<entity_id>/output-files/new",
        NewEntityOutputFileResource,
    ),
    (
        "/data/entities/<entity_id>/output-files/next-revision",
        GetNextEntityOutputFileRevisionResource,
    ),
    (
        "/data/entities/<entity_id>/output-files/last-revisions",
        LastEntityOutputFilesResource,
    ),
    ("/data/entities/<entity_id>/output-types", EntityOutputTypesResource),
    (
        "/data/entities/<entity_id>/output-types/<output_type_id>/output-files",
        EntityOutputTypeOutputFilesResource,
    ),
    ("/data/entities/<entity_id>/output-files", EntityOutputFilesResource),
    (
        "/data/asset-instances/<asset_instance_id>/output-files",
        InstanceOutputFilesResource,
    ),
    (
        "/data/entities/<entity_id>/output-file-path",
        EntityOutputFilePathResource,
    ),
    ("/data/entities/guess_from_path", GuessFromPathResource),
    (
        "/data/working-files/<working_file_id>/file",
        WorkingFileFileResource,
    ),
    # Action endpoints (state-changing).
    ("/actions/projects/<project_id>/set-file-tree", SetTreeResource),
    (
        "/actions/working-files/<working_file_id>/comment",
        CommentWorkingFileResource,
    ),
    (
        "/actions/working-files/<working_file_id>/modified",
        ModifiedFileResource,
    ),
]
# Create the Flask blueprint and bind all routes to it.
blueprint = Blueprint("files", "files")
api = configure_api_from_blueprint(blueprint, routes)
| cgwire/zou | zou/app/blueprints/files/__init__.py | __init__.py | py | 3,877 | python | en | code | 152 | github-code | 13 |
33588055570 | import wolframalpha
import webbrowser as wb
import pyttsx3
import pyaudio
import speech_recognition as sr
import wikipedia
import sys
engine = pyttsx3.init('sapi5')
client = wolframalpha.Client('LX3VUJ-P5KRT24VJA')
class assistant:
    """Namespace-style helper for text-to-speech and web/knowledge lookups.

    NOTE(review): methods take `audio`/`query` as the first positional
    argument instead of `self`, so they only work when called on the class
    itself (e.g. ``assistant.say("hi")``), not on an instance -- confirm
    intended usage.
    """
    def say(audio):
        # Echo the text to the console, then speak it aloud.
        print('Computer: '+audio)
        engine.say(audio)
        engine.runAndWait()
    class search:
        """Lookup helpers: Wikipedia, WolframAlpha, or a plain browser open."""
        def wikipedia(queryWiki):
            # NOTE(review): wikipedia.search is documented to return a *list*
            # of titles; concatenating it to a str below would raise TypeError
            # -- wikipedia.summary may have been intended. Confirm.
            outcome = wikipedia.search(queryWiki)
            print("Computer: "+outcome)
            engine.say(outcome)
            engine.runAndWait()
        def wolframalpha(query):
            # Ask WolframAlpha, speak and print the first result's text.
            res = client.query(query)
            output = next(res.results).text
            output = str(output)
            print("Computer: "+output)
            engine.say(output)
            engine.runAndWait()
        def Engine(w3Address):
            # Open the given address in the default web browser.
            wb.open(w3Address)
            print("Computer: opening "+w3Address)
class control:
    """Process-control helpers (namespace-style; no `self`)."""
    def exit():
        # Terminate the assistant by raising SystemExit.
        sys.exit()
class Voice:
    """Speech-recognition helpers (namespace-style; no `self`)."""
    class get:
        def myCommand():
            """Listen once on the default microphone and print the transcript.

            NOTE(review): the bare except swallows every error (recognition
            failures, network errors, missing PyAudio); catching
            sr.UnknownValueError / sr.RequestError explicitly would be safer.
            """
            r = sr.Recognizer()
            with sr.Microphone() as source:
                print('LISTENING......')
                audio2 = r.listen(source)
            try:
                text = r.recognize_google(audio2)
                print(text)
            except:
                print("sorry! Unable to connect")
                engine.say('sorry! Unable to connect')
                engine.runAndWait()
| Debjeet-Banerjee/test | assistant.py | assistant.py | py | 1,638 | python | en | code | 0 | github-code | 13 |
19450174255 | #!/usr/bin/python
from gurobipy import *
import pandas as pd
import sys
import time
from Tree import Tree
from BendersOCT import BendersOCT
import logger
import getopt
import csv
from sklearn.model_selection import train_test_split
from utils import *
from logger import logger
def get_left_exp_integer(master, b, n, i):
    """LHS term for branch node `n`: features that are 0 for datapoint `i`."""
    zero_features = (f for f in master.cat_features if master.data.at[i, f] == 0)
    return quicksum(-master.m[i] * master.b[n, f] for f in zero_features)
def get_right_exp_integer(master, b, n, i):
    """LHS term for branch node `n`: features that are 1 for datapoint `i`."""
    one_features = (f for f in master.cat_features if master.data.at[i, f] == 1)
    return quicksum(-master.m[i] * master.b[n, f] for f in one_features)
def get_target_exp_integer(master, p, beta, n, i):
    """LHS term contributed by a prediction node `n` for datapoint `i`.

    `p`/`beta` are the incumbent values from the callback, while the
    returned expression is built on the master model's variables.

    NOTE(review): the regression branch *selects* the tighter linearisation
    using the callback values but *builds* the expression from master
    variables -- confirm this asymmetry is intended.  Also, if master.mode
    is neither "classification" nor "regression", `lhs` is unbound and the
    final return raises NameError.
    """
    label_i = master.data.at[i, master.label]
    if master.mode == "classification":
        lhs = -1 * master.beta[n, label_i]
    elif master.mode == "regression":
        # min (m[i]*p[n] - y[i]*p[n] + beta[n] , m[i]*p[n] + y[i]*p[n] - beta[n])
        if master.m[i] * p[n] - label_i * p[n] + beta[n, 1] < master.m[i] * p[n] + label_i * p[n] - beta[n, 1]:
            lhs = -1 * (master.m[i] * master.p[n] - label_i * master.p[n] + master.beta[n, 1])
        else:
            lhs = -1 * (master.m[i] * master.p[n] + label_i * master.p[n] - master.beta[n, 1])
    return lhs
def get_cut_integer(master, b, p, beta, left, right, target, i):
    """Assemble the full Benders lazy-cut expression for datapoint ``i``.

    Starts from ``g[i]`` and accumulates the per-node left/right/target
    terms; the caller adds the cut to the master problem as ``lhs <= 0``.
    """
    cut = LinExpr(0) + master.g[i]
    for node in left:
        cut = cut + get_left_exp_integer(master, b, node, i)
    for node in right:
        cut = cut + get_right_exp_integer(master, b, node, i)
    for node in target:
        cut = cut + get_target_exp_integer(master, p, beta, node, i)
    return cut
def subproblem(master, b, p, beta, i):
    """Trace datapoint ``i`` through the tree given integer values b/p/beta.

    Walks from the root (node 1) following the branching decisions until a
    terminal (prediction) node is reached.  Returns the subproblem value
    (1/0 for classification correctness, a distance-based value for
    regression) together with the ``left``/``right``/``target`` node lists
    needed to build the Benders cut for this datapoint.
    """
    label_i = master.data.at[i, master.label]
    # Start the walk at the root node.
    current = 1
    right = []
    left = []
    target = []
    subproblem_value = 0
    while True:
        # ``get_node_status`` decodes the integer solution at ``current``:
        # is it pruned, branching on a feature, or a terminal prediction?
        pruned, branching, selected_feature, terminal, current_value = get_node_status(master, b, beta, p, current)
        if terminal:
            target.append(current)
            # Internal nodes that terminate still contribute to both branch
            # sides of the cut.
            if current in master.tree.Nodes:
                left.append(current)
                right.append(current)
            if master.mode == "regression":
                subproblem_value = master.m[i] - abs(current_value - label_i)
            elif master.mode == "classification" and beta[current, label_i] > 0.5:
                subproblem_value = 1
            break
        elif branching:
            if master.data.at[i, selected_feature] == 1: # going right on the branch
                left.append(current)
                target.append(current)
                current = master.tree.get_right_children(current)
            else: # going left on the branch
                right.append(current)
                target.append(current)
                current = master.tree.get_left_children(current)
    return subproblem_value, left, right, target
##########################################################
# Defining the callback function
###########################################################
def mycallback(model, where):
    '''
    This function is called by gurobi at every node through the branch-&-bound tree while we solve the model.
    Using the argument "where" we can see where the callback has been called. We are specifically interested at nodes
    where we get an integer solution for the master problem.
    When we get an integer solution for b and p, for every datapoint we solve the subproblem which is a minimum cut and
    check if g[i] <= value of subproblem[i]. If this is violated we add the corresponding benders constraint as lazy
    constraint to the master problem and proceed. Whenever we have no violated constraint, it means that we have found
    the optimal solution.
    :param model: the gurobi model we are solving.
    :param where: the node where the callback function is called from
    :return:
    '''
    data_train = model._master.data
    mode = model._master.mode
    # Tolerance guarding against spurious cuts from floating-point noise.
    local_eps = 0.0001
    # Only act on newly found integer (MIP) solutions.
    if where == GRB.Callback.MIPSOL:
        func_start_time = time.time()
        model._callback_counter_integer += 1
        # we need the value of b,w and g
        g = model.cbGetSolution(model._vars_g)
        b = model.cbGetSolution(model._vars_b)
        p = model.cbGetSolution(model._vars_p)
        beta = model.cbGetSolution(model._vars_beta)
        added_cut = 0
        # We only want indices that g_i is one!
        for i in data_train.index:
            if mode == "classification":
                g_threshold = 0.5
            elif mode == "regression":
                g_threshold = 0
            if g[i] > g_threshold:
                subproblem_value, left, right, target = subproblem(model._master, b, p, beta, i)
                # Violated -> add the Benders optimality cut lazily.
                if mode == "classification" and subproblem_value == 0:
                    added_cut = 1
                    lhs = get_cut_integer(model._master, b, p, beta, left, right, target, i)
                    model.cbLazy(lhs <= 0)
                elif mode == "regression" and ((subproblem_value + local_eps) < g[i]):
                    added_cut = 1
                    lhs = get_cut_integer(model._master, b, p, beta, left, right, target, i)
                    model.cbLazy(lhs <= 0)
        func_end_time = time.time()
        func_time = func_end_time - func_start_time
        # print(model._callback_counter)
        model._total_callback_time_integer += func_time
        if added_cut == 1:
            model._callback_counter_integer_success += 1
            model._total_callback_time_integer_success += func_time
def main(argv):
    """Train a BendersOCT decision tree from the command line and log results.

    Options:
      -f/--input_file    csv file name under ../../DataSets/
      -d/--depth         tree depth
      -t/--timelimit     solver time limit in seconds
      -l/--lambda        regularization weight in the objective
      -i/--input_sample  1-based index into the random-seed list for the split
      -c/--calibration   1 -> train on the 50% split (lambda-calibration run)
      -m/--mode          "classification" (default) or "regression"

    Writes a console log, the LP file and one csv row of metrics to
    ../../Results/.
    """
    print(argv)
    input_file = None
    depth = None
    time_limit = None
    _lambda = None
    input_sample = None
    calibration = None
    mode = "classification"
    '''
    Depending on the value of input_sample we choose one of the following random seeds and then split the whole data
    into train, test and calibration
    '''
    random_states_list = [41, 23, 45, 36, 19, 123]
    try:
        opts, args = getopt.getopt(argv, "f:d:t:l:i:c:m:",
                                   ["input_file=", "depth=", "timelimit=", "lambda=",
                                    "input_sample=",
                                    "calibration=", "mode="])
    except getopt.GetoptError:
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-f", "--input_file"):
            input_file = arg
        elif opt in ("-d", "--depth"):
            depth = int(arg)
        elif opt in ("-t", "--timelimit"):
            time_limit = int(arg)
        elif opt in ("-l", "--lambda"):
            _lambda = float(arg)
        elif opt in ("-i", "--input_sample"):
            input_sample = int(arg)
        elif opt in ("-c", "--calibration"):
            calibration = int(arg)
        elif opt in ("-m", "--mode"):
            mode = arg

    start_time = time.time()
    # ``os`` is not imported explicitly above; presumably it arrives via
    # ``from utils import *`` — confirm.
    data_path = os.getcwd() + '/../../DataSets/'
    data = pd.read_csv(data_path + input_file)
    '''Name of the column in the dataset representing the class label.
    In the datasets we have, we assume the label is target. Please change this value at your need'''
    label = 'target'

    # Tree structure: We create a tree object of depth d
    tree = Tree(depth)

    ##########################################################
    # output setup
    ##########################################################
    # NOTE(review): the name says FlowOCT although this script runs BendersOCT;
    # kept unchanged so existing result files/paths stay comparable.
    approach_name = 'FlowOCT'
    out_put_name = input_file + '_' + str(input_sample) + '_' + approach_name + '_d_' + str(depth) + '_t_' + str(
        time_limit) + '_lambda_' + str(
        _lambda) + '_c_' + str(calibration)
    out_put_path = os.getcwd() + '/../../Results/'
    # Using logger we log the output of the console in a text file
    sys.stdout = logger(out_put_path + out_put_name + '.txt')

    ##########################################################
    # data splitting
    ##########################################################
    '''
    Creating train, test and calibration datasets
    We take 50% of the whole data as training, 25% as test and 25% as calibration
    When we want to calibrate _lambda, for a given value of _lambda we train the model on train and evaluate
    the accuracy on calibration set and at the end we pick the _lambda with the highest accuracy.
    When we got the calibrated _lambda, we train the mode on (train+calibration) which we refer to it as
    data_train_calibration and evaluate the accuracy on (test)
    '''
    data_train, data_test = train_test_split(data, test_size=0.25, random_state=random_states_list[input_sample - 1])
    data_train_calibration, data_calibration = train_test_split(data_train, test_size=0.33,
                                                                random_state=random_states_list[input_sample - 1])
    if calibration == 1:  # in this mode, we train on 50% of the data; otherwise we train on 75% of the data
        data_train = data_train_calibration
    train_len = len(data_train.index)

    ##########################################################
    # Creating and Solving the problem
    ##########################################################
    # We create the master problem by passing the required arguments;
    # mycallback adds the Benders cuts lazily at every integer MIP solution.
    master = BendersOCT(data_train, label, tree, _lambda, time_limit, mode)
    master.create_master_problem()
    master.model.update()
    master.model.optimize(mycallback)
    end_time = time.time()
    solving_time = end_time - start_time

    ##########################################################
    # Preparing the output
    ##########################################################
    b_value = master.model.getAttr("X", master.b)
    beta_value = master.model.getAttr("X", master.beta)
    p_value = master.model.getAttr("X", master.p)

    print("\n\n")
    print_tree(master, b_value, beta_value, p_value)
    print('\n\nTotal Solving Time', solving_time)
    print("obj value", master.model.getAttr("ObjVal"))
    print('Total Callback counter (Integer)', master.model._callback_counter_integer)
    print('Total Successful Callback counter (Integer)', master.model._callback_counter_integer_success)
    print('Total Callback Time (Integer)', master.model._total_callback_time_integer)
    print('Total Successful Callback Time (Integer)', master.model._total_callback_time_integer_success)

    ##########################################################
    # Evaluation
    ##########################################################
    '''
    For classification we report accuracy
    For regression we report MAE (Mean Absolute Error) , MSE (Mean Squared Error) and R-squared
    over training, test and the calibration set
    '''
    train_acc = test_acc = calibration_acc = 0
    train_mae = test_mae = calibration_mae = 0
    train_mse = test_mse = calibration_mse = 0
    train_r_squared = test_r_squared = calibration_r_squared = 0
    if mode == "classification":
        train_acc = get_acc(master, data_train, b_value, beta_value, p_value)
        test_acc = get_acc(master, data_test, b_value, beta_value, p_value)
        calibration_acc = get_acc(master, data_calibration, b_value, beta_value, p_value)
    elif mode == "regression":
        train_mae = get_mae(master, data_train, b_value, beta_value, p_value)
        test_mae = get_mae(master, data_test, b_value, beta_value, p_value)
        calibration_mae = get_mae(master, data_calibration, b_value, beta_value, p_value)

        train_mse = get_mse(master, data_train, b_value, beta_value, p_value)
        test_mse = get_mse(master, data_test, b_value, beta_value, p_value)
        calibration_mse = get_mse(master, data_calibration, b_value, beta_value, p_value)

        # BUG FIX: these three results were previously assigned to
        # train_r2/test_r2/calibration_r2, so the *_r_squared values printed
        # and written to the csv below always stayed at their 0 initializers.
        train_r_squared = get_r_squared(master, data_train, b_value, beta_value, p_value)
        test_r_squared = get_r_squared(master, data_test, b_value, beta_value, p_value)
        calibration_r_squared = get_r_squared(master, data_calibration, b_value, beta_value, p_value)

    print("obj value", master.model.getAttr("ObjVal"))
    if mode == "classification":
        print("train acc", train_acc)
        print("test acc", test_acc)
        print("calibration acc", calibration_acc)
    elif mode == "regression":
        print("train mae", train_mae)
        print("train mse", train_mse)
        print("train r^2", train_r_squared)

    ##########################################################
    # writing info to the file
    ##########################################################
    master.model.write(out_put_path + out_put_name + '.lp')
    # writing info to the file
    result_file = out_put_name + '.csv'
    with open(out_put_path + result_file, mode='a') as results:
        results_writer = csv.writer(results, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
        if mode == "classification":
            results_writer.writerow(
                [approach_name, input_file, train_len, depth, _lambda, time_limit,
                 master.model.getAttr("Status"), master.model.getAttr("ObjVal"), train_acc,
                 master.model.getAttr("MIPGap") * 100, master.model.getAttr("NodeCount"), solving_time,
                 master.model._total_callback_time_integer, master.model._total_callback_time_integer_success,
                 master.model._callback_counter_integer, master.model._callback_counter_integer_success,
                 test_acc, calibration_acc, input_sample])
        elif mode == "regression":
            results_writer.writerow(
                [approach_name, input_file, train_len, depth, _lambda, time_limit,
                 master.model.getAttr("Status"),
                 master.model.getAttr("ObjVal"), train_mae, train_mse, train_r_squared,
                 master.model.getAttr("MIPGap") * 100, master.model.getAttr("NodeCount"), solving_time,
                 master.model._total_callback_time_integer, master.model._total_callback_time_integer_success,
                 master.model._callback_counter_integer, master.model._callback_counter_integer_success,
                 test_mae, calibration_mae,
                 test_mse, calibration_mse,
                 test_r_squared, calibration_r_squared,
                 input_sample])


if __name__ == "__main__":
    main(sys.argv[1:])
| D3M-Research-Group/StrongTree | Code/StrongTree/BendersOCTReplication.py | BendersOCTReplication.py | py | 14,276 | python | en | code | 11 | github-code | 13 |
37454126363 | import numpy as np
import math
from numpy.linalg import inv
import matplotlib.pyplot as plt
# Recursive average estimator (Kalman-filtering textbook listing): estimate AK
# is updated with a fixed gain of 0.25 toward four constant measurements XS_*.
# NOTE(review): with gain 0.25 and 4 samples the update collapses to
# AK <- AK + (ave - AK), so AK equals the sample mean after ONE iteration;
# the remaining 4 loop passes leave it unchanged.
ArrayK = []
XS_1 = 1.2
XS_2 = 0.2
XS_3 = 2.9
XS_4 = 2.1
# True sample mean, plotted as the reference line.
ave = 0.25*(XS_1 + XS_2 + XS_3 + XS_4)
AK = 0.0
K = 1
ArrayA = [AK]
ArrayK = [0]
while (K <= 5):
    # Current estimate used as the prediction for every measurement.
    XH_1 = AK
    XH_2 = AK
    XH_3 = AK
    XH_4 = AK
    # Residuals (measurement minus prediction).
    deltaX1 = XS_1 - XH_1
    deltaX2 = XS_2 - XH_2
    deltaX3 = XS_3 - XH_3
    deltaX4 = XS_4 - XH_4
    AK = AK + 0.25*(deltaX1 + deltaX2 + deltaX3 + deltaX4)
    print(AK)
    ArrayA.append(AK)
    ArrayK.append(K)
    K = K+1
plt.figure(1)
plt.grid(True)
plt.plot(ArrayK,ArrayA,label='a', linewidth=0.6)
plt.plot(ArrayK,[ave for a in range(0,6)],label='ave', linewidth=0.6)
plt.xlabel('Time (Sec)')
plt.ylabel('X Estimate and True Signal')
plt.legend()
plt.show()
| jgonzal3/KalmanFiltering | Chapter18/listing18_1.py | listing18_1.py | py | 741 | python | en | code | 1 | github-code | 13 |
3659551352 | import pandas as pd
import sys
from pathlib import Path
# Set console output formatting
pd.set_option("display.max_columns", 800)
pd.set_option("display.width", 800)
# Define scripts working directory
# NOTE(review): derived from sys.argv[0], so this is the directory of the
# *invoked* script, not necessarily of this module (unlike __file__ below).
PWD = Path(sys.argv[0]).absolute().parent
# Set root directory for fractile results
ROOT_RES = Path(__file__).parents[2] / "3_fractile_calcs" / "results"
# Set root directory for fractiles output
ROOT_OUT = PWD.parent / "figures"
# Define directory with case info blocks to put on plots
DIR_INFO = PWD / "info"
# Define plotting dictionaries
# FIXME: left is assumed to be U* and right is assumed to be 1-U*; fix wording
# Matplotlib linestyles keyed by curve role ("contribs", "left", ...) or by
# fractile label ("Mean", "0.5", ...); tuples are (offset, on/off) dash specs.
LS_DICT = {
    "contribs": "solid",
    "left": (0, (3, 3)),
    "right": (0, (6, 2)),
    "folded": "solid",
    "Mean": "solid",
    "0.5": "dashdot",
    "0.16": (0, (3, 3)),
    "0.84": (0, (3, 3)),
    "0.05": "dotted",
    "0.95": "dotted",
}
# Line colors per curve role (matplotlib named colors).
LC_DICT = {
    "left": "tab:blue",
    "right": "tab:orange",
    "folded": "black",
    "contribs": "gray",
}
# Line widths per curve role / fractile label (points).
LW_DICT = {
    "contribs": 0.2,
    "folded": 0.8,
    "left": 0.8,
    "right": 0.8,
    "Mean": 0.8,
    "0.5": 0.75,
    "0.16": 0.75,
    "0.84": 0.75,
    "0.05": 0.9,
    "0.95": 0.9,
}
# Define axis limits
# Per-case x/y axis ranges, keyed by case identifier.
LIMS_DICT = {
    "norcia_case1": {"x": [0.01, 10], "y": [1e-6, 1e-3]},
    "le_teil_case2": {"x": [0.01, 10], "y": [1e-7, 1e-4]},
    "le_teil_extra": {"x": [0.01, 10], "y": [1e-8, 1e-3]},
    "kumamoto_case3": {"x": [0.01, 10], "y": [1e-8, 1e-3]},
    "kumamoto_case2": {"x": [0.01, 10], "y": [1e-8, 1e-3]},
}
# Define Kumamoto Sensitivity #2 dictionaries for source contribution plots
# Map internal scenario keys to the display names used in plot legends.
KM_ID_DICT = {
    "Float": "Float (Unwtd. Branch)",
    "F2": "F2 (Unwtd. Branch)",
    "F1_F2": "F1+F2",
    "F2_F3": "F2+F3",
    "Full": "F1+F2+F3",
    "Combined_F2_and_Float": "Combined F2/Float Branch",
    "Total": "Total",
}
# Line colors per scenario key.
KM_LC_DICT = {
    "Float": "tab:purple",
    "F2": "tab:green",
    "F1_F2": "k",
    "F2_F3": "k",
    "Full": "k",
    "Combined_F2_and_Float": "gray",
    "Total": "tab:red",
}
# Line styles per scenario key; tuples are (offset, on/off) dash specs.
KM_LS_DICT = {
    "Float": "solid",
    "F2": "solid",
    "F1_F2": "dotted",
    "F2_F3": (0, (9, 3)),
    "Full": "dashdot",
    "Combined_F2_and_Float": "solid",
    "Total": "solid",
}
# Line widths per scenario key (points).
KM_LW_DICT = {
    "Float": 0.5,
    "F2": 0.5,
    "F1_F2": 0.9,
    "F2_F3": 0.75,
    "Full": 0.75,
    "Combined_F2_and_Float": 0.75,
    "Total": 0.9,
}
| asarmy/iaea-benchmarking-kea22 | 4_plotting/scripts/plotting_config.py | plotting_config.py | py | 2,360 | python | en | code | 0 | github-code | 13 |
37165091333 | from os import remove
from os.path import exists
from tempfile import mkdtemp, NamedTemporaryFile
from .test_utils import TestCase
from pulsar.cache import Cache
from shutil import rmtree
class CacheTest(TestCase):
    """Tests for the pulsar file ``Cache``."""

    def setUp(self):
        # Fresh cache directory plus one on-disk source file per test.
        self.cache_dir = mkdtemp()
        source = NamedTemporaryFile(delete=False)
        source.write(b"Hello World!")
        source.close()
        self.source_path = source.name
        self.cache = Cache(self.cache_dir)

    def tearDown(self):
        rmtree(self.cache_dir)
        if exists(self.source_path):
            remove(self.source_path)

    def test_inserted_only_once(self):
        """The first cache_required for a (host, path) inserts; repeats do not."""
        first = self.cache.cache_required("127.0.0.2", "/galaxy/dataset10001.dat")
        second = self.cache.cache_required("127.0.0.2", "/galaxy/dataset10001.dat")
        assert first
        assert not second

    def test_making_file_available(self):
        """A file reports 'ready' only after cache_file has stored it."""
        host, path = "127.0.0.2", "/galaxy/dataset10001.dat"
        assert self.cache.cache_required(host, path)
        assert not self.cache.file_available(host, path)["ready"]
        self.cache.cache_file(self.source_path, host, path)
        assert self.cache.file_available(host, path)["ready"]
| galaxyproject/pulsar | test/cache_test.py | cache_test.py | py | 1,309 | python | en | code | 37 | github-code | 13 |
72599707537 | import pytest
from web_driver_setup import WebDriverSetup
from common.test_login import login
from page_object.pages.menu_bar import MenuBar
from page_object.pages.project_page import ProjectPage
from page_object.pages.file_browser_page import FileBrowserPage
from selenium.webdriver.common.keys import Keys
driver = WebDriverSetup().driver
class TestAddAttachment:
    """UI test: add a 'plan' attachment to a project and create a directory.

    NOTE(review): test_before/test_add_attachment/test_after share the
    module-level ``driver`` and rely on execution order (login first, quit
    last) — confirm the runner preserves definition order, otherwise these
    belong in setup/teardown fixtures.
    """
    def test_before(self):
        login(driver)
    def test_add_attachment(self):
        # choose Project from the menu
        menu = MenuBar(driver)
        menu.wait_for_menu(5)
        menu.get_btn_projekt().click()
        # open attachment dropdown
        project = ProjectPage(driver)
        project.wait_for_btn_add_attachment(5)
        project.get_btn_add_attachment().click()
        project.wait_for_btn_add_plan(5)
        project.get_btn_add_plan().click()
        # add new directory to plan
        # The file browser opens in a new window/tab; switch to the latest one.
        driver.switch_to.window(driver.window_handles[-1])
        browser = FileBrowserPage(driver)
        browser.wait_for_btn_create_dir(5)
        browser.get_btn_create_dir().click()
        browser.wait_for_fld_dir_name(5)
        browser.get_fld_dir_name().clear()
        browser.get_fld_dir_name().send_keys('testtesttest', Keys.ENTER)
        # verify that the new directory was added
        browser.wait_for_dir_name(5, 'testtesttest')
    def test_after(self):
        driver.quit()
| Mrkabu/zadanie_rekrutacyjne | zadanie_rekrutacyjne/tests/test_add_attachment.py | test_add_attachment.py | py | 1,384 | python | en | code | 0 | github-code | 13 |
37387320115 | from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import status
from Accounts.models import User
from .models import *
from .serializers import *
# Create your views here.
# POST api - insert address
# (no GET handler is defined on this view; the old "GET api - by admin" note
# was stale)
class Insert_Address(APIView):
    """Create an ``Address`` owned by ``owner_UUID`` and managed by ``admin_UUID``.

    The POST body must contain ``admin_UUID``, ``owner_UUID`` and every
    address field referenced below.  On success the serialized address is
    returned; on any failure ``{"success": False, "message": "Error"}`` is
    returned — still with HTTP 200 for backward compatibility with existing
    clients, although a 4xx status would be more RESTful.
    """
    authentication_classes = [JWTAuthentication]
    permission_classes = [IsAuthenticated]

    def post(self, request):
        data = request.data
        try:
            admin_obj = User.objects.get(UUID=data['admin_UUID'])
            user_obj = User.objects.get(UUID=data['owner_UUID'])
            addr_obj = Address.objects.create(admin_incharge=admin_obj,owned_by=user_obj,
                                        token_id=data['token_id'],area_code=data['area_code'],
                                        city=data['city'],state=data['state'],category=data['category'],
                                        district=data['district'],sub_registrar_office=data['sub_registrar_office'],
                                        village=data['village'],ward_no=data['ward_no'],
                                        total_extend=data['total_extend'],extend_of_land=data['extend_of_land'],
                                        street_name=data['street_name'],door_no=data['door_no'],house_no=data['house_no'],
                                        dimension=data['dimension'],metadata=data['metadata'],in_marketplace=data['in_marketplace'])
            serializer = AddressSerializer(addr_obj)
            return Response({"success":True,"message":"Inserted address successfully","data":serializer.data},status=status.HTTP_200_OK)
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; ``Exception`` keeps the best-effort behaviour
            # (missing keys, unknown UUIDs, DB errors) without masking those.
            return Response({"success":False,"message":"Error"},status=status.HTTP_200_OK)
| buttonchicken/LandRegistration | Address/views.py | views.py | py | 1,928 | python | en | code | 0 | github-code | 13 |
30139610262 | """Add nb assets ready column
Revision ID: a66508788c53
Revises: 1e150c2cea4d
Create Date: 2021-11-23 00:07:43.717653
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "a66508788c53"
down_revision = "1e150c2cea4d"
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable integer column ``task.nb_assets_ready``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "task", sa.Column("nb_assets_ready", sa.Integer(), nullable=True)
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop ``task.nb_assets_ready`` (reverses :func:`upgrade`)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("task", "nb_assets_ready")
    # ### end Alembic commands ###
| cgwire/zou | zou/migrations/versions/a66508788c53_add_nb_assets_ready.py | a66508788c53_add_nb_assets_ready.py | py | 693 | python | en | code | 152 | github-code | 13 |
34326081472 | import os
import fnmatch
from PIL import Image
def resize(in_path, out_path, size=64):
    """Resize every ``*.jpg`` in *in_path* to ``size`` x ``size`` pixels.

    Results keep their original file name and are written into *out_path*
    (created if missing).  ``resample=1`` is PIL's LANCZOS/ANTIALIAS filter;
    ``reducing_gap`` speeds up large downscales.
    """
    img_dim = (size, size)
    # Robustness: Image.save would fail if the target directory is absent.
    os.makedirs(out_path, exist_ok=True)
    for entry in os.scandir(in_path):
        # Match on the entry *name* explicitly (the old code passed the
        # DirEntry itself, relying on its implicit __fspath__ conversion).
        if entry.is_file() and fnmatch.fnmatch(entry.name, '*.jpg'):
            with Image.open(entry.path) as img:
                img = img.resize(img_dim, resample=1, reducing_gap=3)
                img.save(os.path.join(out_path, entry.name))
                print(os.path.join(out_path, entry.name))
in_path = os.path.join(os.getcwd(), 'images')
out_path = os.path.join(in_path, '64')
resize(in_path, out_path) | leoagneau/Bib_Racer | RBNR_lixilinx/resize_to_64_64.py | resize_to_64_64.py | py | 537 | python | en | code | 2 | github-code | 13 |
16644738377 | """
You are given two non-empty linked lists representing two non-negative integers.
The digits are stored in reverse order, and each of their nodes contains a single digit.
Add the two numbers and return the sum as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
Example 1:
Input: l1 = [2,4,3], l2 = [5,6,4]
Output: [7,0,8]
Explanation: 342 + 465 = 807.
Example 2:
Input: l1 = [0], l2 = [0]
Output: [0]
Example 3:
Input: l1 = [9,9,9,9,9,9,9], l2 = [9,9,9,9]
Output: [8,9,9,9,0,0,0,1]
Constraints:
The number of nodes in each linked list is in the range [1, 100].
0 <= Node.val <= 9
It is guaranteed that the list represents a number that does not have leading zeros.
"""
# Definition for singly-linked list.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
    def addTwoNumbers(self, l1, l2, c=0):
        """Add two numbers stored as reversed-digit linked lists.

        :type l1: ListNode
        :type l2: ListNode
        :param c: initial carry (kept for backward compatibility)
        :rtype: ListNode

        Grade-school column addition, iterative with a dummy head.  Unlike
        the previous recursive version this neither mutates the input lists
        (which used to be padded with zero nodes) nor risks hitting the
        recursion limit on long inputs.
        """
        dummy = ListNode()
        tail = dummy
        carry = c
        while l1 is not None or l2 is not None or carry:
            total = carry
            if l1 is not None:
                total += l1.val
                l1 = l1.next
            if l2 is not None:
                total += l2.val
                l2 = l2.next
            # carry feeds the next column; digit stays in this one.
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
| JeffreyAsuncion/CodingProblems_Python | LeetCode/LC002.py | LC002.py | py | 2,105 | python | en | code | 0 | github-code | 13 |
31625649654 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 15:37:12 2016
@author: ajaver
"""
import tables
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import glob, os
from MWTracker.trackWorms.checkHeadOrientation import isWormHTSwitched
from MWTracker.intensityAnalysis.getIntensityProfile import getWidthWinLimits
from MWTracker.featuresAnalysis.getFilteredFeats import worm_partitions
#getWidthWinLimits(width_resampling, width_percentage)
#ff = '/Users/ajaver/Desktop/Videos/single_worm/agar_2/MaskedVideos/C11D2.2 (ok1565)IV on food L_2011_08_24__10_24_57___7___2.hdf5'
#ff = '/Users/ajaver/Desktop/Videos/single_worm/agar_2/MaskedVideos/acr-6 (ok3117)I on food L_2010_02_23__13_11_55___1___13.hdf5'
#ff = '/Users/ajaver/Desktop/Videos/single_worm/agar_1/MaskedVideos/goa-1 on food R_2009_10_30__15_20_35___4___8.hdf5'
#ff = ff.replace('\\', '')
#check_dir = '/Users/ajaver/Desktop/Videos/single_worm/agar_goa/MaskedVideos/'
#check_dir = '/Users/ajaver/Desktop/Videos/single_worm/agar_1/MaskedVideos/'
#check_dir = '/Users/ajaver/Desktop/Videos/single_worm/agar_2/MaskedVideos/'
check_dir = '/Users/ajaver/Desktop/Videos/single_worm/swimming/MaskedVideos/'
#import peakutils
#
#def getPeaks(signal, thres, min_dist):
# base = peakutils.baseline(signal)
# XX = signal-base
# bot = np.min(XX)
# top = np.max(XX)
# XX = (XX -bot)/(top-bot)
# peakind = peakutils.indexes(XX, thres=thres, min_dist=min_dist)
# return peakind
#
#def getExtrema(signal,thres, min_dist):
# minima = getPeaks(-signal, thres, min_dist)
# maxima = getPeaks(signal, thres, min_dist)
# return minima, maxima
def searchIntPeaks(median_int, d_search = [7, 25, 35, 45]):
length_resampling = median_int.shape[0]
peaks_ind = []
hh = 0
tt = length_resampling
for ii, ds in enumerate(d_search):
func_search = np.argmin if ii % 2 == 0 else np.argmax
hh = func_search(median_int[hh:ds]) + hh
dd = length_resampling-ds
tt = func_search(median_int[dd:tt]) + dd
peaks_ind.append((hh,tt))
return peaks_ind
all_median = []
for ff in glob.glob(os.path.join(check_dir, '*')):
ff = ff.replace('MaskedVideos', 'Results')
base_name = os.path.split(ff)[1].rpartition('.')[0]
print(base_name)
trajectories_file = ff[:-5] + '_trajectories.hdf5'
skeletons_file = ff[:-5] + '_skeletons.hdf5'
intensities_file = ff[:-5] + '_intensities.hdf5'
try:
with tables.File(skeletons_file, 'r') as fid:
if fid.get_node('/skeleton')._v_attrs['has_finished'] != 4:
raise
except:
continue
with pd.HDFStore(trajectories_file, 'r') as fid:
plate_worms = fid['/plate_worms']
with pd.HDFStore(skeletons_file, 'r') as fid:
trajectories_data = fid['/trajectories_data']
with tables.File(skeletons_file, 'r') as fid:
skeletons = fid.get_node('/skeleton')[:]
with tables.File(intensities_file, 'r') as fid:
worm_int = fid.get_node('/straighten_worm_intensity_median')[:].astype(np.float)
#worm_int_map = fid.get_node('/straighten_worm_intensity')[:].astype(np.float)
worm_int -= np.median(worm_int, axis=1)[:, np.newaxis]
#%%
is_switch_skel, roll_std = isWormHTSwitched(skeletons, segment4angle = 5, max_gap_allowed = 10, \
window_std = 25, min_block_size=250)
head_angle = np.nanmedian(roll_std['head_angle'])
tail_angle = np.nanmedian(roll_std['tail_angle'])
p_mov = head_angle/(head_angle + tail_angle)
#%%
median_int = np.median(worm_int, axis=0)
all_median.append((base_name, median_int))
peaks_ind = searchIntPeaks(median_int, d_search = [7, 25, 35, 45])
headbot2neck = median_int[peaks_ind[3][0]] - median_int[peaks_ind[2][0]]
headbot2neck = 0 if headbot2neck < 0 else headbot2neck
tailbot2waist = median_int[peaks_ind[3][1]] - median_int[peaks_ind[2][1]]
tailbot2waist = 0 if tailbot2waist < 0 else tailbot2waist
p_int_bot = headbot2neck/(headbot2neck+tailbot2waist)
headtop2bot = median_int[peaks_ind[1][0]] - median_int[peaks_ind[2][0]]
headtop2bot = 0 if headtop2bot < 0 else headtop2bot
tailtop2bot = median_int[peaks_ind[1][1]] - median_int[peaks_ind[2][1]]
tailtop2bot = 0 if tailtop2bot < 0 else tailtop2bot
p_int_top = headtop2bot/(headtop2bot+tailtop2bot)
p_tot = 0.75*p_mov + 0.15*p_int_bot + 0.1*p_int_top
#if it is nan, both int changes where negatives, equal the probability to the p_mov
if p_tot != p_tot:
p_tot = p_mov
print('M %2.2f | T1 %2.2f | T2 %2.2f | tot %2.2f' % (p_mov, p_int_bot, p_int_top, p_tot))
#%%
plt.figure()
plt.title(base_name)
plt.plot(median_int, label ='0.3')
strC = 'rgck'
for ii, dd in enumerate(peaks_ind):
for xx in dd:
plt.plot(xx, median_int[xx], 'o' + strC[ii])
#%%
#%%
#
# offset_a, offset_b = 5, 20
# offset_c = offset_b + 2
# offset_d = offset_c + (offset_b-offset_a)
# int_range = np.max(worm_int, axis=1) - np.min(worm_int, axis=1)
#
# #the head is likely to have a peak so we take the maximum
# head_int = np.max(worm_int[:, offset_a:offset_b], axis = 1)
# tail_int = np.median(worm_int[:, -offset_b:-offset_a], axis= 1)
#
# #while the neck is more a plateau so we take the median
# neck_int = np.max(worm_int[:, offset_c:offset_d], axis = 1)
# waist_int = np.median(worm_int[:, -offset_d:-offset_c], axis= 1)
#
#
#
# A = np.nanmedian(head_angle/tail_angle)
# I = np.nanmedian((head_int-tail_int)/int_range)
#
# In = np.nanmedian((head_int-neck_int)/int_range)
# Iw = np.nanmedian((tail_int-waist_int)/int_range)
#
# print(base_name)
# print('A: %f | I:% f |In:% f |Iw:% f ' % (A, I, In, Iw))
# #print('head_angle: %f : tail_angle %f' % (np.nanmedian(roll_std['head_angle']), np.nanmedian(roll_std['tail_angle'])))
# #print('head_int: %f : tail_int %f' % (np.median(head_int), np.median(tail_int)))
# print('')
#%%
#%%
#signal = median_int[3:-3]
#peakind_min, peakind_max = getExtrema(signal,0.1, min_dist)
#max_head = signal[peakind_max[0]]
#max_tail = signal[peakind_max[-1]]
#range_int = (max(signal) - min(signal))
#head_tail_ratio = (max_head-max_tail)/range_int
#%%
#for base_name,median_int in all_median:
# min_dist = 10
#
# plt.figure()
# signal = median_int[3:-3]
# peakind_min, peakind_max = getExtrema(signal,0.1, min_dist)
#
# max_head = signal[peakind_max[0]]
# max_tail = signal[peakind_max[-1]]
#
# range_int = (max(signal) - min(signal))
#
# head_tail_ratio = (max_head-max_tail)/range_int
#
# good = (peakind_min >= peakind_max[0]+min_dist) & (peakind_min <= peakind_max[-1]-min_dist)
# peakind_min = peakind_min[good]
#
# min_neck = signal[peakind_min[0]]
# min_waist = signal[peakind_min[-1]]
#
# head_neck_ratio = (max_head-min_neck)/range_int
# tail_waist_ratio = (max_tail-min_waist)/range_int
#
# print(head_tail_ratio, head_neck_ratio, tail_waist_ratio)
# print('')
# plt.plot(signal, label ='0.3')
# plt.plot(peakind_max, signal[peakind_max], 'or')
# plt.plot(peakind_min, signal[peakind_min], 'og')
# plt.title(base_name)
#
#%%
# for mm in [5, 20, 22, 35]:
# plt.plot((mm, mm), plt.gca().get_ylim(), 'r:')
# nn = length_resampling-mm
# plt.plot((nn, nn), plt.gca().get_ylim(), 'r:')
##
#%%
# plt.figure()
# plt.imshow(worm_int.T, interpolation='none', cmap='gray')
# plt.grid('off')
# plt.xlim((0, 1000))
# #%%
#
# wlim = getWidthWinLimits(15, 0.5)
#
# worm_int2 = np.zeros_like(worm_int)
#
# for kk in range(worm_int_map.shape[0]):
# worm_int2[kk,:] = np.median(worm_int_map[kk,:,wlim[0]:wlim[1]], axis=1)
#
# worm_int2 -= np.median(worm_int2, axis=1)[:, np.newaxis]
#%%
# plt.figure()
# #plt.imshow(worm_int_map[8000,:,wlim[0]:wlim[1]].T, interpolation='none', cmap='gray')
# plt.imshow(worm_int2.T, interpolation='none', cmap='gray')
# plt.grid('off')
# plt.xlim((0, 1000))
#%%
#plt.figure()
#plt.plot(np.std(worm_int_map[:, 40:-40,:], axis=(1,2)))
#%%
#roll_std[['head_angle', 'tail_angle']].plot()
#%%
#if False:
#%%
#plt.figure()
#plt.plot(head_int)
#plt.plot(tail_int)
#good_frames = trajectories_data.loc[trajectories_data['int_map_id']!=-1, 'frame_number'].values
#convert_f = {ff:ii for ii,ff in enumerate(good_frames)}
#%%
#plt.figure()
#plt.imshow(worm_int.T, interpolation='none', cmap='gray')
#plt.grid('off')
#%%
# ini = convert_f[20873]
# fin = convert_f[21033]
#
# plt.plot((ini, ini), plt.gca().get_ylim(), 'c:')
# plt.plot((fin, fin), plt.gca().get_ylim(), 'c--')
# plt.xlim((ini-100, fin+100))
#%%
#
#plt.figure()
##plt.imshow(worm_int_map[8000,:,wlim[0]:wlim[1]].T, interpolation='none', cmap='gray')
#plt.imshow(worm_int2.T, interpolation='none', cmap='gray')
#plt.grid('off')
#plt.xlim((5000, 6000))
#
| ver228/work-in-progress | work_in_progress/_old/worm_orientation/check_orientation_mov.py | check_orientation_mov.py | py | 9,334 | python | en | code | 0 | github-code | 13 |
class Solution:
    def divide(self, dividend: int, divisor: int) -> int:
        """Integer division truncating toward zero, clamped to 32-bit range.

        Pure integer arithmetic replaces the previous float division with
        math.floor/math.ceil, which loses precision once the quotient exceeds
        the exactly-representable float range (and needed the math module).
        """
        negative = (dividend < 0) != (divisor < 0)
        quotient = abs(dividend) // abs(divisor)
        op = -quotient if negative else quotient
        # Clamp to signed 32-bit as the problem requires (only the overflow
        # case INT_MIN / -1 can actually exceed the range).
        if op > 2**31 - 1:
            op = 2**31 - 1
        if op < -1*(2**31):
            op = -1*(2**31)
        return op
37035930893 | import tkinter as tk
import requests
# Put your OpenWeatherMap API key here ("APY" in the variable name is a typo
# for "API", kept to avoid changing code in this pass).
APY_KEY = ""
BASE_URL = "https://api.openweathermap.org/data/2.5/weather"
def get_weather():
    """Fetch current weather for the city in the entry box and update labels.

    Reads the module-level ``city_entry`` widget, queries OpenWeatherMap and
    writes the description / temperature (Celsius) into ``weather_label`` and
    ``temp_label``; shows "Error" on any failure.
    """
    city = city_entry.get()
    request_url = f"{BASE_URL}?appid={APY_KEY}&q={city}"
    try:
        # A timeout keeps the Tk event loop from hanging forever on a stalled
        # request; previously network errors also crashed the button callback.
        response = requests.get(request_url, timeout=10)
    except requests.RequestException:
        weather_label.config(text="Error")
        return
    if response.status_code == 200:
        data = response.json()
        weather = data["weather"][0]["description"]
        # API returns Kelvin; convert to Celsius.
        temp = round(data["main"]["temp"] - 273.15, 2)
        weather_label.config(text=weather)
        temp_label.config(text=temp)
    else:
        weather_label.config(text="Error")
# Build the single-window UI: a city entry, a fetch button and two output
# labels; mainloop() blocks until the window is closed.
root = tk.Tk()
root.title("Weather App")
city_label = tk.Label(root, text="Enter a city:")
city_label.pack()
city_entry = tk.Entry(root)
city_entry.pack()
get_weather_button = tk.Button(root, text="Get Weather", command=get_weather)
get_weather_button.pack()
# Populated by get_weather(): description text and temperature.
weather_label = tk.Label(root, text="")
weather_label.pack()
temp_label = tk.Label(root, text="")
temp_label.pack()
root.mainloop()
| Ma1d3n/Weather-app | Weather.py | Weather.py | py | 1,016 | python | en | code | 0 | github-code | 13 |
776253524 | from numba import njit
from oceantracker.status_modifiers._base_status_modifers import _BaseStatusModifer
from oceantracker.common_info_default_param_dict_templates import particle_info
# globals
# Particle status codes pulled out of the shared defaults dict and bound to
# plain module-level ints, so the @njit-compiled function below can read them
# as compile-time global constants.
status_stranded_by_tide = int(particle_info['status_flags']['stranded_by_tide'])
status_frozen = int(particle_info['status_flags']['frozen'])
status_moving = int(particle_info['status_flags']['moving'])
class TidalStranding(_BaseStatusModifer):
    """Status modifier that strands particles in drying cells and re-floats
    them when the cell re-wets (see tidal_stranding_from_dry_cell_index)."""

    def __init__(self):
        # set up info/attributes
        super().__init__()  # required in children to get parent defaults
        self.add_default_params({})

    def check_requirements(self):
        # 'dry_cell_index' must be present on the reader grid for update().
        # (Removed an unused local alias of self.shared_info.)
        self.check_class_required_fields_prop_etc(required_grid_var_list=['dry_cell_index'])

    def update(self, time_sec, sel):
        """Apply tidal stranding to the particles indexed by array *sel*.

        time_sec is unspecified here but kept for the modifier interface.
        """
        si = self.shared_info
        part_prop = si.classes['particle_properties']
        tidal_stranding_from_dry_cell_index(
            si.classes['reader'].grid['dry_cell_index'],
            part_prop['n_cell'].data,
            sel,
            part_prop['status'].data)
@njit
def tidal_stranding_from_dry_cell_index(dry_cell_index, n_cell, sel, status):
# look at all particles in buffer to check total water depth < min_water_depth
# use 0-255 dry cell index updated at each interpolation update
for n in sel:
if status[n] >= status_frozen:
if dry_cell_index[n_cell[n]] > 128: # more than 50% dry
status[n] = status_stranded_by_tide
elif status[n] == status_stranded_by_tide:
# unstrand if already stranded, if status is on bottom, remains as is
status[n] = status_moving | oceantracker/oceantracker | oceantracker/status_modifiers/tidal_stranding.py | tidal_stranding.py | py | 1,701 | python | en | code | 10 | github-code | 13 |
from scapy.all import *
import numpy as np
import binascii
import seaborn as sns
import pandas as pd
sns.set(color_codes=True)
# %matplotlib inline
#this will be loaded into the flask web application in realtime..
def load_analyzer(datafile="dataset/manda-telescope-12-12-09-31-25-163929788500.pcap"):
    """Sniff some live traffic, merge it with a pcap file, and return a dict of
    summary artifacts (dataframes, groupings, plots, suspicious addresses).

    NOTE(review): this sniffs 100 live packets and pops tkinter message boxes,
    so it requires capture privileges and a display; it is meant to be called
    from the Flask web application in real time (per the comment above).
    """
    payload={}
    # Capture some live packets first so payload['pcap'] has a fresh sample.
    num_of_packets_to_sniff = 100
    pcap = sniff(count=num_of_packets_to_sniff)
    print(type(pcap))
    print(len(pcap))
    print(pcap)
    payload['pcap']=pcap[0]
    # rdpcap used to Read Pcap
    pcap = pcap + rdpcap(datafile)
    pcap
    """
    0                   1                   2                   3
    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |          Source Port          |       Destination Port        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                        Sequence Number                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                    Acknowledgment Number                      |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |  Data |           |U|A|P|R|S|F|                               |
    | Offset| Reserved  |R|C|S|S|Y|I|            Window             |
    |       |           |G|K|H|T|N|N|                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |           Checksum            |         Urgent Pointer        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                    Options                    |    Padding    |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                             data                              |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    02 04 05 a0 01 03 03 05 01 01 08 0a 1d 74 65 c5 00 00 00 00 04 02 00 00
    """
    payload['pcap_data']=pcap
    # Peel one sample frame apart layer by layer: Ether -> IP -> TCP/UDP -> data.
    # NOTE(review): index 101 assumes the combined capture has > 101 packets.
    ethernet_frame = pcap[101]
    ip_packet = ethernet_frame.payload
    segment = ip_packet.payload
    data = segment.payload
    payload['ethernet_frame_summary']=ethernet_frame.summary()
    payload['packet_summary']=ip_packet.summary()
    payload['segment_summary']=segment.summary()
    payload['data_summary']=data.summary()
    ethernet_frame.show()
    payload['ethernet_frame_type']=ethernet_frame
    payload['ip_packet_type']=type(ip_packet)
    payload['segment_type']=type(segment)
    # Filtering a scapy PacketList by a layer class returns matching packets.
    ethernet_type = type(ethernet_frame)
    ip_type = type(ip_packet)
    tcp_type = type(segment)
    payload['ethernet']=pcap[ethernet_type]
    payload['ip']= pcap[ip_type]
    payload['tcp']=pcap[tcp_type]
    from scapy.layers.l2 import Ether
    from scapy.layers.inet import IP
    from scapy.layers.inet import TCP, UDP
    payload['udp']=pcap[UDP]
    # Collect field names from IP/TCP/UDP (These will be columns in DF)
    ip_fields = [field.name for field in IP().fields_desc]
    tcp_fields = [field.name for field in TCP().fields_desc]
    udp_fields = [field.name for field in UDP().fields_desc]
    dataframe_fields = ip_fields + ['time'] + tcp_fields + ['payload','payload_raw','payload_hex']
    # Create blank DataFrame
    df = pd.DataFrame(columns=dataframe_fields)
    # One DataFrame row per IP packet: IP header fields, timestamp, transport
    # fields (None where absent), then payload length / raw bytes / hex.
    for packet in pcap[IP]:
        # Field array for each row of DataFrame
        field_values = []
        # Add all IP fields to dataframe
        for field in ip_fields:
            if field == 'options':
                # Retrieving number of options defined in IP Header
                field_values.append(len(packet[IP].fields[field]))
            else:
                field_values.append(packet[IP].fields[field])
        field_values.append(packet.time)
        layer_type = type(packet[IP].payload)
        for field in tcp_fields:
            try:
                if field == 'options':
                    field_values.append(len(packet[layer_type].fields[field]))
                else:
                    field_values.append(packet[layer_type].fields[field])
            except:
                field_values.append(None)
        # Append payload
        field_values.append(len(packet[layer_type].payload))
        field_values.append(packet[layer_type].payload.original)
        field_values.append(binascii.hexlify(packet[layer_type].payload.original))
        df_append = pd.DataFrame([field_values], columns=dataframe_fields)
        df = pd.concat([df, df_append], axis=0)
    df = df.reset_index()
    df = df.drop(columns="index")
    # Basic frames/statistics exposed to the caller.
    payload['dfloc']=df.iloc[0]
    payload['df_shape']=df.shape
    payload['df_head']=df.head()
    payload['df_tail']=df.tail()
    payload['df_src']=df['src']
    df[['src','dst','sport','dport']]
    payload['top_source_addresses']=df['src'].describe()
    payload['top_destination_addresses']=df['dst'].describe()
    # 'top' of describe() on a string column is the most frequent value.
    frequent_address = df['src'].describe()['top']
    payload['frequent_address']=frequent_address
    payload['whose_address_is_speaking_to']=df[df['src'] == frequent_address]['dst'].unique()
    payload['whose_top_address_destination_ports']=df[df['src'] == frequent_address]['dport'].unique()
    payload['top_source_ports']=df[df['src'] == frequent_address]['sport'].unique()
    payload['unique_addresses']=df['src'].unique()
    payload['unique_destination_address']=df['dst'].unique()
    # Per-address byte totals, plotted as horizontal bars.
    source_addresses = df.groupby("src")['payload'].sum()
    source_addresses.plot(kind='barh',title="Addresses Sending Payloads",figsize=(8,5))
    payload['source_addresses']=source_addresses
    from tkinter import *
    from tkinter import messagebox
    messagebox.showinfo("admin alert!", "Ploting the Network analysis for the file..")
    # Group by Destination Address and Payload Sum
    destination_addresses = df.groupby("dst")['payload'].sum()
    destination_addresses.plot(kind='barh', title="Destination Addresses (Bytes Received)",figsize=(8,5))
    # Group by Source Port and Payload Sum
    source_payloads = df.groupby("sport")['payload'].sum()
    source_payloads.plot(kind='barh',title="Source Ports (Bytes Sent)",figsize=(8,5))
    destination_payloads = df.groupby("dport")['payload'].sum()
    destination_payloads.plot(kind='barh',title="Destination Ports (Bytes Received)",figsize=(8,5))
    # Time history of bytes sent by the most talkative source address.
    frequent_address_df = df[df['src'] == frequent_address]
    x = frequent_address_df['payload'].tolist()
    sns.barplot(x="time", y="payload", data=frequent_address_df[['payload','time']],
                label="Total", color="b").set_title("History of bytes sent by most frequent address")
    """# Investigating the payload
    """
    # Create dataframe with only converation from most frequent address
    frequent_address_df = df[df['src']==frequent_address]
    # Only display Src Address, Dst Address, and group by Payload
    frequent_address_groupby = frequent_address_df[['src','dst','payload']].groupby("dst")['payload'].sum()
    # Plot the Frequent address is speaking to (By Payload)
    frequent_address_groupby.plot(kind='barh',title="Most Frequent Address is Speaking To (Bytes)",figsize=(8,5))
    # Which address has excahnged the most amount of bytes with most frequent address
    suspicious_ip = frequent_address_groupby.sort_values(ascending=False).index[0]
    print(suspicious_ip, "May be a suspicious address")
    payload['the following_ip_may_be_suspicious']=suspicious_ip
    # Create dataframe with only conversation from most frequent address and suspicious address
    suspicious_df = frequent_address_df[frequent_address_df['dst']==suspicious_ip]
    payload['suspicious_df']=suspicious_df
    # Store each payload in an array
    raw_stream = []
    for p in suspicious_df['payload_raw']:
        raw_stream.append(p)
    payload['raw_stream_suspicion_payload']=raw_stream
    return payload
| Valmoe/python-machine-learning-on-pcap-files | applib.py | applib.py | py | 8,063 | python | en | code | 0 | github-code | 13 |
from ursina import *
from ursina.prefabs.first_person_controller import FirstPersonController
from math import cos, sin, radians, tan
# Muzzle velocity used by gravity() for the ballistic trajectory.
bullet_speed = 300 #m/s
app = Ursina()
# Textures loaded once at startup; files must sit next to this script.
floor_texture = load_texture('floor.png')
target_texture = load_texture('target.png')
sky_texture = load_texture('sky.jpg')
class target(Entity):
    """A flat cube target that destroys itself when a bullet overlaps it."""

    def __init__(self):
        """Spawn the target slab at its fixed position."""
        super().__init__(
            model='cube',
            texture=target_texture,
            color=color.white,
            position=(2, 3, 0),
            scale_x=2,
            scale_y=2,
            scale_z=0.5,
            collider='box',
        )

    def update(self):
        """Per-frame check: die if anything hits a zero-length boxcast here."""
        probe = boxcast(self.world_position, (0, 0, 1), ignore=(self,),
                        distance=0, thickness=(3, 3, 3), debug=False)
        if probe.hit:
            destroy(self)
# Player, the initial target, and the gunshot sound effect.
player = FirstPersonController()
Target = target()
gun_fire = Audio('gun_fire.mp3', loop=False, autoplay=False)
def distance(a, b):
    """Return the Euclidean distance between two 3-D points *a* and *b*."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    dz = a[2] - b[2]
    return (dx * dx + dy * dy + dz * dz) ** 0.5
def gravity(x, theta, c, speed=None):
    """Projectile height after horizontal distance *x* (metres).

    Implements y = -(g/2) * (x / (v*cos(theta)))**2 + tan(theta)*x + c
    with g = 9.81 m/s^2.

    Parameters
    ----------
    x : horizontal distance travelled, in metres.
    theta : launch angle in degrees.
    c : launch height, in metres.
    speed : muzzle speed in m/s; defaults to the module-level
        ``bullet_speed`` (looked up at call time, as before, so existing
        callers are unaffected — the parameter just makes the physics
        reusable and testable).
    """
    if speed is None:
        speed = bullet_speed
    return (-(9.81 / 2) * (x / (speed * cos(radians(theta)))) ** 2
            + tan(radians(theta)) * x + c)
def update():
    """Global per-frame hook called by ursina: sprint, shoot, respawn target."""
    # Sprint while left shift is held.
    player.speed = 10 if held_keys['left shift'] else 5
    if held_keys['left mouse']:
        # Spawn a bullet just above the player, aimed along the camera.
        # (Removed a per-frame `bullets` list that was filled but never read.)
        bullet(position=Vec3(player.position.x, player.position.y + 2, player.position.z),
               rotation=camera.world_rotation)
        gun_fire.play()
    if held_keys['right mouse']:
        # Spawn a fresh target; the Entity registers itself with the engine,
        # so the previous unused local binding was dead code.
        target()
class bullet(Entity):
    """A projectile that flies along the camera direction with gravity applied.

    NOTE(review): trajectory state (theta3, c, bullet_count, theta_ro,
    direction) is kept in module-level globals, so firing a second bullet
    while one is in flight resets the first bullet's trajectory — looks like
    a latent bug; confirm whether simultaneous bullets are intended.
    """
    def __init__(self, position, rotation):
        global theta3, c, bullet_count, theta_ro, direction
        super().__init__(
            model='cube',
            color=color.violet,
            position=Vec3(0, 0, 0),
            scale_x=0.2,
            scale_y=0.2,
            scale_z=0.5,
            collider='box',
            rotation=rotation
        )
        # Build a unit direction from the camera yaw (theta, about y) and
        # pitch (theta2, about x), quadrant by quadrant.
        moving_x = 0
        moving_z = 0
        moving_y = 0
        theta = camera.world_rotation_y
        theta2 = camera.world_rotation_x
        if 0 <= theta <= 90:
            moving_x = 2 * cos(radians(90 - theta))
            moving_z = 2 * sin(radians(90 - theta))
        elif 90<theta<=180:
            moving_x = 2 * cos(radians(theta-90))
            moving_z = -2 * sin(radians(theta-90))
        elif -90<=theta<0:
            moving_x = -2* cos(radians(90+theta))
            moving_z = 2* sin(radians(90+theta))
        elif -180<=theta<-90:
            moving_x = -2*cos(radians(-theta-90))
            moving_z = -2*sin(radians(-theta-90))
        if 0<=theta2<=90:
            moving_y = -2*sin(radians(theta2))
        elif -90<=theta2<0:
            moving_y = 2*sin(radians(-theta2))
        # Start the bullet one unit out from the muzzle along that direction.
        new_position = position+Vec3(moving_x, moving_y, moving_z).normalized()
        self.position = new_position
        direction = Vec3(moving_x, moving_y, moving_z).normalized()
        # Launch parameters for the ballistic arc computed in update().
        theta3 = -camera.world_rotation_x
        theta_ro = camera.world_rotation_y
        c = camera.world_position.y
        bullet_count = 0
    def update(self):
        global theta3, c, bullet_count, theta_ro, direction
        # Height along the ballistic arc after bullet_count*0.5 m of travel.
        y = gravity(bullet_count * 0.5, theta3, c)
        if theta3>=0:
            # Fired level or upward: follow the full parabola until it would
            # dip below ground (y <= 0) or the range cap is reached.
            if bullet_count < 10000:
                if y>0:
                    if 0<=theta_ro<=90:
                        self.position = Vec3(self.position.x + (bullet_count * 0.5)*cos(radians(90-theta_ro)), y, self.position.z + (bullet_count*0.5)*sin(radians(90-theta_ro)))
                    elif 90<theta_ro<=180:
                        self.position = Vec3(self.position.x + (bullet_count * 0.5)*cos(radians(theta_ro-90)), y, self.position.z - (bullet_count*0.5)*sin(radians(theta_ro-90)))
                    elif -90<=theta_ro<0:
                        self.position = Vec3(self.position.x - (bullet_count * 0.5)*cos(radians(90+theta_ro)), y, self.position.z + (bullet_count*0.5)*sin(radians(theta_ro+90)))
                    elif -180<=theta_ro<-90:
                        self.position = Vec3(self.position.x - (bullet_count * 0.5)*cos(radians(-theta_ro-90)), y, self.position.z - (bullet_count*0.5)*sin(radians(-theta_ro-90)))
                    bullet_count += 1
                else:
                    destroy(self)
            else:
                destroy(self)
        else:
            # Fired downward: move in a straight line until hitting the floor.
            if self.position.y>0:
                move_y = -sin(radians(-theta3))
                if 0 <= theta_ro <= 90:
                    move_x = cos(radians(90-theta_ro))
                    move_z = sin(radians(90-theta_ro))
                elif 90<theta_ro<=180:
                    move_x = cos(radians(theta_ro-90))
                    move_z = -sin(radians(theta_ro-90))
                elif -90<=theta_ro<0:
                    move_x = -cos(radians(90+theta_ro))
                    move_z = sin(radians(theta_ro+90))
                elif -180<=theta_ro<-90:
                    move_x = -cos(radians(-theta_ro-90))
                    move_z = -sin(radians(-theta_ro-90))
                self.position = Vec3(self.position.x+move_x*bullet_count*0.5, self.position.y+move_y*bullet_count*0.5, self.position.z+move_z*bullet_count*0.5)
                bullet_count += 1
            else:
                destroy(self)
        try:
            # Collision probe just above the bullet; on any hit (or any probe
            # error) the bullet is removed.
            origin = self.world_position + (self.up*.5)
            hit_info = boxcast(origin, direction, ignore=(self, player,), thickness=(0.2, 0.2), distance=0, debug=False)
            if hit_info.hit:
                destroy(self)
        except:
            destroy(self)
# Arena: a 100 x 100 floor, four coloured walls boxing the play area in,
# and a sky sphere rendered from the inside (double_sided=True).
floor = Entity(model='cube', scale_x=100, scale_z=100, collider='box', color=color.brown, texture=floor_texture)
wall1 = Entity(model='cube', scale_x=100, scale_y=100, collider='box', color=color.green, position=(0, 0, 50))
wall2 = Entity(model='cube', scale_x=100, scale_y=100, collider='box', color=color.blue, position=(0, 0, -50))
wall3 = Entity(model='cube', scale_z=100, scale_y=100, collider='box', color=color.red, position=(50, 0, 0))
wall4 = Entity(model='cube', scale_z=100, scale_y=100, collider='box', color=color.yellow, position=(-50, 0, 0))
sky = Entity(model='sphere', scale=200, double_sided=True, texture=sky_texture)

# BUG FIX: stray dataset metadata had been fused onto this line after the
# call, which made the statement invalid; it is now a clean call.
app.run()
import sys
import pygame as py
import math as m
import Senior_Design_Variables as sd
from Iterator import Iterator
sys.path.insert(0, 'C:/Users/drunk/PycharmProjects/pythonProject/Pygame Mechanism Module/Pygame-Mechanism-Module')
import Variables as v
from Point import Point
from CsvWriter import CsvWriter
from CsvReader import CsvReader
from Two_Bar_Planar_Linear_Actuator_Arm import Arm
from Screen import Screen
from Mouse import Mouse
'''
fileWriteName = '/home/pi/Documents/Motor Control/Normal Walking Gait/03032021.csv'
fileReadName = '/home/pi/Documents/Motor Control/Normal Walking Gait/03032021.csv'
'''
# NOTE(review): hard-coded absolute Windows paths — these will fail on any
# other machine; consider making them configurable.
fileWriteName = 'C:/Users/drunk/PycharmProjects/pythonProject/Pygame Mechanism Module/Pygame Mechanisms Projects/Hip Curves/03132021.csv'
fileReadName = 'C:/Users/drunk/PycharmProjects/pythonProject/Pygame Mechanism Module/Pygame-Mechanism-Module/Hip Curves/03132021.csv'
fileWriteNameIterator = 'C:/Users/drunk/PycharmProjects/pythonProject/Pygame Mechanism Module/Pygame Mechanisms Projects/Senior Design/Achievable Mechanisms/03262021.csv'
# Screen: 800 pixels wide representing 150 inches of workspace.
screen_dim_pix = 800
screen_dim_inch = 150
v.time_delay = 0
screen = Screen(screen_dim_pix, screen_dim_inch)
# Two-bar linkage arm driven by two linear actuators; geometry from sd module.
arm1 = Arm(screen, sd.linkLength1, sd.linkLength2, sd.actuator1_ground,
           sd.actuator2_ground, sd.actuator1_connection, sd.linkage2_connection)
csvWriter = CsvWriter(screen, fileWriteName, arm1)
csvReader = CsvReader(screen, fileReadName)
mouse = Mouse(screen)
# Reference point at the tip of link 1 when fully extended along x.
Point(screen, screen.inches_to_pixels(screen.origin_x + sd.linkLength1), screen.inches_to_pixels(screen.origin_y), 0, screen.points)
# When True the main loop runs the ground-position iteration once and exits.
iterate = True
iterator = Iterator(screen, arm1)
csvWriterIterator = CsvWriter(screen, fileWriteNameIterator, iterator)
run = True
while run:
screen.initialize()
# Mouse Position
keys = py.key.get_pressed()
mouse_press = py.mouse.get_pressed()
mouse_pos = py.mouse.get_pos()
mouse.function(mouse_pos, mouse_press, 0)
screen.check_key_commands(keys)
if iterate:
iterator.iterate_ground_positions()
sd.work_space_origin = [26, -18]
sd.ground_positions_origin = [-28, sd.work_space_origin[1] - sd.work_space[1]]
iterator.iterate_ground_positions()
sd.work_space_origin = [24, -18]
sd.ground_positions_origin = [-30, sd.work_space_origin[1] - sd.work_space[1]]
iterator.iterate_ground_positions()
break
# Calculate all position and force variables based on current point
arm1.create()
arm1.kinetics(sd.patient_weight, sd.patient_angle)
screen.draw([[arm1]])
py.quit()
| mRobinson10-28-98/Pygame-Mechanisms-Projects | Senior Design/PM_Senior_Design.py | PM_Senior_Design.py | py | 2,570 | python | en | code | 0 | github-code | 13 |
import csv
import io
import queue
import datetime
import time
import threading
class loggingTelem():
    """Consume telemetry rows from a queue and append them to a timestamped CSV.

    Fixes vs. the original:
    - the 'telemetry' output directory is created if missing (the old code
      raised FileNotFoundError when it did not exist);
    - the timestamp in the filename no longer contains ':' characters,
      which are illegal in Windows filenames;
    - logTelem() drains every queued row each cycle and flushes, instead of
      writing at most one row per 0.5 s sleep;
    - removed a leftover debug print; __del__ tolerates an already-closed file.
    """

    def __init__(self, telemQ):
        """telemQ: queue.Queue of row lists matching the header columns."""
        self.telemQ = telemQ
        self.run_flag = threading.Event()  # set() to ask logTelem() to stop
        os.makedirs('telemetry', exist_ok=True)
        stamp = datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S')
        self.path = 'telemetry/testData%s.csv' % stamp
        self.csvFile = open(self.path, 'w', newline='\n')
        self.dataWriter = csv.writer(self.csvFile, delimiter=',')
        self.header = ['time', 'loadcell', 'tc1', 'tc2']
        self.dataWriter.writerow(self.header)

    def __del__(self):
        # close_csv() may already have closed the file; don't fail on GC.
        if not self.csvFile.closed:
            self.csvFile.close()

    def logTelem(self):
        """Loop until run_flag is set, writing all queued rows to the CSV."""
        while not self.run_flag.is_set():
            # Drain everything currently queued, then flush so readers see it.
            while True:
                try:
                    row = self.telemQ.get_nowait()
                except queue.Empty:
                    break
                self.dataWriter.writerow(row)
            self.csvFile.flush()
            time.sleep(.5)

    def close_csv(self):
        self.csvFile.close()
#Test for logging. Won't get called by main file
if __name__ == "__main__":
telemQTest = queue.Queue(maxsize = 100)
loggingObj = loggingTelem(telemQTest)
#loggingObj.path = '../telemetry/testData%s.csv' % (str(datetime.datetime.today()))
telemQTest.put(['beans1','beans1','beans1','beans1'])
telemQTest.put(['beans2','beans1','beans1','beans1'])
telemQTest.put(['beans3','beans1','beans1','beans1'])
telemQTest.put(['beans4','beans1','beans1','beans1'])
loggingObj.logTelem()
| CU-SRL/DAQ | pilot/pilotModule/loggingTelem.py | loggingTelem.py | py | 1,422 | python | en | code | 2 | github-code | 13 |
# This solution beat 100% of submissions.
class Solution(object):
    def maximalSquare(self, matrix):
        """
        Return the area of the largest square of '1's in a binary matrix.

        :type matrix: List[List[str]]
        :rtype: int

        Ported from Python 2 to Python 3: the original used ``xrange`` and
        relied on ``map`` returning a list, both of which break on Python 3.
        (Chinese comments questioning the map() call are resolved here by a
        plain list comprehension over zip().)

        Strategy (unchanged): size[j] holds the run length of consecutive
        '1's ending at the current row in column j (reset on '0').  After
        updating size for a row, a square of side max_size+1 exists iff
        max_size+1 consecutive columns have run length > max_size, so
        max_size can grow by at most one per row.
        """
        max_size = 0
        if matrix:
            cols = len(matrix[0])
            size = [0] * cols
            for row in matrix:
                # Update per-column run lengths for this row.
                size = [s + 1 if ch == '1' else 0 for s, ch in zip(size, row)]
                run = 0  # consecutive columns exceeding the current max
                for j in range(cols):
                    if size[j] > max_size:
                        run += 1
                        if run > max_size:
                            # Found a (max_size+1) x (max_size+1) square.
                            max_size += 1
                            break
                    else:
                        run = 0
        return max_size * max_size
| fire717/Algorithms | LeetCode/python/_221.MaximalSquare.py | _221.MaximalSquare.py | py | 1,421 | python | en | code | 6 | github-code | 13 |
import json
import os
import sys
import subprocess
import platform
from openmdao.utils.file_utils import files_iter
def nb2dict(fname):
    """Load the Jupyter notebook at *fname* and return its JSON as a dict."""
    with open(fname) as fobj:
        return json.load(fobj)
def notebook_filter(fname, filters):
    """
    Return True if the given notebook satisfies any of the filter functions.

    Parameters
    ----------
    fname : str
        Name of the notebook file.
    filters : list of functions
        Filter functions taking the notebook dict and returning True/False.

    Returns
    -------
    bool
        True if any filter returns True.
    """
    # Load the notebook JSON directly (inlines the tiny nb2dict helper).
    with open(fname) as fobj:
        nb = json.load(fobj)
    for check in filters:
        if check(nb):
            return True
    return False
def is_parallel(dct):
    """Return True if any code cell of the notebook dict mentions ipyparallel."""
    return any(
        'ipyparallel' in line
        for cell in dct['cells']
        if cell['cell_type'] == 'code'
        for line in cell['source']
    )
def section_filter(dct, section):
    """Return True if a markdown heading line in the notebook dict contains *section*."""
    for cell in dct['cells']:
        if cell['cell_type'] != 'markdown':
            continue
        for line in cell['source']:
            # Only heading lines (starting with '#') count as sections.
            if line.startswith('#') and section in line:
                return True
    return False
def string_filter(dct, s):
    """Return True if any markdown or code cell of the notebook dict contains *s*."""
    return any(
        s in line
        for cell in dct['cells']
        if cell['cell_type'] in ('markdown', 'code')
        for line in cell['source']
    )
def find_notebooks_iter(section=None, string=None):
    """Yield paths of notebooks below the current dir, optionally filtered
    by a section heading substring and/or a plain content substring."""
    checks = []
    if section:
        checks.append(lambda dct, s=section: section_filter(dct, s))
    if string:
        checks.append(lambda dct, s=string: string_filter(dct, s))
    for path in files_iter(file_includes=['*.ipynb'], dir_excludes=['.ipynb_checkpoints', '_*']):
        if not checks or notebook_filter(path, checks):
            yield path
def pick_one(files):
    """Interactively prompt the user to pick one file from *files*.

    Re-prompts on non-integer input or an out-of-range index and returns the
    chosen file path.
    """
    print("Multiple matches found.")
    while True:
        for i, f in enumerate(files):
            print(f"{i}) {f}")
        try:
            response = int(input("\nSelect the index of the file to view: "))
        except ValueError:
            print("\nBAD index. Try again.\n")
            continue
        # BUG FIX: the old check (response > len(files) + 1) accepted the
        # out-of-range indices len(files)-? up to len(files)+1, which then
        # raised IndexError below.  Valid indices are 0 .. len(files)-1.
        if not 0 <= response < len(files):
            print(f"\nIndex {response} is out of range. Try again.\n")
            continue
        return files[response]
def _show_notebook_setup_parser(parser):
parser.add_argument('file', nargs='?', help='Look for notebook having the given base filename')
parser.add_argument('--section', action='store', dest='section',
help='Look for notebook(s) having the given section string.')
parser.add_argument('-s', '--string', action='store', dest='string',
help='Look for notebook(s) having the given string in a code or markdown '
'cell.')
def _show_notebook_exec(options, user_args):
    """
    Display a notebook given a keyword.
    """
    # Normalize the optional positional filename to end in '.ipynb'.
    if options.file is None:
        fname = None
    elif options.file.endswith('.ipynb'):
        fname = options.file
    else:
        fname = options.file + '.ipynb'
    if fname is not None:
        # Exact basename match anywhere below the current directory.
        files = [f for f in find_notebooks_iter() if os.path.basename(f) == fname]
        if not files:
            print(f"Can't find file {fname}.")
            sys.exit(-1)
    else:
        # No filename given: fall back to section/string content filters.
        files = list(find_notebooks_iter(section=options.section, string=options.string))
        if not files:
            print(f"No matching notebook files found.")
            sys.exit(-1)
    # Open directly on a unique match; otherwise let the user choose.
    if len(files) == 1:
        show_notebook(files[0], nb2dict(files[0]))
    else:
        f = pick_one(files)
        show_notebook(f, nb2dict(f))
def show_notebook(f, dct):
    """Open the notebook file *f* with jupyter.

    If the notebook uses ipyparallel, first verify that the MPI ipcluster is
    running (via its pid file) and bail out with exit code -1 if it isn't.
    """
    if is_parallel(dct):
        pidfile = os.path.join(os.path.expanduser('~'), '.ipython/profile_mpi/pid/ipcluster.pid')
        if not os.path.isfile(pidfile):
            print("cluster isn't running...")
            sys.exit(-1)
        else:
            # BUG FIX: the file handle previously shadowed the notebook
            # filename parameter `f`, so the os.system() call at the end ran
            # `jupyter notebook <closed file object>` instead of the path.
            with open(pidfile, 'r') as pidfh:
                pid = int(pidfh.read().strip())
            try:
                import psutil
            except ImportError:
                # the following doesn't work for Windows, so if Windows, just
                # proceed and hope the process exists
                if platform.system() == 'Windows':
                    pass
                else:
                    try:
                        # Signal 0 probes for existence without killing.
                        os.kill(pid, 0)
                    except ProcessLookupError:
                        print("cluster isn't running...")
                        sys.exit(-1)
                    except PermissionError:
                        # Process exists but belongs to someone else: fine.
                        pass
            else:
                if not psutil.pid_exists(pid):
                    print("cluster isn't running...")
                    sys.exit(-1)
    os.system(f"jupyter notebook {f}")
def _show_notebook_setup():
    """
    Hook returning (parser setup, exec function, description) for the
    'show notebook' command.
    """
    return (_show_notebook_setup_parser,
            _show_notebook_exec,
            "Display a given ipython notebook.")
def notebook_src_cell_iter(fname):
    """
    Iterate over the non-empty code cells of the given notebook.

    Parameters
    ----------
    fname : str
        Name of the notebook file.

    Yields
    ------
    list of str
        Lines of the source cell.
    list of str
        Lines of the corresponding output.
    int
        Execution count.
    """
    with open(fname) as fobj:
        nb = json.load(fobj)
    for cell in nb['cells']:
        if cell['cell_type'] != 'code':
            continue
        if not cell['source']:  # skip empty cells
            continue
        yield cell['source'], cell['outputs'], cell['execution_count']
def get_full_notebook_src(fname):
    """
    Return the concatenated source of all code cells of the given notebook.

    Shell (!) and magic (%) lines are neutralized as ``pass # ...`` comments
    (keeping their indentation) and blank lines are dropped, so the result is
    runnable python.

    Parameters
    ----------
    fname : str
        Name of the notebook file.

    Returns
    -------
    str
        Source of the given notebook.
    """
    out = []
    with open(fname) as fobj:
        nb = json.load(fobj)
    for cell in nb['cells']:
        if cell['cell_type'] != 'code' or not cell['source']:
            continue
        for line in cell['source']:
            stripped = line.lstrip()
            if stripped.startswith(('!', '%')):
                indent = ' ' * (len(line) - len(stripped))
                out.append(indent + 'pass # ' + stripped.rstrip())
            elif stripped:
                out.append(line.rstrip())
    return '\n'.join(out)
def grep_notebooks(includes=('*.ipynb',), dir_excludes=('_build', '_srcdocs', '.ipynb_checkpoints'),
                   greps=()):
    """
    Yield (pathname, full source) for notebooks below the current directory.

    Parameters
    ----------
    includes : list of str
        Local file names or glob patterns to match.
    dir_excludes : list of str
        Local directory names to skip.
    greps : list of str
        If non-empty, only yield notebooks whose source contains at least one
        of these strings.

    Yields
    ------
    str
        Full file pathname of a matching notebook.
    str
        Full contents of the source cells of that notebook.
    """
    for path in files_iter(file_includes=includes, dir_excludes=dir_excludes):
        src = get_full_notebook_src(path)
        if not greps or any(g in src for g in greps):
            yield path, src
def run_notebook_src(fname, src, nprocs=1, show_src=True, timeout=None, outstream=sys.stdout,
                     errstream=sys.stderr):
    """
    Execute python source code extracted from a notebook.

    Parameters
    ----------
    fname : str
        Filename of the notebook (used for reporting only).
    src : str
        Full python source contained in the notebook.
    nprocs : int
        Number of processes to use for cells using MPI ('%px' in the source).
    show_src : bool
        If True, display the full python source to outstream.
    timeout : float
        If set, terminate the running code after 'timeout' seconds.
    outstream : file
        File where output is written.
    errstream : file
        File where errors and warnings are written.
    """
    # only run on nprocs if we find %px in the source
    if '%px' not in src:
        nprocs = 1
    print('&' * 20, ' running', fname, ' ' + '&' * 20, file=outstream, flush=True)
    if show_src:
        print(src, file=outstream)
        print('-=' * 40, file=outstream)
    with open('_junk_.py', 'w') as tmp:
        tmp.write(src)
    try:
        # Use sys.executable so the notebook runs under the same interpreter
        # (and virtualenv) as this process, rather than whatever 'python'
        # happens to be on PATH.
        if nprocs == 1:
            proc = subprocess.run([sys.executable, '_junk_.py'], text=True, capture_output=True,
                                  timeout=timeout)
        else:
            proc = subprocess.run(['mpirun', '-n', str(nprocs), sys.executable, '_junk_.py'],
                                  text=True, capture_output=True, timeout=timeout)
    finally:
        # BUG FIX: the original removed '_junk.py' (typo) so the temp file was
        # never cleaned up; the finally also cleans up after TimeoutExpired.
        try:
            os.remove('_junk_.py')
        except OSError:
            pass
    if proc.returncode != 0:
        print(f"{fname} return code = {proc.returncode}.", file=errstream)
    print(proc.stdout, file=outstream)
    print(proc.stderr, file=errstream)
    print('-=' * 40, file=outstream)
def _run_notebook_exec(options, user_args):
    """Execute the 'run notebooks' command: run named notebooks, or recurse
    through the tree running all matching notebooks."""
    if options.recurse:
        # Recursive mode: filenames are not allowed; use include patterns.
        if options.file:
            print("When using the --recurse option, don't specify filenames. Use --include "
                  "instead.")
            sys.exit(-1)
        if not options.includes:
            options.includes = ['*.ipynb']
        # NOTE(review): these two log files are opened but never closed here;
        # consider a with-block.
        outs = open('run_notebooks.out', 'w')
        errs = open('run_notebooks.err', 'w')
        for fpath, src in grep_notebooks(includes=options.includes,
                                         greps=options.greps):
            if options.dryrun:
                # Dry run: just report which notebooks would be executed.
                print(fpath)
            else:
                run_notebook_src(fpath, src, nprocs=options.nprocs, timeout=options.timeout,
                                 outstream=outs, errstream=errs)
    else:
        # Non-recursive mode: run the explicitly named notebook files.
        if options.includes:
            print("The --include option only works when also using --recurse.")
            sys.exit(-1)
        for f in sorted(options.file):
            if os.path.isdir(f):
                continue
            if not f.endswith('.ipynb'):
                print(f"'{f}' is not a notebook.")
                continue
            if not os.path.isfile(f):
                print(f"Can't find file '{f}'.")
                sys.exit(-1)
            src = get_full_notebook_src(f)
            run_notebook_src(f, src, nprocs=options.nprocs, timeout=options.timeout)
def _run_notebook_setup_parser(parser):
parser.add_argument('file', nargs='*', help='Jupyter notebook file(s).')
parser.add_argument('-r', '--recurse', action='store_true', dest='recurse',
help='Search through all directories at or below the current one for the '
'specified file(s). If no files are specified, execute all jupyter '
'notebook files found.')
parser.add_argument('-i', '--include', action='append', dest='includes',
default=[], help='If the --recurse option is active, this specifies a '
'local filename or glob pattern to match. This argument may be supplied '
'multiple times.')
parser.add_argument('-g', '--grep', action='append', dest='greps',
default=[], help='Run only notebooks that contain this string. This '
'argument can be supplied multiple times.')
parser.add_argument('-n', '--nprocs', action='store', dest='nprocs',
default=4, type=int,
help='The number of processes to use for MPI cases.')
parser.add_argument('-d', '--dryrun', action='store_true', dest='dryrun',
help="Report which notebooks would be run but don't actually run them.")
parser.add_argument('--timeout', action='store', dest='timeout', type=float,
help='Timeout in seconds. Run will be terminated if it takes longer than '
'timeout.')
def _run_notebook_setup():
    """
    Hook returning (parser setup, exec function, description) for the
    'run notebooks' command.
    """
    return (_run_notebook_setup_parser,
            _run_notebook_exec,
            "Run a given ipython notebook or collection of notebooks and store their output.")
| naylor-b/om_devtools | om_devtools/notebook_utils.py | notebook_utils.py | py | 12,609 | python | en | code | 1 | github-code | 13 |
'''
Created on Sep 6, 2018
@author: Adrian Ridder
'''
#import CardGame
from copy import copy
from CardGame import cardDeck, compareHands,drawRandomCommunityCards, getBestCommunityCards
def hypotheticalHands(times, deck, hand, community, bestCommunityCards=False):
    """Estimate the win rate of *hand* over *times* simulated showdowns.

    Each trial deals five opponent hands from a copy of the deck; opponents
    combine with the community cards either randomly or optimally depending
    on *bestCommunityCards*.  Returns wins / times.
    """
    wins = 0
    for _ in range(times):
        trial_deck = cardDeck(copy(deck.deck))
        if bestCommunityCards:
            opponents = [getBestCommunityCards(trial_deck.dealHand(3), community)
                         for _ in range(5)]
        else:
            opponents = [drawRandomCommunityCards(trial_deck.dealHand(3), community)
                         for _ in range(5)]
        # A trial is a win only if no opponent beats our hand; stop at the
        # first loss, like the original while-loop did.
        for rival in opponents:
            if compareHands(hand, rival) == rival:
                break
        else:
            wins += 1
    return wins / times
def getExpectedWin(indStats, number):
    """Return the expected (mean) win rate recorded in ``indStats[number]``.

    Bug fix: the original accumulated the *value* where a *count* belonged
    (``twentyStats[v] = v`` then ``+= v``), so it effectively computed
    sum(v**2 * count) / 500 rather than the mean — e.g. 500 entries all
    equal to 0.5 came out as 0.25 instead of 0.5.  The expected value of the
    empirical distribution is simply the arithmetic mean.  Also generalized
    from a hard-coded 500 samples to any list length.
    """
    rates = indStats[number]
    return sum(rates) / len(rates)
def theRealDeal(stats, randomHypothetical=False):
    """Play 500 simulated games, appending results into the *stats* dict.

    For each game, hypothetical win rates from 20/100/200 Monte-Carlo trials
    go into stats[20]/stats[100]/stats[200], and the actual outcome against
    five freshly dealt opponents (1 = win, 0 = loss) goes into stats['real'].
    When randomHypothetical is True the hypothetical opponents use their best
    community-card combination.
    """
    for _ in range(500):
        deck = cardDeck()
        hole = deck.dealHand(3)
        community = deck.dealHand(5)
        hand = getBestCommunityCards(hole, community)
        for n in (20, 100, 200):
            stats[n].append(hypotheticalHands(n, deck, hand, community, randomHypothetical))
        # Actual hand: five opponents each get the best community combination.
        opponents = [getBestCommunityCards(deck.dealHand(3), community)
                     for _ in range(5)]
        for rival in opponents:
            if compareHands(hand, rival) == rival:
                stats['real'].append(0)
                break
        else:
            stats['real'].append(1)
if __name__ == '__main__':
    def _report(stats, real_win):
        """Print expected-vs-actual win rate and error for each sample size.

        (Replaces three copies of the same expected/error/print stanza.)
        """
        for n in (20, 100, 200):
            expected = getExpectedWin(stats, n)
            error = abs(real_win - expected)
            print(f"Expected win rate at {n}: {expected} Actual win rate: {real_win} Error: {error}")

    # Trial 1: hypothetical opponents pick random community cards.
    stats = {20: [], 100: [], 200: [], 'real': []}
    theRealDeal(stats)
    real_win = list(stats['real']).count(1) / 500
    _report(stats, real_win)

    # Trial 2: hypothetical opponents pick their best community cards.
    stats = {20: [], 100: [], 200: [], 'real': []}
    theRealDeal(stats, True)
    real_win = list(stats['real']).count(1) / 500
    _report(stats, real_win)
| adrian6912/Card-game | main.py | main.py | py | 4,744 | python | en | code | 0 | github-code | 13 |
2169202586 |
# Class creation and object instantiation
class 빵틀:
    """Bread mold (빵틀): models a street-food product and its order flow.

    Attributes are assigned by the caller after instantiation.
    """
    모양=str()  # shape of the bread (e.g. '붕어' = carp)
    반죽=str()  # dough type
    앙꼬=str()  # filling
    단가=int()  # unit price in won
    def 굽기(self,주문갯수):
        """Return baking time in minutes: batches of up to 10, 5 min each."""
        굽는횟수 = (주문갯수-1)/10+1
        완성시간 = int(굽는횟수)*5
        return 완성시간
    def 가격(self,주문갯수):
        """Return the total price (quantity * unit price)."""
        금액 = 주문갯수*self.단가
        return 금액
    def 주문(self,주문갯수,지불금액):
        """Process an order: prints wait time, total, and change, then
        returns the tuple (change, wait_time_minutes)."""
        대기시간 = self.굽기(주문갯수)
        주문금액 = self.가격(주문갯수)
        거스름돈 = 지불금액 - self.가격(주문갯수)
        print(대기시간)
        print(주문금액)
        print(거스름돈)
        return 거스름돈, 대기시간
# Create two molds; only 붕어빵 (carp bread) is configured and used below.
붕어빵 = 빵틀()
잉어빵 = 빵틀()

붕어빵.모양 = '붕어'
붕어빵.반죽 = '밀가루'
붕어빵.앙꼬 = '팥'
붕어빵.단가 = 600

# NOTE(review): the message advertises 300원 apiece but 단가 is 600 — confirm intended price.
print('붕어빵 주문 (1개 300원)')

order = 20
payment = 10000

# BUG FIX: the order must be placed on the 붕어빵 instance — calling 빵틀.주문
# without an instance raises TypeError. Also, 주문 returns (거스름돈, 대기시간),
# i.e. (change, wait_time), so unpack in that order; the original assignment
# had the two names swapped.
change, wait_time = 붕어빵.주문(order, payment)
shape = 붕어빵.모양
#
# print('{}빵 {}개를 주문하였고 {}원을 지불하였습니다.' .format(shape,order,payment))
#
# if change==0:
#     message = '손님 {}분만 기다려주세요'.format(wait_time)
# elif change>0:
#     message = '잔돈은 {}원 입니다. {}분만 기다려주세요'.format(change,wait_time)
# elif change<0:
#     message = '손님 금액이 {}원 부족합니다.'.format(change)
# else:
#     message = 'Error'
#
# print('==>', end='\t')
# print(message)
# print('*'*50) | mokimoki191225/jbfc_220506 | pycharm/class/클래스5.py | 클래스5.py | py | 1,563 | python | ko | code | 0 | github-code | 13 |
def average(array):
    """Return the arithmetic mean of the *distinct* values in array.

    Duplicates are removed before averaging, so e.g. [2, 2, 4] -> 3.0.

    :param array: iterable of numbers (must contain at least one value).
    :return: mean of the unique values, as a float.
    """
    heights = set(array)
    # Use the builtin sum() instead of a manual accumulator named `sum`,
    # which shadowed the builtin in the original implementation.
    return sum(heights) / len(heights)
# n = int(input())
# arr = list(map(int, input().split()))
# Fixed sample of heights (the commented lines above read them from stdin instead).
heights = [161, 182, 161, 154, 176, 170, 167, 171, 170, 174]
result = average(heights)
print(result) | Aakash9399/python | introset.py | introset.py | py | 287 | python | en | code | 0 | github-code | 13 |
39751058272 | from typing import Dict, Union
# Third Party Imports
from sqlalchemy import Column, Float, Integer, String
from sqlalchemy.orm import relationship
# RAMSTK Local Imports
from .. import RAMSTK_BASE
from .baserecord import RAMSTKBaseRecord
class RAMSTKCategoryRecord(RAMSTK_BASE, RAMSTKBaseRecord): # type: ignore
    """Class to represent table ramstk_category in the RAMSTK Common database.
    Types of category are:
    1. Hardware
    2. Risk
    3. Software
    4. Incident
    5. Action
    """
    # Default attribute values applied when a record is created without
    # explicit values (consumed by RAMSTKBaseRecord helpers).
    __defaults__ = {
        "name": "Category Name",
        "description": "Category Description",
        "category_type": "unknown",
        "value": 1,
        "harsh_ir_limit": 0.8,
        "mild_ir_limit": 0.9,
        "harsh_pr_limit": 1.0,
        "mild_pr_limit": 1.0,
        "harsh_vr_limit": 1.0,
        "mild_vr_limit": 1.0,
        "harsh_deltat_limit": 0.0,
        "mild_deltat_limit": 0.0,
        "harsh_maxt_limit": 125.0,
        "mild_maxt_limit": 125.0,
    }
    __tablename__ = "ramstk_category"
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key for the category table.
    category_id = Column(
        "fld_category_id",
        Integer,
        primary_key=True,
        autoincrement=True,
        nullable=False,
    )
    name = Column("fld_name", String(256), default=__defaults__["name"])
    description = Column(
        "fld_description", String(512), default=__defaults__["description"]
    )
    category_type = Column(
        "fld_category_type", String(256), default=__defaults__["category_type"]
    )
    value = Column("fld_value", Integer, default=__defaults__["value"])
    # NOTE(review): the *_limit columns below appear to be stress/derating
    # limits for harsh vs. mild environments — confirm against the RAMSTK
    # data model documentation before relying on this interpretation.
    harsh_ir_limit = Column(
        "fld_harsh_ir_limit", Float, default=__defaults__["harsh_ir_limit"]
    )
    mild_ir_limit = Column(
        "fld_mild_ir_limit", Float, default=__defaults__["mild_ir_limit"]
    )
    harsh_pr_limit = Column(
        "fld_harsh_pr_limit", Float, default=__defaults__["harsh_pr_limit"]
    )
    mild_pr_limit = Column(
        "fld_mild_pr_limit", Float, default=__defaults__["mild_pr_limit"]
    )
    harsh_vr_limit = Column(
        "fld_harsh_vr_limit", Float, default=__defaults__["harsh_vr_limit"]
    )
    mild_vr_limit = Column(
        "fld_mild_vr_limit", Float, default=__defaults__["mild_vr_limit"]
    )
    harsh_deltat_limit = Column(
        "fld_harsh_deltat_limit",
        Float,
        default=__defaults__["harsh_deltat_limit"],
    )
    mild_deltat_limit = Column(
        "fld_mild_deltat_limit",
        Float,
        default=__defaults__["mild_deltat_limit"],
    )
    harsh_maxt_limit = Column(
        "fld_harsh_maxt_limit", Float, default=__defaults__["harsh_maxt_limit"]
    )
    mild_maxt_limit = Column(
        "fld_mild_maxt_limit", Float, default=__defaults__["mild_maxt_limit"]
    )
    # Define the relationships to other tables in the RAMSTK Program database.
    subcategory = relationship( # type: ignore
        "RAMSTKSubCategoryRecord",
        back_populates="category",
        cascade="delete",
    )
    mode = relationship( # type: ignore
        "RAMSTKFailureModeRecord",
        back_populates="category",
        cascade="delete",
    )
    def get_attributes(self) -> Dict[str, Union[float, int, str]]:
        """Retrieve current values of the RAMSTKCategory data model attributes.
        :return: {category_id, name, description, category_type, value,
            harsh_ir_limit, mild_ir_limit, harsh_pr_limit,
            mild_pr_limit, harsh_vr_limit, mild_vr_limit,
            harsh_deltat_limit, mild_deltat_limit, harsh_maxt_limit,
            mild_maxt_limit} pairs
        :rtype: dict
        """
        return {
            "category_id": self.category_id,
            "name": self.name,
            "description": self.description,
            "category_type": self.category_type,
            "value": self.value,
            "harsh_ir_limit": self.harsh_ir_limit,
            "mild_ir_limit": self.mild_ir_limit,
            "harsh_pr_limit": self.harsh_pr_limit,
            "mild_pr_limit": self.mild_pr_limit,
            "harsh_vr_limit": self.harsh_vr_limit,
            "mild_vr_limit": self.mild_vr_limit,
            "harsh_deltat_limit": self.harsh_deltat_limit,
            "mild_deltat_limit": self.mild_deltat_limit,
            "harsh_maxt_limit": self.harsh_maxt_limit,
            "mild_maxt_limit": self.mild_maxt_limit,
        }
| ReliaQualAssociates/ramstk | src/ramstk/models/dbrecords/commondb_category_record.py | commondb_category_record.py | py | 4,409 | python | en | code | 34 | github-code | 13 |
17049174494 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BelongMerchantInfoDTO(object):
    """DTO describing the merchant a resource belongs to (Alipay OpenAPI).

    Exposes three optional string-like fields via properties and converts
    to/from the plain-dict wire format used by the Alipay gateway.
    """

    # Field names shared by the dict-conversion helpers below; keeping them in
    # one tuple removes the triplicated per-field serialization code.
    _FIELDS = ('business_type', 'merchant_id', 'merchant_open_id')

    def __init__(self):
        self._business_type = None
        self._merchant_id = None
        self._merchant_open_id = None

    @property
    def business_type(self):
        return self._business_type

    @business_type.setter
    def business_type(self, value):
        self._business_type = value

    @property
    def merchant_id(self):
        return self._merchant_id

    @merchant_id.setter
    def merchant_id(self, value):
        self._merchant_id = value

    @property
    def merchant_open_id(self):
        return self._merchant_open_id

    @merchant_open_id.setter
    def merchant_open_id(self, value):
        self._merchant_open_id = value

    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict.

        Nested objects exposing ``to_alipay_dict`` are serialized recursively,
        matching the behavior of the other generated Alipay DTOs.
        """
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a BelongMerchantInfoDTO from a response dict.

        Returns None when *d* is falsy (absent/empty payload).
        """
        if not d:
            return None
        o = BelongMerchantInfoDTO()
        for name in BelongMerchantInfoDTO._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/BelongMerchantInfoDTO.py | BelongMerchantInfoDTO.py | py | 2,047 | python | en | code | 241 | github-code | 13 |
41603470196 | #!/usr/bin/env python
from setuptools import setup, find_packages
# Minimal setuptools configuration for the nnet package.
__version__ = '0.1'
setup(
    name='nnet',
    version=__version__,
    url='https://github.com/zhaoyan1117/NeuralNet',
    packages=find_packages(),  # auto-discover every package in the repo
    include_package_data=True,
)
| zhaoyan1117/NeuralNet | setup.py | setup.py | py | 252 | python | en | code | 0 | github-code | 13 |
1721742797 | """Camera library, a component of vision library. Takes images.
"""
import RPi.GPIO as GPIO
import time
from lib_utils import *
import numpy as np
from picamera import PiCamera
class Camera():
    """Camera takes images and saves them in arrays to be processed by Blob.
    Attributes:
        CAMLED (int): GPIO output that is used to switch between right and left camera
        img (int): Array for raw RGB image, (U_CAM_MRES, U_CAM_NRES, 3)
        no_l (int): Image number taken by the left camera, for labelling stored images
        no_r (int): Image number taken by the right camera, for labelling stored images
        picam (): PiCamera object
        side (string): Camera side, right or left
        store_img (bool): Option to save images
    """
    # camera settings (class-level: the single PiCamera is shared by all instances)
    picam = PiCamera()
    picam.resolution = (U_CAM_NRES, U_CAM_MRES) # (horizontal, vertical)
    picam.framerate = 60
    picam.color_effects = (128, 128) # black and white
    picam.awb_mode = 'off'
    picam.awb_gains = (1, 1)
    picam.iso = 125
    picam.brightness = 35
    picam.contrast = 100
    CAMLED = 40
    GPIO.setup(CAMLED, GPIO.OUT)
    def __init__(self, side, store_img=False):
        """One Camera object is instantiated for each camera, i.e., the right and the left camera.
        Args:
            side (string): Camera side, right or left
            store_img (bool, optional): Option to save images
        """
        self.side = side
        self.store_img = store_img
        if self.store_img:
            self.no_r = 0
            self.no_l = 0
        self.img = np.empty((U_CAM_MRES, U_CAM_NRES, 3), dtype=np.uint8)
    def capture(self):
        """Takes an image with the selected camera (right or left) and stores it in self.img. Additionally saves the image if store_img=True.
        """
        if self.side == 'right':
            GPIO.output(self.CAMLED, U_CAM_RIGHT) # set to right cam
            if self.store_img:
                self.picam.rotation = 180
                self.picam.capture('./{}/{}_r{}.jpg'.format(U_FILENAME, U_FILENAME, self.no_r), use_video_port=True)
                self.picam.rotation = 0
                self.no_r += 1
        elif self.side == 'left':
            GPIO.output(self.CAMLED, U_CAM_LEFT) # set to left cam
            if self.store_img:
                self.picam.rotation = 180
                self.picam.capture('./{}/{}_l{}.jpg'.format(U_FILENAME, U_FILENAME, self.no_l), use_video_port=True)
                self.picam.rotation = 0
                self.no_l += 1
        else:
            print('camera error: select btw right and left camera')
        self.picam.capture(self.img, 'rgb', use_video_port=True)
    def capture_sequence(self, imgs):
        """Captures a rapid sequence of RGB images with the selected camera.
        Args:
            imgs: sequence of output buffers/filenames, as accepted by
                PiCamera.capture_sequence
        """
        if self.side == 'right':
            GPIO.output(self.CAMLED, U_CAM_RIGHT) # set to right cam
        elif self.side == 'left':
            GPIO.output(self.CAMLED, U_CAM_LEFT) # set to left cam
        self.picam.capture_sequence(imgs, format='rgb', use_video_port=True)
    def std_settings(self):
        """Restores the standard black-and-white capture settings set at class level."""
        self.picam.framerate = 60
        self.picam.exposure_mode = 'auto'
        self.picam.color_effects = (128, 128) # black and white
        self.picam.awb_gains = (1, 1)
        self.picam.brightness = 35
    def redblue_settings(self):
        """Switches to color capture with fixed red-heavy white balance and a long, fixed exposure."""
        self.picam.color_effects = None
        self.picam.awb_gains = (4.0, 1.0)
        self.picam.framerate = 20
        self.picam.shutter_speed = 50000 # us
        self.picam.exposure_mode = 'off'
        self.picam.brightness = 15
    def colorbot_settings(self):
        """Switches to automatic white balance/ISO with maximum brightness, contrast and sharpness."""
        self.picam.awb_mode = 'auto'
        self.picam.iso = 0 # auto
        self.picam.brightness = 100 # default is 50
        self.picam.contrast = 100 # default is 0
        self.picam.sharpness = 100 # default is 0
| fberlinger/blueswarm | fishfood/lib_camera.py | lib_camera.py | py | 3,921 | python | en | code | 2 | github-code | 13 |
35710360766 | from django.urls import path
from . import views
from django.conf.urls.static import static
from django.conf import settings
# URL routes for the blog app; media files are additionally served via the
# static() helper appended on the closing line below.
urlpatterns = [
    path('', views.index),  # home page
    path('generic/', views.generic),
    path('elements/',views.elementpage),
    path('article/<int:article_id>/', views.article_page,name ='article_page'),  # single article detail
    path('apply_author/',views.apply_author),
    path('authors/',views.authors),
    path('author/<int:author_id>/',views.author,name = "author"),  # single author detail
    path('hello/',views.hello),
    path('articles/',views.listing),  # article list
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | jokerhan930/myblog | blog/urls.py | urls.py | py | 610 | python | en | code | 0 | github-code | 13 |
7931900912 | """
Text version of the 52-card Blackjack Late Surrender game for one human Player, and a computer Dealer.
Allows Splits, Doubling Down, Insurance and Surrender.
Dealer stands on soft 17. Blackjack pays 3:2.
To display cards, uses rectangles constructed of pipe operators and underscores.
To display chips, uses colored capital O letters.
"""
from interactive_functions import ask_players_name, hit_or_stand, split_requested, surrender_requested,\
double_down_requested, insurance_requested, press_enter_to_continue, cheque_change_requested, more_chips_requested,\
replay
from card_related_classes import Deck, surrender_offered
from player_classes import Player, HumanPlayer, EMPTY, NEW_SPLIT_HAND_NUMBERS
# gameplay functions:
def players_turn(player, dealer, deck):
    """
    Plays a Hand (and all Split Hands created by splitting it) for the human player. Requires input from player.
    :param player: a HumanPlayer object representing the human player.
    :param dealer: a Player object representing the computer Dealer.
    :param deck: a Deck object representing a 52-card French-suited deck.
    :return: a tuple containing 3 lists and a boolean: a list of integers representing final scores of all hands played;
             a list of booleans showing if there was a Natural Blackjack for each hand played; a list of names of
             corresponding wagers for each hand played; and a boolean showing if surrender has been requested.
    :rtype: score_list: list, natural_list: list, wager_list: list, sr_requested: bool
    """
    # creating the list of all human player's Blackjack hands left to play:
    hand_list = [player.hand]
    # variable score_list will be used to store the list of final scores of all hands played:
    score_list = []
    # variable natural_list will be used to store the list of booleans showing if there was a Natural Blackjack for each
    # hand played:
    natural_list = []
    # variable wager_list will be used to store the list of names of corresponding wagers for each hand played:
    wager_list = []
    # variable sr_requested will be used to store the boolean showing whether or not surrender is requested
    sr_requested = False
    # while loop checking if there are any hands left to play:
    while len(hand_list) > 0:
        # removing a hand to be played from the list:
        hand = hand_list.pop(0)
        # clearing the screen:
        print('\n' * 100)
        # showing dealer's cards one face up, one face down:
        dealer.hand.display_one_face_down('Dealer')
        # showing human player's cards face up:
        hand.display_face_up(player.name)
        # determining which wager corresponds to the hand being played:
        if hand.split_wager_number == 0:
            wager_name = 'Main Wager'
        else:
            wager_name = f'Split Wager {hand.split_wager_number}'
        # checking for the natural Blackjack:
        if hand.score == 21:
            natural = True
            print('\nBLACKJACK!')
            press_enter_to_continue()
        else:
            natural = False
        # checking if surrender is possible on this hand:
        if hand.type == 'Normal' and dealer.hand.score != 21 and player.wagers[wager_name]['Amount'] > 1:
            # checking if player has been dealt a 14, 15, 16, or 17:
            if surrender_offered(hand, dealer.hand.cards[0].rank):
                sr_requested = surrender_requested()
                if sr_requested:
                    player.surrender()
                    break
        # checking if splitting is allowed for this hand:
        if hand.type == 'Normal' or hand.split_hand_number in ['1', '2']:
            # checking if there is a pair of same-rank cards and if human player has enough chips for a split:
            if hand.cards[0].rank == hand.cards[1].rank and player.chips['Amount'] >= \
                    player.wagers[wager_name]['Amount']:
                # displaying all human player's chips and wagers:
                print('\n')
                player.display_chips('Chips')
                print('\n')
                player.display_chips(*player.wagers.keys())
                # asking if human player wants to split the pair:
                if split_requested():
                    # splitting:
                    # determining the new split wager numbers:
                    # NOTE(review): this relies on split_hand_number being '1' or '2' and on
                    # len(player.split_hands) being 2 or 4 here; any other combination would
                    # leave new_split_wager_numbers unbound below -- confirm invariants hold.
                    if hand.type == 'Normal':
                        new_split_wager_numbers = (0, 1)
                    elif hand.type == 'Split Hand' and hand.split_hand_number == '1':
                        new_split_wager_numbers = (0, 2)
                    elif hand.type == 'Split Hand' and hand.split_hand_number == '2':
                        if len(player.split_hands) == 2:
                            new_split_wager_numbers = (1, 2)
                        elif len(player.split_hands) == 4:
                            new_split_wager_numbers = (1, 3)
                    # splitting the wager:
                    player.double_wager('Split', *new_split_wager_numbers)
                    # creating two new split hands:
                    player.start_split_hand(NEW_SPLIT_HAND_NUMBERS[hand.split_hand_number][0],
                                            new_split_wager_numbers[0])
                    player.start_split_hand(NEW_SPLIT_HAND_NUMBERS[hand.split_hand_number][1],
                                            new_split_wager_numbers[1])
                    # splitting the pair:
                    split_card1, split_card2 = hand.split_pair()
                    # adding one of the split cards to each split hand:
                    player.split_hands[-2].add_card_from_split(split_card1)
                    player.split_hands[-1].add_card_from_split(split_card2)
                    # adding one card from deck to each split hand:
                    player.split_hands[-2].add_card_from_deck(deck)
                    player.split_hands[-1].add_card_from_deck(deck)
                    hand_list = [player.split_hands[-2], player.split_hands[-1]] + hand_list
                    # clearing the screen:
                    print('\n' * 100)
                    # displaying the updated human player's chips and wagers:
                    player.display_chips('Chips')
                    print('\n')
                    player.display_chips(*player.wagers.keys())
                    # asking the player to press enter to continue:
                    press_enter_to_continue()
                    continue
        # checking if doubling down is possible:
        if hand.score in [10, 11] and player.chips['Amount'] >= player.wagers[wager_name]['Amount']:
            # clearing the screen:
            print('\n' * 100)
            # showing dealer's cards one face up, one face down:
            dealer.hand.display_one_face_down('Dealer')
            # showing human player's cards face up:
            hand.display_face_up(player.name)
            print('\n')
            # displaying all human player's chips and wagers:
            player.display_chips('Chips')
            print('\n')
            player.display_chips(*player.wagers.keys())
            # asking if human player wants to double down:
            dd_requested = double_down_requested()
            # doubling down:
            if dd_requested:
                # doubling the wager:
                player.double_wager('Double Down', hand.split_wager_number)
                # clearing the screen:
                print('\n' * 100)
                # displaying the updated human player's chips and wagers:
                player.display_chips('Chips')
                print('\n')
                player.display_chips(*player.wagers.keys())
                # asking human player to press enter to continue:
                press_enter_to_continue()
                # clearing the screen:
                print('\n' * 100)
                # showing dealer's cards one face up, one face down:
                dealer.hand.display_one_face_down('Dealer')
                # showing human player's cards face up:
                hand.display_face_up(player.name)
        # doubling down not possible:
        else:
            dd_requested = False
        # checking if human player has split a pair of Aces:
        if hand.type == 'Split Hand' and hand.cards[0].rank == 'A':
            # the player is only allowed to draw one card on each split Ace:
            print("\nYou can't take any more cards to this hand (split Aces)")
            hit = False
            # asking human player to press enter to continue:
            press_enter_to_continue()
        # in all other cases, player is allowed to draw at least one more card:
        else:
            hit = True
        # while loop checking if the hand score is still less than 21, and human player is allowed and willing to hit
        # one more card:
        while hand.score < 21 and hit:
            # asking human player to choose hit or stand:
            hit = hit_or_stand()
            # hitting:
            if hit:
                # adding one card from deck to the hand:
                hand.add_card_from_deck(deck)
                # clearing the screen:
                print('\n' * 100)
                # showing dealer's cards one face up, one face down:
                dealer.hand.display_one_face_down('Dealer')
                # showing human player's cards face up:
                hand.display_face_up(player.name)
                # checking if there was a double down:
                if dd_requested and hand.score < 21:
                    # the player is only allowed to draw one card after doubling down:
                    print("\nYou can't take any more cards to this hand (Double Down)")
                    hit = False
                    # asking human player to press enter to continue:
                    press_enter_to_continue()
        # checking for a bust:
        if hand.score > 21:
            print('\nBUST!')
            # asking human player to press enter to continue:
            press_enter_to_continue()
        # checking for a 21:
        elif hand.score == 21:
            print('\n'
                  'YOU HAVE A 21!')
            # asking human player to press enter to continue:
            press_enter_to_continue()
        # adding the final hand score to the score list:
        score_list.append(hand.score)
        # adding the boolean showing whether there was a natural Blackjack to the natural list:
        natural_list.append(natural)
        # adding the name of corresponding wager to the wager list:
        wager_list.append(wager_name)
    # after all hands have been played, return the score list, the natural list, the wager list, and the boolean
    # showing if Surrender has been requested:
    return score_list, natural_list, wager_list, sr_requested
def dealers_turn(dealer, player, player_score_list, player_natural_list, deck):
    """
    Offers human player an Insurance bet if Dealer's upcard is an Ace, and plays the Dealer's hand.
    :param player: a HumanPlayer object representing the human player.
    :param dealer: a Player object representing the computer Dealer.
    :param player_score_list: a list containing final scores of all HumanPlayer's Hands played in current round
    :param player_natural_list: a list containing booleans showing if there was a Natural Blackjack for each
        HumanPlayer's Hand played in current round
    :param deck: a Deck object representing a 52-card French-suited deck
    :returns: a tuple containing Dealer's final score, and a boolean showing if Dealer has a natural Blackjack
    :rtype: self.score: int, natural: bool
    """
    # the initial dealer's Hand score:
    dealer_score = dealer.hand.score
    # checking if Dealer has a Blackjack:
    dealer_natural = dealer.hand.score == 21
    # checking if human player has played any hands with neither a bust nor a Natural Blackjack
    # (only such "live" hands make an Insurance bet worth offering):
    for score, natural in zip(player_score_list, player_natural_list):
        if score <= 21 and not natural:
            insurance_possible = True
            break
    else:
        insurance_possible = False
    if insurance_possible:
        # checking if Dealer's upcard is an Ace and if human player has any chips left:
        if dealer.hand.cards[0].rank == 'A' and player.chips['Amount'] > 0:
            print('\n' * 100)
            # showing dealer's cards one face up, one face down:
            dealer.hand.display_one_face_down('Dealer')
            # showing human player's cards face up:
            # if there are no split hands:
            if len(player.split_hands) == 0:
                player.hand.display_face_up(player.name)
            else:
                # showing all non-empty Split Hands:
                for hand in player.split_hands:
                    if hand.score > 0:
                        hand.display_face_up(player.name)
            print('\n')
            # displaying all human player's chips and wagers:
            player.display_chips('Chips')
            print('\n')
            player.display_chips(*player.wagers.keys())
            # asking if human player wants to place an Insurance bet:
            if insurance_requested():
                # placing the Insurance bet:
                print('\n')
                player.place_bet('Insurance')
                # clearing the screen:
                print('\n' * 100)
                # displaying the updated human player's chips and wagers:
                player.display_chips('Chips')
                print('\n')
                player.display_chips(*player.wagers.keys())
                # asking human player to press enter to continue:
                press_enter_to_continue()
    # playing Dealer's hand up to soft 17:
    dealer_score, dealer_natural = dealer.hand.play_for_dealer(deck)
    # clearing the screen:
    print('\n' * 100)
    # showing dealer's cards face up:
    dealer.hand.display_face_up('Dealer')
    # showing human player's cards face up:
    # if there are no split hands:
    if len(player.split_hands) == 0:
        player.hand.display_face_up(player.name)
    else:
        # showing all non-empty Split Hands:
        for hand in player.split_hands:
            if hand.score > 0:
                hand.display_face_up(player.name)
    print('\n')
    # returning Dealer's final score and a boolean showing if Dealer has a Blackjack:
    return dealer_score, dealer_natural
def check_outcome_and_add_winnings(player, player_score_list, player_natural_list, player_wager_list, dealer_score,
                                  dealer_natural):
    """
    Checks the outcome of a round and adds winnings (if any) to HumanPlayer's chips
    :param player: a HumanPlayer object representing the human player
    :param player_score_list: a list containing final scores of all HumanPlayer's Hands played in current round
    :param player_natural_list: a list containing booleans showing if there was a Natural Blackjack for each
        HumanPlayer's Hand played in current round
    :param player_wager_list: a list of names of corresponding wagers for each hand played in current round
    :param dealer_score: dealer's final score
    :param dealer_natural: a boolean showing if dealer has a Natural Blackjack
    :return: none
    """
    # Single-hand and multi-hand (split) rounds are resolved separately: the
    # single-hand branch always settles 'Main Wager', the multi-hand branch
    # settles each hand against its own named wager.
    # if there was no split and just one hand has been played:
    if len(player_score_list) == 1:
        # checking for human player's Blackjack:
        if player_natural_list[0]:
            # checking for Dealer's Blackjack:
            if not dealer_natural:
                print (f'{player.name} has won 3:2!')
                player.add_winnings('3:2', 'Main Wager')
            else:
                print ('Dealer has a Blackjack too! PUSH!')
                player.add_winnings('Push', 'Main Wager')
        # checking for a bust:
        elif player_score_list[0] > 21:
            print('BUST! House has won!')
        # checking for Dealer's Blackjack:
        elif dealer_natural:
            print('Dealer has a Blackjack! House has won!')
            # checking if an Insurance bet has been placed:
            if player.wagers['Insurance'] != EMPTY:
                print (f"{player.name}'s Insurance bet has won!")
                player.add_winnings('2:1', 'Insurance')
        # checking for a Dealer bust:
        elif dealer_score > 21:
            print (f'DEALER BUST! {player.name} has won!')
            player.add_winnings('1:1', 'Main Wager')
        # checking if human player's score is greater than dealer's score:
        elif player_score_list[0] > dealer_score:
            print(f'{player.name} has won!')
            player.add_winnings('1:1', 'Main Wager')
        # checking for a tie:
        elif player_score_list[0] == dealer_score:
            print('PUSH!')
            player.add_winnings('Push', 'Main Wager')
        # checking if dealer's score is greater than human player's score:
        elif player_score_list[0] < dealer_score:
            print('House has won!')
    # more than 1 hand has been played:
    else:
        if dealer_natural:
            print('Dealer has a Blackjack!')
            # checking if an Insurance bet has been placed:
            if player.wagers['Insurance'] != EMPTY:
                print(f"{player.name}'s Insurance bet has won!")
                player.add_winnings('2:1', 'Insurance')
        elif dealer_score > 21:
            print('DEALER BUST!')
        for score, natural, wager in zip (player_score_list, player_natural_list, player_wager_list):
            # checking for human player's Blackjack:
            if natural:
                # checking for Dealer's Blackjack:
                if not dealer_natural:
                    print(f"{player.name}'s {wager} has won 3:2!")
                    player.add_winnings('3:2', wager)
                else:
                    print(f"PUSH on {player.name}'s {wager}!")
                    player.add_winnings('Push', wager)
            # checking for Dealer's Blackjack:
            elif dealer_natural:
                print(f"House has won {player.name}'s {wager}!")
            # checking for a bust:
            elif score > 21:
                print(f"BUST on {player.name}'s {wager}!")
            # checking for a Dealer bust:
            elif dealer_score > 21:
                print (f"{player.name}'s {wager} has won!")
                player.add_winnings('1:1', wager)
            # checking if human player's score is greater than dealer's score:
            elif score > dealer_score:
                print(f"{player.name}'s {wager} has won!")
                player.add_winnings('1:1', wager)
            # checking for a tie:
            elif score == dealer_score:
                print(f"PUSH on {player.name}'s {wager}!")
                player.add_winnings('Push', wager)
            # checking if dealer's score is greater than human player's score:
            elif score < dealer_score:
                print(f"House has won {player.name}'s {wager}!")
if __name__ == '__main__':
    # game setup:
    print('Welcome to the Blackjack Game!')
    # asking player's name:
    plr_name = ask_players_name()
    # creating a HumanPlayer object representing the player:
    plr = HumanPlayer(plr_name)
    # creating a Player object representing the dealer:
    dlr = Player()
    play_again = True
    need_more_chips = True
    first_round = True
    # game cycle: one iteration per round of Blackjack
    while play_again:
        # checking if player needs more chips:
        if need_more_chips:
            if not first_round and plr.chips == EMPTY:
                print('\n'*100)
                print(f'\n{plr.name} has no chips left!')
            else:
                print('\n'*100)
            #asking player to purchase chips:
            plr.get_chips()
            first_round = False
            #asking to press enter to continue:
            press_enter_to_continue()
        else:
            # no chip purchase needed this round
            pass
        # shuffling the deck and placing a bet:
        print('\n'*100)
        print('New round!')
        playing_deck = Deck()
        playing_deck.shuffle()
        plr.place_bet('Main Wager')
        # asking to press enter to continue:
        press_enter_to_continue()
        # creating a "Normal" Hand object for player:
        plr.start_hand()
        # creating a "Normal" Hand object for dealer:
        dlr.start_hand()
        # dealing initial 2 cards to both player and dealer:
        plr.hand.add_card_from_deck(playing_deck)
        dlr.hand.add_card_from_deck(playing_deck)
        plr.hand.add_card_from_deck(playing_deck)
        dlr.hand.add_card_from_deck(playing_deck)
        # playing a round:
        #player's turn:
        plr_scores, plr_naturals, plr_wagers, surrender = players_turn(plr, dlr, playing_deck)
        # checking for surrender:
        if not surrender:
            # dealer's turn:
            dlr_score, dlr_natural = dealers_turn(dlr, plr, plr_scores, plr_naturals, playing_deck)
            # checking the outcome and adding winnings:
            check_outcome_and_add_winnings(plr, plr_scores, plr_naturals, plr_wagers, dlr_score, dlr_natural)
        else:
            # clearing the screen:
            print('\n' * 100)
            # showing dealer's cards face up:
            dlr.hand.display_face_up('Dealer')
            # showing human player's cards face up:
            plr.hand.display_face_up(plr.name)
            print('HAND SURRENDERED!')
        print('\n')
        # displaying the updated human player's chips:
        plr.display_chips('Chips')
        # cleanup after a finished round:
        plr.clear_wager()
        plr.split_hands = []
        # checking if player's got any chips, or sufficient bankroll to purchase chips:
        if plr.chips == EMPTY and plr.bankroll < 1:
            break
        else:
            # asking if player wants to play again:
            play_again = replay()
            if play_again:
                # checking if cheque change is needed:
                exchange_possible, high_value_color = plr.cheque_change_possible()
                if exchange_possible:
                    if cheque_change_requested(high_value_color):
                        plr.cheque_change(high_value_color)
                # checking if player needs more chips:
                if plr.chips == EMPTY:
                    need_more_chips = True
                elif plr.bankroll < 1:
                    need_more_chips = False
                else:
                    need_more_chips = more_chips_requested()
    #if the player has run out of both chips and bankroll, or doesn't want to replay:
    print(f"Game over! {plr.name}'s bankroll: {(plr.bankroll + plr.chips['Amount']):7.2f}")
| mvyushko/blackjack_surrender_game | gameplay.py | gameplay.py | py | 23,132 | python | en | code | 0 | github-code | 13 |
31126458194 | from rest_framework.viewsets import ModelViewSet
from rest_framework.permissions import IsAuthenticated, SAFE_METHODS
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST, HTTP_403_FORBIDDEN, HTTP_201_CREATED
from rest_framework.decorators import action
from rest_framework.generics import RetrieveUpdateDestroyAPIView, CreateAPIView
from django.utils import timezone
from django.shortcuts import get_object_or_404
from policies.models import Claim, ClaimApproval, ClaimComment, Policy
from policies.claims.models import ClaimView
from policies.claims.serializers import ClaimSerializer, ClaimApprovalSerializer, ClaimEvidenceSerializer, FullClaimSerializer, ClaimViewSerializer, ClaimCommentSerializer
from policies.claims.permissions import InClaimPod, InClaimApprovalPod, IsNotClaimant, IsCommentOwner
from policies.claims.approvals import conditionally_create_claim_approvals, conditionally_approve_claim
class ClaimViewSet(ModelViewSet):
    """CRUD endpoints for a policy's claims, plus an escrow-only payout action."""
    queryset = Claim.objects.all()
    serializer_class = ClaimSerializer
    permission_classes = [IsAuthenticated & InClaimPod]
    def get_serializer_class(self):
        # Read requests get the richer serializer (with evidence); writes use the lean one.
        if self.request.method in SAFE_METHODS:
            return FullClaimSerializer
        return ClaimSerializer
    def create(self, request, *args, **kwargs):
        """Create a claim and return its full (evidence-bearing) representation."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        # return a more full claim object, complete with evidence
        return Response(FullClaimSerializer(serializer.instance).data, status=HTTP_201_CREATED, headers=headers)
    def perform_create(self, serializer):
        # Attach the claim to the policy from the nested URL, then seed its approval records.
        policy = Policy.objects.get(id=self.kwargs["policy_pk"])
        claim = serializer.save(policy=policy)
        conditionally_create_claim_approvals(claim)
    @action(detail=True, methods=["post"])
    def payout(self, request, policy_pk=None, pk=None):
        # a route that only the escrow agent can call
        # pays out the claim (and deducts from the policy reserves)
        # also maybe sends an email in the future
        claim = self.get_object()
        policy: Policy = claim.policy
        if policy.escrow_manager != request.user:
            return Response(
                {"error": "Only the escrow manager can payout claims"},
                status=HTTP_403_FORBIDDEN,
            )
        if claim.is_approved():
            if claim.paid_on:
                return Response(
                    {"error": "This claim has already been paid out"},
                    status=HTTP_400_BAD_REQUEST,
                )
            # NOTE(review): the deduction below is not atomic (no transaction or
            # F() expression) — concurrent payout requests could double-deduct; confirm.
            policy.pool_balance -= claim.amount
            policy.save()
            claim.paid_on = timezone.now()
            claim.save()
            return Response(data={"message": "Claim paid out", "claim": ClaimSerializer(claim).data}, status=HTTP_200_OK)
        return Response(data={"message": "Claim not approved, cannot pay out"}, status=HTTP_400_BAD_REQUEST)
class ClaimApprovalViewSet(RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete endpoints for the requesting user's own claim approvals."""
    serializer_class = ClaimApprovalSerializer
    permission_classes = [IsAuthenticated & InClaimApprovalPod & IsNotClaimant]
    def get_queryset(self):
        # Users may only act on approvals assigned to them.
        return ClaimApproval.objects.filter(approver=self.request.user)
    def perform_update(self, serializer):
        approval = serializer.save()
        claim = approval.claim
        # An updated approval may tip the claim into fully-approved state.
        conditionally_approve_claim(claim)
class ClaimEvidenceAPIView(CreateAPIView):
    '''
    Frontend workflow necessitates that we create image assets for the claim before we can create the claim itself
    Clients will create images first (which is linked to the photo upload) and then attach them to the claim
    '''
    serializer_class = ClaimEvidenceSerializer
    permission_classes = [IsAuthenticated & InClaimPod]
    def perform_create(self, serializer):
        # evidence is tied to the policy from the URL and owned by the uploading user
        policy = get_object_or_404(Policy, pk=self.kwargs["policy_pk"])
        return serializer.save(policy=policy, owner=self.request.user)
class ClaimCommentsViewSet(ModelViewSet):
    """CRUD for comments nested under a claim (claim pk comes from the URL)."""
    permission_classes = [IsAuthenticated & InClaimPod & IsCommentOwner]
    serializer_class = ClaimCommentSerializer
    def perform_create(self, serializer):
        # attach the comment to the URL's claim and stamp the author
        claim = get_object_or_404(Claim, pk=self.kwargs["claim_pk"])
        return serializer.save(claim=claim, commenter=self.request.user)
    def get_queryset(self):
        return ClaimComment.objects.filter(claim__id=self.kwargs["claim_pk"])
class ClaimViewModelViewSet(ModelViewSet):
    """CRUD for ClaimView rows nested under a claim; the viewer is stamped from the request."""
    queryset = ClaimView.objects.all()
    serializer_class = ClaimViewSerializer
    permission_classes = [IsAuthenticated & InClaimPod]
    def get_queryset(self):
        # narrow the class-level queryset to the claim named in the URL
        return ClaimView.objects.filter(claim__id=self.kwargs["claim_pk"])
    def perform_create(self, serializer):
        serializer.save(viewer=self.request.user)
| daxaxelrod/open_insure | policies/claims/views.py | views.py | py | 4,987 | python | en | code | 33 | github-code | 13 |
26525361042 | from .models import Inquiry
def get_business_details(request,user_id,sentence):
    """Advance the three-step business-inquiry chat flow for one user message.

    Session flags record progress; each step stores the user's ``sentence`` on
    the Inquiry row identified by ``user_id`` and returns the bot's next prompt
    in ``reply_dict``.

    NOTE(review): the 'budget' session key is read but never set here, so the
    budget branch re-runs on later calls unless a caller checks
    'business_details' first — confirm against the dispatcher.
    """
    reply_dict={}
    business_details = request.session.get('business_details',False)
    platform= request.session.get('platform',False)
    budget = request.session.get('budget',False)
    business_type = request.session.get('business_type',False)
    print("In Business Details")
    try:
        if not business_type:
            # step 1: record the business description, then ask for the app type
            app_type_list = ["I need a Web Application","I need a Mobile Application","I need both Web and Mobile Application"]
            inq = Inquiry.objects.get(pk=user_id)
            inq.business = sentence
            inq.save()
            request.session['business_type'] = True
            # fixed typo in user-facing message: "yopu" -> "you"
            reply_dict.update({"Bot":"Great!. Well what kind of application you looking for ?"})
            reply_dict.update({"Bot_C":app_type_list})
        elif not platform:
            # step 2: record the chosen app type, then ask for the budget
            print("Platform is ",sentence)
            inq = Inquiry.objects.get(pk=user_id)
            inq.app_type=sentence
            inq.save()
            request.session['platform'] = True
            reply_dict.update({"Bot":"How much has been budgeted for this project"})
        elif not budget:
            # step 3: record the budget and close the flow with the portfolio link
            inq = Inquiry.objects.get(pk=user_id)
            inq.budget=sentence
            inq.save()
            print("Budget is",sentence)
            reply_dict.update({'Bot': "Thank you for your interest. Our customer executive will contact you soon. Meanwhile you can take a look into our company's portfolio at "})
            reply_dict.update({"Bot__":"https://www.weinsoft.in/portfolio"})
            request.session['business_details'] = True
            request.session['user_interest']=True
    except Exception as e:
        # best-effort: surface a generic error marker to the chat layer
        print(e)
        reply_dict.update({"Message":"Error"})
    return reply_dict
| arunraj753/chatbot | chat/extra_cust.py | extra_cust.py | py | 1,641 | python | en | code | 0 | github-code | 13 |
14957774620 | from django.urls import path
from . import views
# . : the current package (blog -> urls)
# routing: urls.py dispatches to the views module
urlpatterns = [
    path('search/<str:q>/',views.PostSearch.as_view()), # post search
    path('delete_comment/<int:pk>/', views.delete_comment),
    path('update_comment/<int:pk>/', views.CommentUpdate.as_view()),
    path('update_post/<int:pk>/', views.PostUpdate.as_view()),
    path('create_post/', views.PostCreate.as_view()),
    path('tag/<str:slug>/',views.tag_page),
    path('<int:pk>/',views.PostDetail.as_view()),
    # path('<int:pk>/',views.single_post_page), not used (replaced by PostDetail)
    path('category/<str:slug>/',views.category_page),
    # routes to category_page in views.py
    # e.g. https://project-ztsct.run.goorm.io/blog/category/programming
    # the trailing "programming" slug is extracted and passed to views.category_page
    path('<int:pk>/new_comment/', views.new_comment),
    # path('', views.index), commented out after converting to the class-based view
    path('',views.PostList.as_view()),
    # rendered via the post_list.html template
]
| Sgkeoi/Goorm_Django | blog/urls.py | urls.py | py | 1,136 | python | ko | code | 0 | github-code | 13 |
23871390903 | import requests
from bs4 import BeautifulSoup
import random as r
from webbrowser import open_new_tab
import spotipy
from spotipy.oauth2 import SpotifyOAuth
# Scrape the Billboard Hot 100 chart for a given date, save the titles to
# songs.txt (one per line), then open a random title in a YouTube search tab.
a = input("Enter the date you wanna search the songs for in yyyy-mm-dd format")
query = "https://www.billboard.com/charts/hot-100/" + a + "/"
res = requests.get(query)
soup = BeautifulSoup(res.text, 'html.parser')
titles = soup.select(selector="li>#title-of-a-story")

# Strip the layout whitespace Billboard embeds inside each title tag.
songs = [t.text.replace("\n", "").replace("\t", "") for t in titles]
with open("songs.txt", "w") as f:
    for song in songs:
        f.write(song + "\n")

with open("songs.txt", "r") as f:
    # drop empty lines so the random pick is always a real title
    k = [line for line in f.read().splitlines() if line]

# r.choice fixes the original off-by-one: randint(0, 101) could index past the
# end of the ~100-entry list (IndexError) or land on an empty split remainder.
# (also renamed the result: the original shadowed the builtin `str`)
song = r.choice(k)
query = "https://www.youtube.com/results?search_query=" + song
print(query)
open_new_tab(query)
| sandy-iiit/beautifulsoup_projects | bs4-start/best_songs_based_on_date.py | best_songs_based_on_date.py | py | 934 | python | en | code | 0 | github-code | 13 |
70074397459 | import pygame, sys
from pygame.locals import *
from personaggio import Personaggio
from ostacolo import Ostacolo
# --- colors ---
BLACK = (0,0,0)
WHITE = (255,255,255)
BEIGE = (235,235,235)
pygame.init()
# window parameters
screen_height = 400
screen_length = 900
# basic window setup
WINDOW_SIZE = (screen_length, screen_height)
screen = pygame.display.set_mode(WINDOW_SIZE, 0, 32)
pygame.display.set_caption("RUN")
screen.fill(WHITE)
# clock used to pace the program
clock = pygame.time.Clock()
fps = 120
# sky background
cielo_image= pygame.image.load('cielo2.png')
pos_cielo_x = 0
pos_cielo_y = 0
cielo_image = pygame.transform.scale(cielo_image, (screen_length, screen_height))
# lives (heart) image
vite_image = pygame.image.load('cuore.png')
vite = 3
pos_vite_x = 650
pos_vite_y = 350
vite_image = pygame.transform.scale(vite_image, (50, 50))
# player character image and jump state
pos_personaggio_x = 20
pos_personaggio_y = 220
personaggio = Personaggio(150, 100, 'personaggio.png', pos_personaggio_x, pos_personaggio_y)
velocità_salto = 6
velocità_discesa = 5
salti_max = 40
cont_salto = 0
cont_discesa = 0
first_time = True
# obstacle images (four cacti cycling across the screen)
ostacolo_lenght = 100
ostacolo_height = 90
pos_ostacolo_y = 215
ostacolo = Ostacolo(ostacolo_lenght, ostacolo_height, 'cactus.png', 300, pos_ostacolo_y)
ostacolo2 = Ostacolo(ostacolo_lenght, ostacolo_height, 'cactus.png', 600, pos_ostacolo_y)
ostacolo3 = Ostacolo(ostacolo_lenght, ostacolo_height, 'cactus.png', 900, pos_ostacolo_y)
ostacolo4 = Ostacolo(ostacolo_lenght, ostacolo_height, 'cactus.png', 960, pos_ostacolo_y)
velocità_ostacolo = 4
pos_ostacolo_rigenerato = 1000
# game-over image
game_over = pygame.image.load('gameover.png')
pos_over_x = 100
pos_over_y = 30
game_over = pygame.transform.scale(game_over, (700, 400))
invulnerabilità = 80
# position and size of the (white) rectangle behind the sky
rectangle_x = 430
rectangle_y = 10
rectangle_width = 900
rectangle_height = 90
punteggio = 0
pause = False
# font object used to render the score
font = pygame.font.Font(None, 36)
bonus = False
# main game loop
# NOTE(review): indentation was lost in extraction and has been reconstructed
# from the statement order — verify the nesting of the score block.
while True:
    mouse = pygame.mouse.get_pos()
    # handle window-close events
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    # character movement
    keys = pygame.key.get_pressed()
    if pause == True:
        screen.blit(game_over, (pos_over_x, pos_over_y))
        pygame.display.flip()
    # restart the game on SPACE while paused
    if keys[pygame.K_SPACE] and pause == True:
        pause = False
        velocità_salto = 6
        velocità_ostacolo = 4
        punteggio = 0
    if pause == True:
        screen.blit(game_over, (pos_over_x, pos_over_y))
        pygame.display.flip()
    # jump: rise while UP is held (up to salti_max steps), then fall back down
    if keys[pygame.K_UP] and cont_salto < salti_max and first_time:
        cont_salto += 1
        personaggio.rect.y -= velocità_salto
        pygame.time.wait(5)
    elif cont_salto > 0:
        if first_time == True:
            # compute how many descent steps mirror the ascent
            cont_discesa = int(cont_salto * velocità_salto / velocità_discesa)
            first_time = False
        cont_discesa -= 1
        if cont_discesa <= 0:
            # landed: reset to ground level
            personaggio.rect.y = 220
            cont_salto = 0
            first_time = True
        elif cont_discesa > 0:
            personaggio.rect.y += velocità_discesa
    # scroll each obstacle left; respawn it on the right once off-screen
    if ostacolo.rect.x < -120:
        ostacolo.rect.x = pos_ostacolo_rigenerato
    else:
        ostacolo.rect.x -= velocità_ostacolo
    if ostacolo2.rect.x < -120:
        ostacolo2.rect.x = pos_ostacolo_rigenerato
    else:
        ostacolo2.rect.x -= velocità_ostacolo
    if ostacolo3.rect.x < -120:
        ostacolo3.rect.x = pos_ostacolo_rigenerato
    else:
        ostacolo3.rect.x -= velocità_ostacolo
    if ostacolo4.rect.x < -120:
        ostacolo4.rect.x = pos_ostacolo_rigenerato
    else:
        ostacolo4.rect.x -= velocità_ostacolo
    # redraw the screen every loop iteration
    screen.fill(BLACK)
    pygame.draw.rect(screen, WHITE, (rectangle_x, rectangle_y, rectangle_width, rectangle_height))
    screen.blit(cielo_image, (pos_cielo_x, pos_cielo_y))
    # lose a life on collision, then grant 80 frames of invulnerability
    if (pygame.sprite.collide_mask(personaggio, ostacolo) or pygame.sprite.collide_mask(personaggio, ostacolo2) or pygame.sprite.collide_mask(personaggio, ostacolo3) or pygame.sprite.collide_mask(personaggio, ostacolo4)) and invulnerabilità == 80:
        vite -= 1
        invulnerabilità = 0
    if invulnerabilità < 80:
        invulnerabilità += 1
    personaggio.draw(screen)
    ostacolo.draw(screen)
    ostacolo2.draw(screen)
    ostacolo3.draw(screen)
    ostacolo4.draw(screen)
    # draw one heart per remaining life
    if vite == 3:
        screen.blit(vite_image, (pos_vite_x, pos_vite_y))
        screen.blit(vite_image, (pos_vite_x+ 40, pos_vite_y))
        screen.blit(vite_image, (pos_vite_x+ 80 , pos_vite_y))
    elif vite == 2:
        screen.blit(vite_image, (pos_vite_x, pos_vite_y))
        screen.blit(vite_image, (pos_vite_x+ 40, pos_vite_y))
    elif vite == 1:
        screen.blit(vite_image, (pos_vite_x, pos_vite_y))
    # out of lives: show game over, freeze speeds, reset lives and obstacles
    if vite == 0:
        # screen.fill(BLACK)
        screen.blit(game_over, (pos_over_x, pos_over_y))
        pygame.display.flip()
        pause = True
        velocità_ostacolo = 0
        velocità_salto = 0
        vite = 3
        ostacolo.rect.x = 300
        ostacolo2.rect.x = 600
        ostacolo3.rect.x = 900
        ostacolo4.rect.x = 960
    # score display (double points while the periodic bonus is active)
    if pause == False:
        punteggio += 1
        if bonus == True:
            punteggio += 1
            bonus_surface = font.render('Bonus!', True, (0,0,0))
            aggiunta = len(str(punteggio))
            screen.blit(bonus_surface, (60 + aggiunta*5 ,10))
        if punteggio % 1000 == 0:
            bonus = True
        if punteggio % 1000 == 500:
            bonus = False
        punteggio_surface = font.render(str(punteggio), True, (0,0,0))
        screen.blit(punteggio_surface, (10,10))
    if pause == False:
        pygame.display.flip()
    # wait for the next frame
clock.tick(fps) | caroprosperi6/endless-running-game- | gioco.py | gioco.py | py | 6,223 | python | it | code | 0 | github-code | 13 |
22843668525 | from lr.conflict import ConflictMap
def test_conflict():
    """A slot with two candidates is reported as conflicted; a unique one resolves."""
    cmap = ConflictMap([1, 2, 3])
    for slot, value in [(1, 'a'), (1, 'b'), (2, 'c')]:
        cmap.add(slot, value)
    resolved, conflicted = cmap.finish()
    assert resolved == {2: 'c'}
    assert conflicted == {1: ['a', 'b']}
def test_default():
    """Slots never explicitly filled fall back to the single registered default."""
    cmap = ConflictMap([1, 2, 3])
    for slot, value in [(1, 'a'), (2, 'c')]:
        cmap.add(slot, value)
    cmap.add_default('d')
    resolved, conflicted = cmap.finish()
    assert resolved == {1: 'a', 2: 'c', 3: 'd'}
    assert conflicted == {}
def test_conflict_default():
    """Two competing defaults make the unfilled slot a conflict."""
    cmap = ConflictMap([1, 2, 3])
    for slot, value in [(1, 'a'), (2, 'c')]:
        cmap.add(slot, value)
    for fallback in ('d', 'e'):
        cmap.add_default(fallback)
    resolved, conflicted = cmap.finish()
    assert resolved == {1: 'a', 2: 'c'}
    assert conflicted == {3: ['d', 'e']}
| o11c/lr-parsers | lr/tests/test_conflict.py | test_conflict.py | py | 708 | python | en | code | 1 | github-code | 13 |
38321516764 | import pytest
from pygme.game.board import GameBoard
from pygme.hangman import noose
def test_noose_construction():
    """ Tests noose.Noose constructor and noose_components class attribute """
    test_board = GameBoard(10, 10, " ")
    noose_object = noose.Noose(test_board)
    # parts and representations are paired positionally via zip below
    required_parts = ["base", "pole", "top", "rope", "head", "body", "left_leg", "right_leg", "left_arm", "right_arm"]
    expected_representations = ["_", "|", "_", "|", "O", "|", "/", "\\", "-", "-"]
    # Test that all required parts are present
    for part, representation in zip(required_parts, expected_representations):
        found = False
        for component in noose_object.noose_components:
            component_part = component["part"]
            if part == component_part.part:
                # Ensure the matching part has the expected character representation
                assert repr(component_part) == representation and str(component_part) == representation
                found = True
        assert found
def test_draw():
    """ Tests noose.Noose.draw method """
    empty_char = " "
    test_board = GameBoard(10, 10, empty_char)
    noose_object = noose.Noose(test_board)
    noose_object.draw()
    # collect the character representations of all displayed parts
    components_on_grid = set()
    for component in noose_object.noose_components:
        if component["displayed"]:
            components_on_grid.add(repr(component["part"]))
            # Ensure that if a component is supposed to be displayed that the board has been refreshed with it
            assert test_board.board[component["x_index"]][component["y_index"]] == repr(component["part"])
    for column in test_board.board:
        for element in column:
            # Ensure that no other elements beyond the displayed parts and the empty square character are on the board
            assert element == empty_char or element in components_on_grid
def test_is_complete():
    """noose.Noose.is_complete flips to True once every component is displayed."""
    board = GameBoard(10, 10, " ")
    hangman = noose.Noose(board)
    # a freshly built noose still has hidden components
    assert not hangman.is_complete()
    for piece in hangman.noose_components:
        piece["displayed"] = True
    # with every component forcibly shown, the noose reports complete
    assert hangman.is_complete()
def test_last_displayed():
    """noose.Noose.get_last_displayed points just before the next piece to draw."""
    board = GameBoard(10, 10, " ")
    hangman = noose.Noose(board)
    # on construction the pointer sits right after the last displayed element
    assert hangman.get_last_displayed() == hangman.next_piece - 1
    displayed_count = 0
    for component in hangman.noose_components:
        if not component["displayed"]:
            break
        displayed_count += 1
    assert hangman.get_last_displayed() == displayed_count - 1
def test_update():
    """ Tests noose.Noose.update method """
    test_board = GameBoard(10, 10, " ")
    noose_object = noose.Noose(test_board)
    previous_pointer = noose_object.next_piece
    assert not noose_object.is_complete()
    for i in range(pytest.large_iteration_count):
        noose_object.update()
        current_pointer = noose_object.next_piece
        if current_pointer < len(noose_object.noose_components):
            # each update reveals exactly one more piece until the noose is full
            assert current_pointer == previous_pointer + 1
            previous_pointer = current_pointer
    # after enough updates the pointer saturates and the noose is complete
    assert previous_pointer == current_pointer and noose_object.is_complete()
| adaros92/pygme | test/hangman/test_noose.py | test_noose.py | py | 3,551 | python | en | code | 0 | github-code | 13 |
14331402747 | import json
import pytest
from flask import Flask
from main import app
# Créez un client de test Flask pour interagir avec l'application
@pytest.fixture
def client():
    """Flask test client with testing mode switched on."""
    # equivalent to `app.testing = True` — the property just sets this config key
    app.config["TESTING"] = True
    return app.test_client()
# Exercise the home route
def test_hello_world(client):
    """GET / answers 200 with the French welcome banner."""
    resp = client.get('/')
    assert resp.status_code == 200
    assert b"Bienvenue sur l'application de gestion" in resp.data
# Testez la route de liste des machines (GET)
#def test_liste_machine_get(client):
# response = client.get('/machines')
# assert response.status_code == 200
# data = json.loads(response.data)
# assert "machines" in data
# Testez la route de liste des machines (POST)
#def test_liste_machine_post(client):
# data = {
# "name": "Machine1",
# "type": "Type1"
# }
# response = client.post('/machines', json=data)
# assert response.status_code == 200
# data = json.loads(response.data)
# assert "machine" in data
# Exercise the machine-details route (GET)
def test_get_machine(client):
    """GET /machines/<name> answers 200 with a payload containing 'machine'."""
    resp = client.get('/machines/Machine1')
    assert resp.status_code == 200
    payload = json.loads(resp.data)
    assert "machine" in payload
# Exercise the machine-update route (PUT)
def test_edit_machine(client):
    """PUT /machines/<name> updates the machine and echoes it back."""
    payload = {"name": "Machine1", "type": "Type2"}
    resp = client.put('/machines/Machine1', json=payload)
    assert resp.status_code == 200
    body = json.loads(resp.data)
    assert "machine" in body
# Exercise the machine-deletion route (DELETE)
def test_delete_machine(client):
    """DELETE /machines/<name> answers 200 with the removed machine."""
    resp = client.delete('/machines/Machine1')
    assert resp.status_code == 200
    body = json.loads(resp.data)
    assert "machine" in body
| Walkways/TP3 | python-api-handle-it/app/test_my_module.py | test_my_module.py | py | 1,775 | python | en | code | 0 | github-code | 13 |
30927831375 | import argparse
import datetime
import json
import math
import os
import random
import time
from pathlib import Path
import numpy as np
import ruamel.yaml as yaml
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import utils
from dataset import create_dataset, create_sampler, create_loader
from dataset.utils import collect_tensor_result, grounding_eval_bbox, grounding_eval_bbox_vlue
from models.model_bbox import XVLM
from models.tokenization_bert import BertTokenizer
from models.tokenization_roberta import RobertaTokenizer
from optim import create_optimizer
from refTools.refer_python3 import REFER
from scheduler import create_scheduler
from utils.hdfs_io import hmkdir, hcopy, hexists
def train(model, data_loader, optimizer, tokenizer, epoch, device, scheduler, config):
    """One epoch of bbox-grounding fine-tuning; returns averaged losses as formatted strings."""
    model.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('loss_bbox', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
    metric_logger.add_meter('loss_giou', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50
    step_size = 100  # NOTE(review): unused in this function — presumably a leftover; confirm
    for i, (image, text, target_bbox) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        image = image.to(device, non_blocking=True)
        # tokenize the batch of referring expressions, padded to the longest in the batch
        text_input = tokenizer(text, padding='longest', max_length=config['max_tokens'], return_tensors="pt").to(device)
        target_bbox = target_bbox.to(device)
        # with target boxes given, the model returns (coords, bbox regression loss, GIoU loss)
        _, loss_bbox, loss_giou = model(image, text_input.input_ids, text_input.attention_mask, target_bbox=target_bbox)
        loss = loss_bbox + loss_giou
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # per-step (not per-epoch) learning-rate schedule
        scheduler.step()
        metric_logger.update(loss_bbox=loss_bbox.item())
        metric_logger.update(loss_giou=loss_giou.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.5f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
def val(model, data_loader, tokenizer, device):
    """Predict a bounding box per (image, text) pair; returns [{'ref_id', 'pred'}, ...]."""
    model.eval()
    metric_logger = utils.MetricLogger(delimiter=" ")
    header = 'Evaluation:'
    print_freq = 50
    result = []
    for image, text, ref_ids in metric_logger.log_every(data_loader, print_freq, header):
        image = image.to(device)
        text_input = tokenizer(text, padding='longest', return_tensors="pt").to(device)
        with torch.no_grad():
            # without target_bbox the model returns only the predicted coordinates
            outputs_coord = model(image, text_input.input_ids, text_input.attention_mask, target_bbox=None)
        # one predicted box per reference id in the batch
        assert len(ref_ids) == outputs_coord.shape[0]
        for r_id, coord in zip(ref_ids, outputs_coord):
            result.append({'ref_id': r_id.item(), 'pred': coord})
    return result
def main(args, config):
    """Entry point: set up distributed training, then either evaluate or fine-tune.

    In evaluate mode, predictions are gathered across ranks and scored with the
    RefCOCO+ tools (or the VLUE variant). In train mode, the best checkpoint by
    'val_d' accuracy is kept and per-epoch stats are appended to log.txt.
    """
    utils.init_distributed_mode(args)
    device = torch.device(args.device)
    world_size = utils.get_world_size()
    if world_size > 8:
        # multi-node runs stage per-rank results on HDFS before merging
        assert hexists(args.output_hdfs) and args.output_hdfs.startswith('hdfs'), "for collect_result among nodes"
    if args.bs > 0:
        # args.bs is the global batch size; split it across ranks
        config['batch_size'] = args.bs // world_size
    # per-rank seeding for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True
    print("Creating dataset")
    grd_train_dataset, grd_test_dataset = create_dataset('grounding_bbox', config, args.evaluate)
    print("Creating model")
    model = XVLM(config=config)
    model.load_pretrained(args.checkpoint, config, load_bbox_pretrain=args.load_bbox_pretrain, is_eval=args.evaluate)
    model = model.to(device)
    print("### Total Params: ", sum(p.numel() for p in model.parameters() if p.requires_grad))
    # keep a handle to the bare module for saving/eval when wrapped in DDP
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    if config['use_roberta']:
        tokenizer = RobertaTokenizer.from_pretrained(config['text_encoder'])
    else:
        tokenizer = BertTokenizer.from_pretrained(config['text_encoder'])
    print("### output_dir, ", args.output_dir, flush=True)
    print("### output_hdfs, ", args.output_hdfs, flush=True)
    start_time = time.time()
    if args.evaluate:
        print("Start evaluating")
        if args.distributed:
            num_tasks = utils.get_world_size()
            global_rank = utils.get_rank()
            samplers = create_sampler([grd_test_dataset], [False], num_tasks, global_rank)
        else:
            samplers = [None]
        test_loader = create_loader([grd_test_dataset], samplers,
                                    batch_size=[config['batch_size']],
                                    num_workers=[4], is_trains=[False], collate_fns=[None])[0]
        result = val(model_without_ddp, test_loader, tokenizer, device)
        # merge per-rank predictions (via HDFS when running on many nodes)
        results = collect_tensor_result(result, filename='grounding_bbox_eval', local_wdir=args.result_dir,
                                        hdfs_wdir=args.output_hdfs,
                                        write_to_hdfs=world_size > 8)
        if utils.is_main_process():
            if 'vlue_test' in config.keys() and config['vlue_test']:
                grounding_acc = grounding_eval_bbox_vlue(results, config['test_file'][0])
            else:
                # refcoco evaluation tools
                refer = REFER(config['refcoco_data'], 'refcoco+', 'unc')
                grounding_acc = grounding_eval_bbox(results, refer)
            log_stats = {**{f'{k}': v for k, v in grounding_acc.items()}}
            print(log_stats)
        dist.barrier()
    else:
        print("Start training")
        datasets = [grd_train_dataset, grd_test_dataset]
        train_dataset_size = len(grd_train_dataset)
        train_batch_size = config['batch_size']
        if utils.is_main_process():
            print(f"### data {train_dataset_size}, batch size, {train_batch_size} x {world_size}")
        if args.distributed:
            num_tasks = utils.get_world_size()
            global_rank = utils.get_rank()
            samplers = create_sampler(datasets, [True, False], num_tasks, global_rank)
        else:
            samplers = [None, None]
        train_loader, test_loader = create_loader(datasets, samplers,
                                                  batch_size=[config['batch_size'], config['batch_size']],
                                                  num_workers=[4, 4], is_trains=[True, False], collate_fns=[None, None])
        arg_opt = utils.AttrDict(config['optimizer'])
        optimizer = create_optimizer(arg_opt, model)
        arg_sche = utils.AttrDict(config['schedular'])
        # the scheduler needs steps-per-epoch to build its per-step LR curve
        arg_sche['step_per_epoch'] = math.ceil(train_dataset_size/(train_batch_size*world_size))
        lr_scheduler = create_scheduler(arg_sche, optimizer)
        max_epoch = config['schedular']['epochs']
        best = 0
        best_epoch = 0
        for epoch in range(0, max_epoch):
            if args.distributed:
                # reshuffle shards each epoch
                train_loader.sampler.set_epoch(epoch)
            train_stats = train(model, train_loader, optimizer, tokenizer, epoch, device, lr_scheduler, config)
            result = val(model_without_ddp, test_loader, tokenizer, device)
            results = collect_tensor_result(result, filename='epoch%d' % epoch, local_wdir=args.result_dir, hdfs_wdir=args.output_hdfs,
                                            write_to_hdfs=world_size > 8)
            if utils.is_main_process():
                # refcoco evaluation tools
                refer = REFER(config['refcoco_data'], 'refcoco+', 'unc')
                grounding_acc = grounding_eval_bbox(results, refer)
                log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                             **{f'{k}': v for k, v in grounding_acc.items()},
                             'epoch': epoch}
                # checkpoint only when validation accuracy ('val_d') improves
                if grounding_acc['val_d'] > best:
                    save_obj = {
                        'model': model_without_ddp.state_dict(),
                        # 'optimizer': optimizer.state_dict(),
                        # 'lr_scheduler': lr_scheduler.state_dict(),
                        'config': config,
                        # 'epoch': epoch,
                    }
                    torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_best.pth'))
                    best = grounding_acc['val_d']
                    best_epoch = epoch
                with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
                    f.write(json.dumps(log_stats) + "\n")
            dist.barrier()
        if utils.is_main_process():
            with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
                f.write("best epoch: %d" % best_epoch)
            os.system(f"cat {args.output_dir}/log.txt")
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('### Time {}'.format(total_time_str))
if __name__ == '__main__':
    # parse CLI arguments, materialize output dirs, snapshot the config, then run
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', type=str, required=True)
    parser.add_argument('--config', type=str, default='configs/Grounding_bbox.yaml')
    parser.add_argument('--output_dir', type=str, default='output/refcoco_bbox')
    parser.add_argument('--output_hdfs', type=str, default='', help="to collect eval results among nodes")
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--distributed', action='store_false')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--load_bbox_pretrain', action='store_true')
    parser.add_argument('--bs', default=-1, type=int, help="for each gpu, batch_size = bs // num_gpus")
    args = parser.parse_args()
    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
    args.result_dir = os.path.join(args.output_dir, 'result')
    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    Path(args.result_dir).mkdir(parents=True, exist_ok=True)
    # keep a copy of the effective config next to the results
    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))
    if len(args.output_hdfs):
        hmkdir(args.output_hdfs)
main(args, config) | zengyan-97/X-VLM | Grounding_bbox.py | Grounding_bbox.py | py | 10,756 | python | en | code | 411 | github-code | 13 |
16168984975 | #@by mohammadsam ansaripoor
from tkinter import *
from tkinter import font
from tkinter.font import Font
from typing import Sized
from PIL import Image , ImageTk
import tkinter.font as font
from math import *
# main window setup
window = Tk()
window.geometry('1000x800')
window.title('calculator')
window.resizable(width=False,height=False)
window.configure(bg='black')
input_dir = "data\\"
# calculator background skin
my_image = ImageTk.PhotoImage(Image.open(input_dir + "calculator-3-2.png"))
my_label = Label(image=my_image)
my_label.grid(row=1, column=1)
#import images
photo_mosavi = ImageTk.PhotoImage(Image.open(input_dir + '=.png'))
photo_0 = ImageTk.PhotoImage(Image.open(input_dir + '0.png'))
photo_1 = ImageTk.PhotoImage(Image.open(input_dir + '1.png'))
photo_2 = ImageTk.PhotoImage(Image.open(input_dir + "2.png"))
photo_3 = ImageTk.PhotoImage(Image.open(input_dir + '3.png'))
photo_4 = ImageTk.PhotoImage(Image.open(input_dir + '4.png'))
photo_5 = ImageTk.PhotoImage(Image.open(input_dir + '5.png'))
photo_6 = ImageTk.PhotoImage(Image.open(input_dir + '6.png'))
photo_7 = ImageTk.PhotoImage(Image.open(input_dir + '7.png'))
photo_8 = ImageTk.PhotoImage(Image.open(input_dir + '8.png'))
photo_9 = ImageTk.PhotoImage(Image.open(input_dir + '9.png'))
photo_mines = ImageTk.PhotoImage(Image.open(input_dir + '-.png'))
photo_plus = ImageTk.PhotoImage(Image.open(input_dir + '+.png'))
photo_taghsim = ImageTk.PhotoImage(Image.open(input_dir + '÷.png'))
photo_zarb = ImageTk.PhotoImage(Image.open(input_dir + 'x.png'))
photo_minesplus = ImageTk.PhotoImage(Image.open(input_dir + '+-.png'))
photo_dot = ImageTk.PhotoImage(Image.open(input_dir + 'dot.png'))
photo_delete = ImageTk.PhotoImage(Image.open(input_dir + 'delete.png'))
photo_threepoint = ImageTk.PhotoImage(Image.open(input_dir + '000 (Custom).png'))
photo_clear = ImageTk.PhotoImage(Image.open(input_dir + 'clear.png'))
photo_darsad = ImageTk.PhotoImage(Image.open(input_dir + 'darsad.png'))
photo_xf = ImageTk.PhotoImage(Image.open(input_dir + 'x!.png'))
photo_jazr = ImageTk.PhotoImage(Image.open(input_dir + 'radikal.png'))
photo_p = ImageTk.PhotoImage(Image.open(input_dir + 'pi.png'))
photo_1xom = ImageTk.PhotoImage(Image.open(input_dir + '1x.png'))
photo_xy = ImageTk.PhotoImage(Image.open(input_dir + 'x^y.png'))
photo_x2 = ImageTk.PhotoImage(Image.open(input_dir + 'x^2.png'))
photo_pnr = ImageTk.PhotoImage(Image.open(input_dir + 'p(n,r).png'))
photo_cnr = ImageTk.PhotoImage(Image.open(input_dir + 'c(n,r).png'))
#sizing
my_size = 55
text_font = font.Font(size=my_size)
#input field
# global calculator state shared by set_number / set_operation / show_result
mm = 1
mn = 1
shomarande = 1
looker = 2
momayez = 'f'
tak = 2
operation = ''
num_1 = StringVar()
num_2 = StringVar()
result_value = StringVar()
show = ''
k = 1
# expersion / expersion2 accumulate the first and second operand as text
expersion = ''
expersion2 = ''
input_num_1 = Entry(window,textvariable=num_1,background='lightgray',font=text_font)
input_num_1.place(x=25,y=34,width=531,height=110)
result_field = Entry(window,textvariable=result_value,background='lightgray',font=text_font)
result_field.place(x=562,y=33,width=425,height=110)
# flag == 0 -> typing the first operand; flag == 1 -> typing the second
flag = 0
list_1 = []
list_2 = []
def set_number(number):
    """Append a keypad press to the active operand (codes: 10='000', 11=+/-, 13=pi, 15='.', 19=backspace).

    Which operand receives the digit depends on the global ``flag``
    (0 -> first operand / list_1, 1 -> second operand / list_2).
    NOTE(review): indentation was lost in extraction and has been reconstructed —
    verify the loop bodies in the '000' and backspace branches.
    """
    pio = 'yes'
    pio2 = 'yes'
    global expersion
    global expersion2
    global mm
    global mn
    global shomarande
    global looker
    global momayez
    global list_1
    global list_2
    if flag == 0 :
        # pio marks whether a backspace is allowed (operand non-empty)
        if len(list_1) == 0 :
            pio = 'no'
        if len(list_1) != 0 :
            pio = 'yes'
        if number == 10:
            # '000' key: append three zeros
            for i in range(3):
                expersion = expersion + str(0)
                list_1.append(str(0))
                num_1.set(expersion)
            expersion = str(expersion)
            num_1.set(expersion)
        elif number == 13 :
            # pi key: replace the operand with the hard-coded digits
            list_1 = []
            pnj = ['1','3','.','1','4','5','1']
            pno = ''
            for pk in range(7):
                pn = pnj[pk]
                pno = pno + pn
                list_1.append(pn)
            expersion = pno
            num_1.set(expersion)
        elif number == 11:
            # +/- key: flip the sign of the (integer) operand
            k = int(expersion)
            k = k * -1
            list_1.append(str(k))
            expersion = str(k)
            num_1.set(expersion)
        elif number == 15 :
            # decimal-point key
            expersion = expersion + '.'
            list_1.append('.')
            num_1.set(expersion)
        elif number == 19 and pio == 'yes' :
            # backspace: rebuild the operand without its last character
            y = ''
            tool = len(list_1)
            for i in range(len(list_1)-1):
                y += list_1[i]
            del list_1[tool-1]
            expersion = y
            num_1.set(expersion)
        elif number != 19 :
            # plain digit
            expersion = expersion + str(number)
            list_1.append(str(number))
            num_1.set(expersion)
    if flag == 1 :
        # same state machine, but applied to the second operand
        if len(list_2) == 0 :
            pio2 = 'no'
        if len(list_2) != 0 :
            pio2 = 'yes'
        if number == 10:
            for i in range(3):
                expersion2 = expersion2 + str(0)
                list_2.append(str(0))
                num_2.set(expersion2)
            expersion2 = str(expersion2)
            num_2.set(expersion2)
        elif number == 13 :
            list_2 = []
            pnj = ['1','3','.','1','4','5','1']
            pno = ''
            for pk in range(7):
                pn = pnj[pk]
                pno = pno + pn
                list_2.append(pn)
            expersion2 = pno
            num_2.set(expersion2)
        elif number == 11:
            k = int(expersion2)
            k = k * -1
            expersion2 = str(k)
            list_2.append(str(k))
            num_2.set(expersion2)
        elif number == 15 :
            expersion2 = expersion2 + '.'
            list_2.append('.')
            num_2.set(expersion2)
        elif number == 19 and pio2 == 'yes' :
            y = ''
            tool = len(list_2)
            for i in range(len(list_2)-1):
                y += list_2[i]
            if tool != 0 :
                del list_2[tool-1]
            expersion2 = y
            num_2.set(expersion2)
        elif number != 19:
            expersion2 = expersion2 + str(number)
            list_2.append(str(number))
            num_2.set(expersion2)
def factoreil(n):
    """Iteratively compute n! (returns 1 for n <= 0); also accepts float inputs as the UI passes."""
    product = 1
    current = n
    while current > 0:
        product *= current
        current -= 1
    return product
def set_operation(op):
    """Record the chosen operator; unary ops ('x!', '√', '1÷x', 'x^2') evaluate
    immediately, binary ops switch input to the second operand.

    NOTE(review): indentation reconstructed — the final ``else`` binds to the
    'x^2' check, so it runs for every non-'x^2' operator; confirm intended.
    """
    global operation
    global show
    global flag
    global changer
    flag = 1
    operation = op
    global tak
    # tak == 0 marks a unary (single-operand) operation
    if op == 'x!' or op == '√' or op == '1÷x' or op == 'x^2' :
        tak = 0
    if operation == 'x!':
        result = factoreil(float(num_1.get()))
        if result % 1 == 0 :
            result = int(result)
        result_value.set(result)
    if operation == '√':
        result = sqrt(float(num_1.get()))
        if result % 1 == 0 :
            result = int(result)
        result_value.set(result)
    if operation == '1÷x':
        result = 1/(float(num_1.get()))
        if result % 1 == 0 :
            result = int(result)
        result_value.set(result)
    if operation == 'x^2':
        result = (float(num_1.get())) ** 2
        if result % 1 == 0 :
            result = int(result)
        result_value.set(result)
    else:
        # binary operator: show it and rebind the entry to the second operand
        show = op
        result_value.set(show)
        global num_2
        num_2 = StringVar()
        input_num_1 = Entry(window,textvariable=num_2,background='lightgray',font=text_font)
        input_num_1.place(x=25,y=34,width=531,height=110)
        result_field = Entry(window,textvariable=result_value,background='lightgray',font=text_font)
        result_field.place(x=562,y=33,width=425,height=110)
def show_result():
    """Evaluate the pending binary operation on num_1/num_2 and display it.

    Does nothing when the stored operation is not a binary one (unary ops
    are handled directly in set_operation).
    """
    def _finish(value):
        # Collapse whole floats (e.g. 4.0) to ints; 'invalid input' is
        # passed through untouched.
        if value != 'invalid input' and value % 1 == 0:
            value = int(value)
        result_value.set(value)

    simple_ops = {
        '%': lambda a, b: (a / 100) * b,
        '-': lambda a, b: a - b,
        '+': lambda a, b: a + b,
        '×': lambda a, b: a * b,
        '÷': lambda a, b: a / b,
        '^': lambda a, b: a ** b,
    }
    if operation in simple_ops:
        _finish(simple_ops[operation](float(num_1.get()), float(num_2.get())))
    elif operation in ('pnr', 'cnr'):
        a = float(num_1.get())
        b = float(num_2.get())
        if a < b:
            # n must not be smaller than r for permutations/combinations.
            _finish('invalid input')
        elif operation == 'pnr':
            # nPr = n! / (n-r)!
            _finish(factoreil(a) / factoreil(a - b))
        else:
            # nCr = n! / ((n-r)! * r!)
            _finish(factoreil(a) / (factoreil(a - b) * factoreil(b)))
def cleaning():
    """Reset the whole calculator state (the 'C' button).

    Clears both operand buffers, digit lists and the pending operation,
    then rebuilds the two entry widgets so they point at fresh StringVars.
    """
    global num_1
    global num_2
    global expersion
    global expersion2
    global flag
    global result_value
    global show
    global operation
    global list_1
    global list_2
    # Digit lists and display strings for both operands.
    list_1 = []
    list_2 = []
    operation = ''
    show = ''
    result_value = StringVar()
    expersion2 = ''
    expersion = ''
    flag = 0
    num_2 = StringVar()
    num_1 = StringVar()
    # Recreate the input/result widgets bound to the fresh StringVars.
    input_num_1 = Entry(window,textvariable=num_1,background='lightgray',font=text_font)
    input_num_1.place(x=25,y=34,width=531,height=110)
    result_field = Entry(window,textvariable=result_value,background='lightgray',font=text_font)
    result_field.place(x=562,y=33,width=425,height=110)
# --- calculator button grid -------------------------------------------------
# Digit buttons 0-9: each pushes its digit into the active operand buffer.
# set_number also handles special codes: 10 -> '000', 11 -> sign toggle,
# 13 -> pi, 15 -> decimal point, 19 -> backspace/delete.
button_0 = Button(window,image=photo_0,command=lambda:set_number(0))
button_0.place(x=440,y=650)
button_1 = Button(window,image=photo_1,command=lambda:set_number(1))
button_1.place(x=440.5,y=525)
buttun_2 = Button(window,image=photo_2,command=lambda:set_number(2))  # NOTE(review): variable name typo 'buttun_2' (kept; renaming is a code change)
buttun_2.place(x=580.5,y=525)
button_3 = Button(window,image=photo_3,command=lambda:set_number(3))
button_3.place(x=715.5,y=525)
button_4 = Button(window,image=photo_4,command=lambda:set_number(4))
button_4.place(x=440,y=400)
button_5 = Button(window,image=photo_5,command=lambda:set_number(5))
button_5.place(x=580,y=400)
button_6 = Button(window,image=photo_6,command=lambda:set_number(6))
button_6.place(x=715,y=400)
button_7 = Button(window,image=photo_7,command=lambda:set_number(7))
button_7.place(x=440,y=275)
button_8 = Button(window,image=photo_8,command=lambda:set_number(8))
button_8.place(x=580,y=275)
button_9 = Button(window,image=photo_9,command=lambda:set_number(9))
button_9.place(x=715,y=275)
# Binary arithmetic operators (evaluated by show_result via '=').
button_mines = Button(window,image=photo_mines,command=lambda:set_operation('-'))
button_mines.place(x=860,y=275)
button_plus = Button(window,image=photo_plus,command=lambda:set_operation('+'))
button_plus.place(x=860,y=400)
button_taghsim = Button(window,image=photo_taghsim,command=lambda:set_operation('÷'))
button_taghsim.place(x=860,y=525)
button_zarb = Button(window,image=photo_zarb,command=lambda:set_operation('×'))
button_zarb.place(x=860,y=650)
# Special input keys (codes documented above).
button_minesplus = Button(window,image=photo_minesplus,command=lambda:set_number(11))
button_minesplus.place(x=715,y=650)
button_dot = Button(window,image=photo_dot,command=lambda:set_number(15))
button_dot.place(x=580,y=650)
button_delete = Button(window,image=photo_delete,command=lambda:set_number(19))
button_delete.place(x=855,y=175)
button_threepoint = Button(window,image=photo_threepoint,command=lambda:set_number(10))
button_threepoint.place(x=579,y=175)
button_clear = Button(window,image=photo_clear,command=lambda : cleaning())
button_clear.place(x=435,y=175)
# Unary / scientific operators (evaluated immediately in set_operation).
button_darsad = Button(window,image=photo_darsad,command=lambda:set_operation('%'))
button_darsad.place(x=277,y=175)
button_xf = Button(window,image=photo_xf,command=lambda:set_operation('x!'))
button_xf.place(x=155,y=175)
button_jazr = Button(window,image=photo_jazr,command=lambda:set_operation('√'))
button_jazr.place(x=30,y=175)
button_p = Button(window,image=photo_p,command=lambda:set_number(13))
button_p.place(x=277,y=342)
button_1xom = Button(window,image=photo_1xom,command=lambda:set_operation('1÷x'))
button_1xom.place(x=155,y=342)
button_xy = Button(window,image=photo_xy,command=lambda:set_operation('^'))
button_xy.place(x=30,y=342)
button_x2 = Button(window,image=photo_x2,command=lambda:set_operation('x^2'))
button_x2.place(x=277,y=505)
button_pnr = Button(window,image=photo_pnr,command=lambda:set_operation('pnr'))
button_pnr.place(x=155,y=505)
button_cnr = Button(window,image=photo_cnr,command=lambda:set_operation('cnr'))
button_cnr.place(x=30,y=505)
# '=' button: evaluate the pending binary operation.
button_mosavi = Button(window,image=photo_mosavi,command=lambda:show_result())
button_mosavi.place(x=30,y=650)
# Enter the Tk event loop (blocks until the window is closed).
window.mainloop()
#@mohammadsam ansaripoor | mohammadsam-programmer/calculator | calculator_2.py | calculator_2.py | py | 13,797 | python | en | code | 0 | github-code | 13 |
71588779217 | import trackpy as tp
import argparse
import os
import pandas as pd
from pathlib import Path
import numpy as np
import napari
from datetime import datetime
def track(platelets):
    """Link per-frame platelet detections into trajectories with trackpy.

    Uses a 3-unit search radius in (xs, ys, zs) space and tolerates one
    dropped frame per track (memory=1).
    """
    return tp.link_df(
        platelets,
        3,
        pos_columns=['xs', 'ys', 'zs'],
        t_column='t',
        memory=1,
    )
def get_platelets_paths(md_path):
    """Read the metadata CSV and return (platelets_info_path values, metadata)."""
    metadata = pd.read_csv(md_path)
    return metadata['platelets_info_path'].values, metadata
def add_track_len(df):
    """Attach, per row, the number of frames in that row's track.

    Track ids live in the 'particle' column; the count is written to a new
    'track_no_frames' column (the input frame is modified in place).
    """
    particle_ids = df['particle'].values
    # bincount gives frames-per-track; indexing by id broadcasts it per row.
    df['track_no_frames'] = np.bincount(particle_ids)[particle_ids]
    return df
def filter_df(df, min_frames=20):
    """Return only the rows whose track spans at least min_frames frames."""
    return df.loc[df['track_no_frames'] >= min_frames, :]
def view_tracks(image, labels, df, scale=(1, 4, 1, 1), min_frames=20):
    """Open a napari viewer showing the image, segmentation and tracks.

    Only tracks at least min_frames long are shown; napari's tracks layer
    expects columns (track_id, t, z, y, x). Blocks until the viewer closes.
    """
    cols = ['particle', 't', 'z_pixels', 'y_pixels', 'x_pixels']
    df_filtered = filter_df(df, min_frames=min_frames)
    tracks = df_filtered[cols]
    v = napari.view_image(image, scale=scale, blending='additive')
    v.add_labels(labels, scale=scale, blending='additive')
    v.add_tracks(tracks, scale=scale)
    napari.run()
def track_from_path(path, out_dir):
    """Track the platelet CSV at *path* and save '<stem>_tracks.csv' in out_dir.

    Returns the full path of the written tracks file.
    """
    os.makedirs(out_dir, exist_ok=True)
    df = pd.read_csv(path)
    df = track(df)
    df = add_track_len(df)
    tracks_name = Path(path).stem + '_tracks.csv'
    tracks_path = os.path.join(out_dir, tracks_name)
    df.to_csv(tracks_path)
    return tracks_path
def track_from_df(df, meta, out_dir):
    """Track an already-loaded platelet DataFrame and save the tracks CSV.

    NOTE(review): assumes ``meta['platelets_info_path']`` yields a single
    path-like value (i.e. meta is one record, not the whole metadata
    frame) — confirm against callers. Also note this returns only the
    file *name*, while track_from_path returns the full path — verify the
    inconsistency is intentional.
    """
    os.makedirs(out_dir, exist_ok=True)
    df = track(df)
    df = add_track_len(df)
    path = meta['platelets_info_path']
    tracks_name = Path(path).stem + '_tracks.csv'
    tracks_path = os.path.join(out_dir, tracks_name)
    df.to_csv(tracks_path)
    return tracks_name
if __name__ == '__main__':
    # Batch-track every platelet CSV listed in the segmentation metadata.
    # NOTE(review): this duplicates track_from_path inline rather than
    # calling it — consider consolidating.
    md_path = '/home/abigail/data/plateseg-training/timeseries_seg/inj-4-seg-pipeline_segmentation-metadata.csv'
    out_dir = '/home/abigail/data/plateseg-training/timeseries_seg'
    p_paths, meta = get_platelets_paths(md_path)
    t_paths = []
    for p in p_paths:
        df = pd.read_csv(p)
        df = track(df)
        df = add_track_len(df)
        tracks_name = Path(p).stem + '_tracks.csv'
        tracks_path = os.path.join(out_dir, tracks_name)
        df.to_csv(tracks_path)
        t_paths.append(tracks_path)
    # Record the output locations and overwrite the metadata CSV in place.
    meta['tracks_paths'] = t_paths
    meta.to_csv(md_path)
| AbigailMcGovern/platelet-segmentation | tracking.py | tracking.py | py | 2,465 | python | en | code | 1 | github-code | 13 |
2244444679 | import cairo
import pytest
@pytest.fixture
def context():
    """Provide a cairo Context drawing onto a small 42x42 ARGB32 surface."""
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 42, 42)
    return cairo.Context(surface)
def test_path():
    """cairo.Path exists but cannot be instantiated directly."""
    assert cairo.Path
    with pytest.raises(TypeError):
        cairo.Path()
def test_path_str(context):
    """str(Path) renders one 'op args' line per path element."""
    p = context.copy_path()
    assert isinstance(p, cairo.Path)
    # An empty path stringifies to the empty string.
    assert str(p) == ""

    # A line_to on an empty path becomes an implicit move_to.
    context.line_to(1, 2)
    p = context.copy_path()
    assert str(p) == "move_to 1.000000 2.000000"

    context.line_to(1, 2)
    p = context.copy_path()
    assert str(p) == "move_to 1.000000 2.000000\nline_to 1.000000 2.000000"

    # curve_to also starts with an implicit move_to its first control point.
    context.new_path()
    context.curve_to(0, 1, 2, 3, 4, 5)
    p = context.copy_path()
    assert str(p) == (
        "move_to 0.000000 1.000000\n"
        "curve_to 0.000000 1.000000 2.000000 3.000000 4.000000 5.000000")

    # close_path emits 'close path' plus a move_to back to the start point.
    context.new_path()
    context.line_to(1, 2)
    context.close_path()
    p = context.copy_path()
    assert str(p) == (
        "move_to 1.000000 2.000000\n"
        "close path\n"
        "move_to 1.000000 2.000000")
def test_path_compare_hash(context):
    """Paths support equality, ordering and hashing; identity compares equal."""
    p = context.copy_path()
    assert p == p
    hash(p)
    assert not p != p
    # Comparison with unrelated types is unequal, not an error.
    assert p != object()
    assert not p < p
    assert p <= p
    assert p >= p
    assert not p > p
def test_path_iter(context):
    """Iterating a Path yields (path_element_type, points) tuples.

    Element types: 0 move_to, 1 line_to, 2 curve_to, 3 close_path.
    """
    context.line_to(1, 2)
    context.line_to(2, 3)
    context.curve_to(0, 1, 2, 3, 4, 5)
    context.close_path()
    p = context.copy_path()
    i = iter(p)
    assert list(i) == [
        (0, (1.0, 2.0)),
        (1, (2.0, 3.0)),
        (2, (0.0, 1.0, 2.0, 3.0, 4.0, 5.0)),
        (3, ()),
        (0, (1.0, 2.0)),
    ]
| pygobject/pycairo | tests/test_path.py | test_path.py | py | 1,638 | python | en | code | 570 | github-code | 13 |
19527492822 | import numpy as np
import matplotlib.pyplot as plt
class NeuralNetwork():
    """A two-layer (one hidden layer) tanh network with softmax output.

    Trained by full-batch gradient descent with L2 regularisation. The
    parameters are kept in a plain dict {'W1','b1','W2','b2'} that is
    passed explicitly to calculate_loss/predict.
    """
    def __init__(self, nn_input_dim = 2, nn_output_dim = 2, reg_lambda = 0.01):
        """
        nn_input_dim - input layer dimensionality
        nn_output_dim - output layer dimensionality (number of classes)
        reg_lambda - L2 regularization strength
        """
        self.nn_input_dim = nn_input_dim
        self.nn_output_dim = nn_output_dim
        self.reg_lambda = reg_lambda
    def calculate_loss(self, model, x, y):
        """Return the regularised mean cross-entropy loss of *model* on (x, y).

        x: (n_samples, nn_input_dim) inputs; y: integer class labels.
        """
        W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
        # Forward propagation to calculate our predictions
        z1 = x.dot(W1) + b1
        a1 = np.tanh(z1)
        z2 = a1.dot(W2) + b2
        exp_scores = np.exp(z2)
        # SoftMax (output of the final layer)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
        # Cross-entropy: -log probability assigned to the true class.
        corect_logprobs = -np.log(probs[range(len(x)), y])
        data_loss = np.sum(corect_logprobs)
        # Add L2 regularization term to the loss (biases are not regularised).
        data_loss += self.reg_lambda/2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
        return 1./len(x) * data_loss
    def predict(self, model, x):
        """Return the most probable class index for each row of x."""
        W1, b1, W2, b2= model['W1'], model['b1'], model['W2'], model['b2']
        # Forward propagation (same computation as in calculate_loss).
        z1 = x.dot(W1) + b1
        a1 = np.tanh(z1)
        z2 = a1.dot(W2) + b2
        exp_scores = np.exp(z2)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
        return np.argmax(probs, axis=1)
    def build_model(self, x, y, nn_hdim, num_passes=20000, print_loss=False, epsilon=0.01):
        """Train and return the parameter dict for a network with nn_hdim hidden units.

        epsilon is the gradient-descent learning rate. When print_loss is
        true the loss is printed every 1000 passes. NOTE(review): the final
        plt.plot call runs unconditionally and plots an empty list unless
        print_loss was set — confirm this is intended.
        """
        # Initialize the parameters to random values (seeded for
        # reproducibility); scaled by 1/sqrt(fan_in).
        np.random.seed(0)
        W1 = np.random.randn(self.nn_input_dim, nn_hdim) / np.sqrt(self.nn_input_dim)
        b1 = np.zeros((1, nn_hdim))
        W2 = np.random.randn(nn_hdim, self.nn_output_dim) / np.sqrt(nn_hdim)
        b2 = np.zeros((1, self.nn_output_dim))
        # This is what we return at the end
        model = {}
        loss_list=[]
        # Gradient descent, full batch each pass.
        for i in range(0, num_passes):
            # Forward propagation
            z1 = x.dot(W1) + b1
            a1 = np.tanh(z1)
            z2 = a1.dot(W2) + b2
            exp_scores = np.exp(z2)
            probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
            # Backpropagation: softmax + cross-entropy gradient is probs - onehot(y).
            delta3 = probs
            delta3[range(len(x)), y] -= 1
            dW2 = (a1.T).dot(delta3)
            db2 = np.sum(delta3, axis=0, keepdims=True)
            # tanh'(z1) = 1 - a1^2
            delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
            dW1 = np.dot(x.T, delta2)
            db1 = np.sum(delta2, axis=0)
            # Add regularization terms (b1 and b2 don't have regularization terms)
            dW2 += self.reg_lambda * W2
            dW1 += self.reg_lambda * W1
            # Gradient descent parameter update
            W1 += -epsilon * dW1
            b1 += -epsilon * db1
            W2 += -epsilon * dW2
            b2 += -epsilon * db2
            # Assign new parameters to the model
            model = { 'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
            # Optionally print the loss.
            # This is expensive because it uses the whole dataset, so we don't want to do it too often.
            if print_loss and i % 1000 == 0:
                l= self.calculate_loss(model, x, y)
                print("Loss after iteration %i: %f" %(i, l))
                loss_list.append(l)
        print(plt.plot(loss_list))
        return model
import datetime
import json
import logging

from bson.objectid import ObjectId
from flask import Flask
from flask_cors import CORS
from flask_pymongo import PyMongo
# Module-level extension instance; bound to the app in configure_mongo_uri().
mongo = PyMongo()
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that also serialises Mongo ObjectIds, sets and datetimes.

    Installed on the Flask app (app.json_encoder); ``default`` is invoked
    for any object the stock encoder cannot handle.
    """
    def default(self, o):
        if isinstance(o, ObjectId):
            # Mongo ids serialise as their 24-char hex string form.
            return str(o)
        if isinstance(o, set):
            return list(o)
        if isinstance(o, datetime.datetime):
            # BUG FIX: ``datetime`` was never imported in this module, so
            # this branch raised NameError whenever a datetime reached the
            # encoder; ``import datetime`` is now at the top of the file.
            return str(o)
        return json.JSONEncoder.default(self, o)
def create_app():
    """Application factory: build and return a fully configured Flask app.

    Wires up MongoDB, CORS (with credentials), the custom JSON encoder and
    all API blueprints.
    """
    app = Flask(__name__)
    configure_mongo_uri(app)
    CORS(app, supports_credentials=True)
    app.json_encoder = JSONEncoder
    register_blueprints(app)
    return app
def configure_mongo_uri(app):
    """Load config.py settings and attach Flask-PyMongo to *app*.

    Builds the Atlas connection string from MONGODB_USERNAME / _PASSWORD /
    _HOST. A failed init is printed but not raised (best-effort startup).
    """
    # Environment-specific settings come from config.py.
    app.config.from_pyfile('config.py')
    user = app.config["MONGODB_USERNAME"]
    password = app.config["MONGODB_PASSWORD"]
    host = app.config["MONGODB_HOST"]
    app.config["MONGO_URI"] = f"mongodb+srv://{user}:{password}@{host}"
    try:
        mongo.init_app(app)
        print("MongoDB connected.")
    except Exception as e:
        print(e)
def register_blueprints(app):
    """Register every controller blueprint on the Flask app and return it."""
    # Imported here rather than at module level, presumably to avoid
    # circular imports with the controllers — TODO confirm.
    from api.controllers.sample import sample
    from api.controllers.authentication import authentication
    from api.controllers.yelp import yelp
    from api.controllers.orders import orders
    logging.info("Registering blueprints into app.")
    app.register_blueprint(sample)
    app.register_blueprint(authentication)
    app.register_blueprint(yelp)
    app.register_blueprint(orders)
    return app
if __name__ == '__main__':
    # Dev entry point: build the app and run Flask's built-in server.
    app = create_app()
    app.run()
| fionamei/chefs-kiss | backend/app.py | app.py | py | 1,870 | python | en | code | 2 | github-code | 13 |
23754558929 | import datetime
from flask import Flask
import unittest
from app import db
from service import user, master, breed, procedure, reservation
class CreateUserTest(unittest.TestCase):
    """Service-level tests for reservation creation/deletion.

    Each test builds its own master/client/breed/procedure fixtures and
    tears them down in a ``finally`` block so failures don't leak rows.
    """
    def setUp(self):
        """
        Creates a new database for the unit test to use
        """
        self.app = Flask(__name__)
        db.init_app(self.app)
        with self.app.app_context():
            db.create_all()
    def test_create_reservation(self):
        """
        Testing correct reservation creation
        """
        # Fixture rows: one master user, one client, a breed and a procedure.
        user_master = user.create_user("Betty", "Conda", "345", "MASTER", "1232124356")
        user_id = user.create_user("Anna", "Dove", "567", "CLIENT", "9878564313").id
        master_id = master.create_master(user_master.id).id
        breed_id = breed.create_breed("Goldendoodle", 2.3, 1.5, "link").id
        procedure_id = procedure.create_procedure("Teeth whitening", 400, 30).id
        time_from = datetime.datetime.strptime('12:00', '%H:%M').time()
        time_to = datetime.datetime.strptime('13:00', '%H:%M').time()
        date = datetime.datetime.strptime('20/02/2022', '%d/%m/%Y').date()
        final_price = procedure.compute_total_procedure_price(procedure_id, breed_id)
        result = reservation.create_reservation(master_id, user_id, breed_id, procedure_id,
                                                time_from, time_to, date, final_price)
        try:
            self.assertTrue(result)
            self.assertEqual(master_id, result.master_id)
            self.assertEqual(user_id, result.client_id)
            self.assertEqual(breed_id, result.breed_id)
            self.assertEqual(procedure_id, result.procedure_id)
            # 1380 = 400 base price scaled by the breed coefficients —
            # TODO confirm against compute_total_procedure_price.
            self.assertEqual(1380, result.final_price)
        finally:
            # Clean up every fixture row regardless of assertion outcome.
            user.delete_user(user_master.id)
            user.delete_user(user_id)
            master.delete_master(master_id)
            breed.delete_breed(breed_id)
            procedure.delete_procedure(procedure_id)
            reservation.delete_reservation(result.id)
            db.session.commit()
    def test_delete_reservation(self):
        """
        Tests whether reservation is correctly deleted
        """
        user_master = user.create_user("Sindy", "Crow", "345", "MASTER", "3422124354")
        user_id = user.create_user("Lisel", "White", "567", "CLIENT", "9876664313").id
        master_id = master.create_master(user_master.id).id
        breed_id = breed.create_breed("Lil boy", 1.1, 1.2, "link").id
        procedure_id = procedure.create_procedure("Paw whitening", 300, 60).id
        time_from = datetime.datetime.strptime('11:00', '%H:%M').time()
        time_to = datetime.datetime.strptime('12:00', '%H:%M').time()
        date = datetime.datetime.strptime('21/02/2022', '%d/%m/%Y').date()
        final_price = procedure.compute_total_procedure_price(procedure_id, breed_id)
        result = reservation.create_reservation(master_id, user_id, breed_id, procedure_id,
                                                time_from, time_to, date, final_price)
        try:
            # Deleting the reservation should make lookups return None.
            reservation.delete_reservation(result.id)
            deleted_reservation_query = reservation.get_reservation_by_id(result.id)
            self.assertTrue(result)
            self.assertEqual(None, deleted_reservation_query)
        finally:
            user.delete_user(user_master.id)
            user.delete_user(user_id)
            master.delete_master(master_id)
            breed.delete_breed(breed_id)
            procedure.delete_procedure(procedure_id)
            # Idempotent: the reservation was already removed above.
            reservation.delete_reservation(result.id)
            db.session.commit()
    def tearDown(self):
        """
        Ensures that the database is emptied for next unit test
        """
        self.app = Flask(__name__)
        db.init_app(self.app)
        with self.app.app_context():
            db.drop_all()
| el-shes/grooming_store | tests/test_reservation_service.py | test_reservation_service.py | py | 4,741 | python | en | code | 0 | github-code | 13 |
13264045813 | # Multiple
class ComputerPart():
    """Commercial attributes shared by every computer part.

    pabrikan: manufacturer, nama: product name, jenis: model/series,
    harga: price.
    """

    def __init__(self, pabrikan, nama, jenis, harga):
        self.pabrikan, self.nama, self.jenis, self.harga = pabrikan, nama, jenis, harga
class Processor():
    """CPU spec: core count and clock speed."""

    def __init__(self, jumlah_core, speed):
        self.jumlah_core, self.speed = jumlah_core, speed
class RandomAccessMemory():
    """RAM spec: capacity only (e.g. '16GB')."""
    def __init__(self, kapasitas):
        self.kapasitas = kapasitas
class HardDiskDATA():
    """Hard disk spec: rotational speed (e.g. '7200 RPM')."""
    def __init__(self,rpm):
        self.rpm = rpm
class Computer( ComputerPart, Processor, RandomAccessMemory, HardDiskDATA ):
    """Full machine spec assembled via multiple inheritance.

    The bases are used purely as attribute mix-ins; each base __init__ is
    invoked explicitly with its slice of the argument list.
    """
    def __init__(self, pabrikan, nama, jenis, harga, jumlah_core, speed, kapasitas, rpm):
        ComputerPart.__init__(self, pabrikan, nama, jenis, harga)
        Processor.__init__(self, jumlah_core, speed, )
        RandomAccessMemory.__init__(self, kapasitas)
        HardDiskDATA.__init__(self, rpm)
# Demo instances. Argument order: pabrikan, nama, jenis, harga, jumlah_core,
# speed, kapasitas, rpm.
pcsultan = Computer('INTEL', 'ASUS ROG', 'CORE I9', 1000, 8, '7Ghz', '1000GB', '7200 RPM')
pcdewa = Computer('Intel', 'Lenovo Legion', 'CORE I7', 1500, 6, '6Ghz', '500GB', '7200 RPM')
# BUG FIX: harga and jumlah_core were swapped (price 4, 3000 cores).
pcmahasiswa = Computer('INTEL', 'Ideapad Gaming Budget', 'CORE I5', 3000, 4, '5Ghz', '100GB', '7200 RPM')

for pc in (pcsultan, pcdewa, pcmahasiswa):
    # BUG FIX: the 'Jenis' placeholder previously printed pc.harga and the
    # jenis attribute was never shown; a 'Harga' line is now included too.
    print('Pabrikan: {}\nNama: {}\nJenis: {}\nHarga: {}\nJumlah_core: {}\nSpeed: {}\nKapasitas: {}\nRPM : {} \n\n'.format(
        pc.pabrikan, pc.nama, pc.jenis, pc.harga, pc.jumlah_core, pc.speed, pc.kapasitas, pc.rpm))
| zaedarghazalba/TUGAS-PBOP | TUGAS 5 PBOP/TUGASmultiplecomp.py | TUGASmultiplecomp.py | py | 1,486 | python | en | code | 0 | github-code | 13 |
26617585002 | import argparse
def count_depth_increases(depths):
    """Return how many measurements are larger than the one before them."""
    return sum(1 for prev, cur in zip(depths, depths[1:]) if cur > prev)


def _parse_args():
    """Parse the command line; -i/--input_file is mandatory (argparse enforces it)."""
    parser = argparse.ArgumentParser(description="Process a list of sea floor depths")
    required = parser.add_argument_group("required arguments")
    required.add_argument("-i", "--input_file", help="path to the input file", required=True)
    return parser.parse_args()


if __name__ == '__main__':
    args = _parse_args()
    # The old `if args.input_file is None` check was dead code: with
    # required=True argparse exits before we get here.
    try:
        # 'with' guarantees the handle is closed (the original leaked it),
        # and catching OSError replaces the over-broad bare except.
        with open(args.input_file, "r") as input_file:
            depths = [int(line) for line in input_file]
    except OSError:
        print("Input file path '%s' is invalid" % args.input_file)
        exit(1)
    print("Answer:", count_depth_increases(depths))
    exit(0)
| gmurr20/advent_of_code_2021 | day1/day1_p1.py | day1_p1.py | py | 1,032 | python | en | code | 0 | github-code | 13 |
36565923420 | from django.shortcuts import render,redirect
from .models import Comment
from django.contrib.contenttypes.models import ContentType
# Create your views here.
def comment(request):
    """Create a Comment on an arbitrary model instance and redirect back.

    Expects POST fields: text, content_type (lowercase model name) and
    object_id. NOTE(review): object_id is converted with int() and raises
    ValueError when missing or non-numeric, and request.user may be
    anonymous — confirm callers guarantee both.
    """
    user=request.user
    text=request.POST.get('text','')
    content_type=request.POST.get('content_type','')
    object_id=int( request.POST.get('object_id',''))
    # Resolve the target model instance through the contenttypes framework.
    model_class=ContentType.objects.get(model=content_type).model_class()
    model_obj=model_class.objects.get(pk=object_id)
    cmt=Comment()
    cmt.user=user
    cmt.text=text
    cmt.content_type=content_type
    cmt.object_id=object_id
    cmt.content_object=model_obj
    cmt.save()
    # Send the user back to the page the comment was posted from.
    return redirect(request.META.get('HTTP_REFERER'))
71135477139 | try:
import qi
from naoqi import ALProxy
except:
print('Not on real Robot')
import argparse
import sys
import time
from PIL import Image
import numpy as np
import cv2 as cv
import copy
import ffmpeg
import math
from PIL import ImageFont, ImageDraw, Image
import zmq
import json, ast
# Module-level REQ socket to the local keypoint-detection (YOLO) server;
# used by send_array/recv_array in strict request-reply lockstep.
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5555")
def send_array(socket, A, flags=0, copy=True, track=False):
    """Send a numpy array as a two-part message: JSON metadata, then raw bytes."""
    header = dict(dtype=str(A.dtype), shape=A.shape)
    # SNDMORE marks the metadata frame as part of the same multipart message.
    socket.send_json(header, flags | zmq.SNDMORE)
    return socket.send(A, flags, copy=copy, track=track)
def recv_array(socket, flags=0, copy=True, track=False):
    """Receive a numpy array sent by send_array (metadata frame + raw bytes)."""
    md = socket.recv_json(flags=flags)  # metadata frame (currently unused)
    msg = socket.recv(flags=flags, copy=copy, track=track)
    # NOTE(review): dtype and shape are hard-coded rather than taken from
    # ``md`` (the original author noted key-name trouble with the JSON
    # metadata); this only fits the 4-float keypoint payload — confirm
    # with the sender before reusing elsewhere.
    return np.frombuffer(msg, dtype=np.float32).reshape(4)
def detect_keypoint_yolo(frame):
    """Query the YOLO server over ZMQ; return (frame, center_px, radius_px).

    The server replies with a normalised 4-float keypoint; a leading -100
    is its "nothing detected" sentinel, in which case (frame, None, None)
    is returned.
    """
    global socket
    send_array(socket, frame)
    keypoint = recv_array(socket)
    if keypoint[0] == -100:
        # Sentinel from the server: no ball in this frame.
        return frame, None, None
    center = copy.deepcopy(keypoint[:2])
    # Scale the normalised coordinates back to pixel space.
    center[1] = center[1] * frame.shape[0]
    center[0] = center[0] * frame.shape[1]
    r = int(abs(keypoint[-1]) * frame.shape[0])
    # BUG FIX: np.int was removed in NumPy 1.20+ (AttributeError on modern
    # installs); the builtin int is the documented replacement.
    return frame, center.astype(int), r
def detect_keypoint(frame):
    """HSV-threshold ball detector: return (frame, center_px, radius_px).

    Thresholds the frame in HSV for a red/orange range, takes a contour of
    the mask and fits a minimum enclosing circle. Returns (frame, None,
    None) when no contour is found. Side effect: writes debug images
    test.jpg / mask.jpg on every call.
    """
    # NOTE(review): converts RGB->BGR, so the input is assumed RGB — confirm
    # against the camera pipeline.
    frame = cv.cvtColor(frame, cv.COLOR_RGB2BGR)
    # NOTE(review): upper V bound 1715 exceeds the 0-255 HSV range —
    # probably a typo for 175 or 255 (harmless, acts as 255).
    l_red_lower = np.array([2,150,71])
    l_red_upper = np.array([23,255,1715])
    # The following locals are left over from earlier experiments and are
    # never used below: erosion/flood-fill settings, thresholding output.
    erosion_size=1
    erosion_shape=cv.MORPH_RECT
    kernel_size = 3
    kernel = np.ones((kernel_size, kernel_size)).astype(np.uint8)
    floodflags = 8
    floodflags |= cv.FLOODFILL_FIXED_RANGE
    previous_track = None
    blurred = cv.GaussianBlur(frame, (3,3), 0)
    img_hsv = cv.cvtColor(blurred, cv.COLOR_BGR2HSV)
    img_gray = cv.cvtColor(img_hsv, cv.COLOR_BGR2GRAY)
    ret,thresh1 = cv.threshold(img_gray,131,255,cv.THRESH_BINARY)
    # Binary mask of pixels inside the HSV colour range.
    mask=cv.inRange(img_hsv, l_red_lower, l_red_upper)
    tmp = mask.copy()
    mask_ = np.zeros((frame.shape[0], frame.shape[1]),np.uint8)
    contour = []
    contours,hierarchy = cv.findContours(mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    radius=0
    if len(contours)>0:
        # NOTE(review): takes the *first* contour, not the largest —
        # noise blobs can win over the ball; confirm this is acceptable.
        contours = contours[0]
        (x,y), radius = cv.minEnclosingCircle(contours)
        center = (int(x), int(y))
        radius = int(radius)
    else:
        center=None
        radius = None
    # Debug output (overwritten every frame).
    cv.imwrite('test.jpg', img_gray)
    cv.imwrite('mask.jpg', mask)
    return frame, center, radius
def load_imgs(int1=1, int2=100, parent_folder='./datasets/rgb', name='farbe'):
    """Read images <parent_folder>/<name><i>.jpg for i in [int1, int2).

    Note: cv.imread returns None for missing/unreadable files and those
    entries are kept in the returned list as-is.
    """
    return [cv.imread(parent_folder + '/' + name + str(i) + '.jpg')
            for i in range(int1, int2)]
def convert_centers2out(c, tmp_c, img_height=600):
    """Extrapolate where a rolling ball's motion line leaves the frame.

    c, tmp_c : current and previous ball centres as (col, row) pixels.
    img_height : row index of the border the ball rolls towards
        (new parameter; previously hard-coded to 600, which stays the
        default, so existing callers are unaffected).

    Returns (out_pixel, angle_deg, dx, dy, norm): out_pixel is the column
    at which the line crosses row ``img_height``; angle_deg is the motion
    direction; norm its magnitude in pixels.
    """
    # Movement vector. Axis convention from the original: dy is the column
    # delta (c[0]), dx the row delta (c[1]) — "x is down" in image coords.
    dy = c[0] - tmp_c[0]
    dx = c[1] - tmp_c[1]
    if dx == 0:
        dx += 1e-4  # avoid division by zero for purely horizontal motion
    scalar = (img_height - c[1]) / dx
    if scalar < 0:
        # Ball moving away from the border: push intersection to infinity.
        scalar = 1e9
    out_pixel = c[0] + (dy * scalar)
    d = np.array([dx, dy])
    d = d / np.linalg.norm(d)
    angle = np.arctan2(d[1], d[0]) * 180 / math.pi
    norm = np.sqrt(dx ** 2 + dy ** 2)
    # Removed: a per-call debug print of `scalar` and the unused
    # `left_right` alias of `angle`.
    return out_pixel, angle, dx, dy, norm
def output_pixel2decision(cut_of_y, r):
    """Map the predicted exit column to a coarse direction label.

    Returns 'left' / 'middle' / 'right', or '------' when the exit point
    lies outside the plausible (-2000, 2000) range. ``r`` (ball radius)
    is accepted for API compatibility but unused.
    """
    if 320 <= cut_of_y <= 470:
        return 'middle'
    if 470 < cut_of_y < 2000:
        return 'right'
    if -2000 < cut_of_y < 320:
        return 'left'
    return '------'
42271967475 | from config import setSelenium, init_crawler,init_parser
from utils import JSONtoExcel, save_to_json, format_text
from current_time import *
from time import sleep
import string
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, ElementClickInterceptedException, TimeoutException
global driver  # NOTE(review): 'global' at module level is a no-op
# Accumulates one details-dict per scraped shopping page.
extracted_info = []
def dynamic_html(url):
    """Open *url* in a headless browser and return the shopping template HTML.

    Returns False (not None) when the page times out — callers must check
    for that sentinel before parsing.
    """
    driver = setSelenium(True)
    try:
        driver.get(url)
    except TimeoutException:
        print('Pagina não carregada...')
        return False
    # Give the page's JS time to render before scraping.
    sleep(10)
    results_div = driver.find_element_by_class_name('shopping-template-default')
    return results_div.get_attribute('outerHTML')
def load_button(url):
    """Open *url*, repeatedly click 'load more' and return the body's HTML.

    Scrolls and clicks up to 6 times; stops early when the button is gone,
    returning whatever HTML is loaded at that point.
    """
    driver = setSelenium(False)
    driver.get(url)
    try:
        for _ in range(0,6):
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight / 1.5)")
            # Long waits give the lazy-loaded content time to appear.
            sleep(15)
            driver.find_element_by_id('loadMoreShopping').click()
            print('Mais shoppings encontrado!')
        # NOTE(review): click() returns None, so this condition is always
        # falsy and the scroll below never runs — dead code; confirm intent.
        if driver.find_element_by_id('loadMoreShopping').click():
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight / 1.4)")
        html = driver.find_element_by_tag_name('body')
        return html.get_attribute('outerHTML')
    except NoSuchElementException:
        # Button removed: all shoppings are loaded.
        print('não tem mais shoppings!')
        html = driver.find_element_by_tag_name('body')
        return html.get_attribute('outerHTML')
    except ElementClickInterceptedException:
        # Something (overlay, banner) blocked the click; scrape what we have.
        print('Botão não localizado')
        html = driver.find_element_by_tag_name('body')
        return html.get_attribute('outerHTML')
def extract_shoppings(target_url):
    """Scrape a listing page and append every shopping link to links.txt."""
    # Array que irá pegar todas informações extraídas
    data = []
    URL = target_url
    # Selenium
    html = load_button(URL)
    # Iniciar o crawler
    try:
        crawler = init_parser(html)
    except TypeError:
        # NOTE(review): `crawler` stays unbound here, so the find_all below
        # raises NameError after this message — confirm/handle.
        print('pagina não encontrada')
    shoppings = crawler.find_all('div', id="ajax-content-shoppings")
    print('Localizando os links...')
    for shopping in shoppings:
        shopping_link = shopping.find_all('a')
        for link in shopping_link:
            # Appends (never truncates) — rerunning duplicates links.
            with open('links.txt', 'a') as file:
                file.write(f"{link['href']}\n")
    print(f'Finalizado! links extraidos!')
    print('Extração dos links completada...')
def read_link():
    """Return every scraped shopping link (one per line, newline kept)."""
    print('Iniciando leitura de links...')
    with open('links.txt', 'r') as handle:
        return list(handle)
def extract_details():
    """Visit every link in links.txt and collect the shopping's details.

    Each page's details dict is appended to the module-level
    ``extracted_info`` list; everything is saved to JSON at the end.
    """
    # Read the links once (previously read_link() was called twice,
    # re-reading the file for the count and again for the loop).
    links = read_link()
    print(f"{len(links)} links achados")
    contador = 1
    for shopping_page in links:
        print(f'Extraindo {contador} link')
        details = {}
        dynamic_result = dynamic_html(shopping_page)
        # BUG FIX: this used to test `dynamic_html == False`, comparing the
        # *function object* to False (always falsy), so failed pages fell
        # through into init_parser(False) and crashed.
        if dynamic_result is False:
            extracted_info.append(details)
            save_to_json(details)
            continue
        crawler = init_parser(dynamic_result)
        details['Nome'] = crawler.find('span', class_="post post-shopping current-item").text
        details['Tipo'] = crawler.find('a', class_="taxonomy operacao").text
        details['link'] = shopping_page
        details_container = crawler.find('div',class_="specs")
        # PERFIL DE CONSUMIDORES: four <p> tags, one per consumer class.
        perfil_title = details_container.find(text="PERFIL DE CONSUMIDORES")
        class_content = perfil_title.findNext('div')
        class_perfil = []
        for p in class_content.find_all('p'):
            class_perfil.append(p.text)
        details['Classe A'] = class_perfil[0]
        details['Classe B'] = class_perfil[1]
        details['Classe C'] = class_perfil[2]
        details['Classe D'] = class_perfil[3]
        # ENTRETENIMENTO
        enterteiment_title = details_container.find(text="ENTRETENIMENTO")
        enterteiment_content = enterteiment_title.findNext('div')
        details[enterteiment_title] = format_text(enterteiment_content.text)
        # ÁREA TOTAL DO TERRENO
        area_title = details_container.find(text="ÁREA TOTAL DO TERRENO")
        area_content = area_title.findNext('div')
        details[area_title] = format_text(area_content.text)
        # CONTATO
        contact_title = details_container.find(text="CONTATO")
        contact_content = contact_title.findNext('ul')
        details[contact_title] = format_text(contact_content.text)
        # Icon boxes: label ('mb-0') + value ('number') pairs.
        aditional_info = crawler.find('div', class_="icons shoppings mt-4 mb-4")
        box = aditional_info.find_all('div', class_="box")
        for box_info in box:
            title = box_info.find('p', class_='mb-0')
            detail_content = box_info.find('p', class_="number")
            details[title.text] = detail_content.text
        extracted_info.append(details)
        contador += 1
    print('Finalizado!')
    print('Salvando em json...')
    save_to_json(extracted_info)
    print('Finalizado...')
def main():
    """Parse the already-scraped links into details and export to Excel.

    The link-collection calls are kept commented out below as a record of
    how links.txt was originally produced.
    """
    # for page in string.ascii_uppercase:
    #     extract_shoppings(f"https://abrasce.com.br/guia-de-shoppings/?letter={page}")
    # extract_shoppings("https://abrasce.com.br/guia-de-shoppings/strip-mall/",)
    # extract_shoppings("https://abrasce.com.br/guia-de-shoppings/outlet-center/")
    extract_details()
    JSONtoExcel()
JSONtoExcel()
if __name__ == "__main__":
    # Time the whole run; always persist whatever was scraped, even on
    # Ctrl+C or an unexpected crash.
    start = timeit.default_timer()
    try:
        main()
        tempo_estimado(start)
    except KeyboardInterrupt:
        # Manual interrupt: save the partial results and report elapsed time.
        save_to_json(extracted_info)
        tempo_estimado(start)
    except Exception as error:
        # Unexpected failure: save partial results, then re-raise so the
        # traceback is still visible.
        save_to_json(extracted_info)
        tempo_estimado(start)
        raise
| mx-jeff/abrasce-crawler | app.py | app.py | py | 5,929 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.