seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
25505977572 | import six
import tensorflow_hub as hub
import pandas as pd
from functions import get_logger, execute, to_sep_space, get_sim_index, show_sim_faq
from config import LOGDIR, JA_NNLM_MODEL, MUSCLE_QA
def main():
    """Interactive FAQ similarity search over a TF-Hub sentence-embedding model.

    Loads the FAQ CSV, embeds every question with the Japanese NNLM module,
    then loops reading queries from stdin (a blank line exits) and shows the
    most similar FAQ entries for each query.
    """
    logger = get_logger(LOGDIR)
    logger.info('start')
    logger.info('load faq data')
    qa_df = pd.read_csv(MUSCLE_QA)
    q_txt = qa_df['q_txt'].tolist()
    # Tokenise each question into the space-separated form the model expects.
    sep_q_txt = [to_sep_space(i) for i in q_txt]
    logger.info('load NN Language Model')
    embed = hub.Module(JA_NNLM_MODEL)
    embeddings = embed(sep_q_txt)
    logger.info('to vectors')
    # execute() presumably runs the TF graph and returns a numpy array
    # (vecs.shape is logged below) -- TODO confirm against functions.py.
    vecs = execute(embeddings)
    logger.info('vector shape: {}'.format(vecs.shape))
    while True:
        text = six.moves.input('>> ')
        if text == '':
            break
        sep_input = to_sep_space(text)
        embeddings = embed([sep_input])
        vec = execute(embeddings)
        # Rank the stored FAQ vectors by similarity to the query vector.
        sort_i, sim = get_sim_index(vec, vecs)
        df = qa_df.loc[sort_i]
        show_sim_faq(df, sim)
    logger.info('end')
if __name__ == '__main__':
    main()
| trtd56/MuscleQA | src/muscle_qa_nnlm.py | muscle_qa_nnlm.py | py | 1,036 | python | en | code | 0 | github-code | 13 |
12228729380 | # -*- coding:utf-8 -*-
from django.conf.urls import url
from django.contrib import admin
from .views import (
StationListAPIView,
CommunityListAPIView,
SecondWaterListAPIView,
DMAListAPIView,
dmabaseinfo,
getDmaSelect,
)
# URL namespace for this API app: reverse routes as 'devm-api:<name>'.
app_name='devm-api'
# Mixes class-based list endpoints (.as_view()) and plain function views
# (getDmaSelect, dmabaseinfo). All patterns are anchored with ^...$.
urlpatterns = [
    # url(r'^user/oranizationtree/$', OrganizationListAPIView.as_view(), name='organlist'),
    # url(r'^create/$', PostCreateAPIView.as_view(), name='create'),
    # url(r'^(?P<slug>[\w-]+)/$', PostDetailAPIView.as_view(), name='detail'),
    # url(r'^(?P<slug>[\w-]+)/edit/$', PostUpdateAPIView.as_view(), name='update'),
    # url(r'^(?P<slug>[\w-]+)/delete/$', PostDeleteAPIView.as_view(), name='delete'),
    url(r'^stations/list/$', StationListAPIView.as_view(), name='stationlist'),
    url(r'^dma/getDmaSelect/$', getDmaSelect, name='dmaselect'),
    url(r'^dma/list/$', DMAListAPIView.as_view(), name='dmalist'),
    url(r'^district/dmabaseinfo/$', dmabaseinfo, name='dmabaseinfo'),
    url(r'^community/list/$', CommunityListAPIView.as_view(), name='communitylist'),
    url(r'^secondwater/list/$', SecondWaterListAPIView.as_view(), name='secondwaterlist'),
]
| apengok/bsc2000 | dmam/api/urls.py | urls.py | py | 1,157 | python | en | code | 1 | github-code | 13 |
20032171796 | """upvote3 Migration
Revision ID: 528c1a134c57
Revises: 73e42c60678a
Create Date: 2021-04-28 12:39:38.258102
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '528c1a134c57'
down_revision = '73e42c60678a'
branch_labels = None
depends_on = None
def upgrade():
    """Recreate the upvote foreign keys with ON DELETE SET NULL so deleting
    a user or pitch nulls the reference instead of failing or cascading."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('upvote_user_id_fkey', 'upvote', type_='foreignkey')
    op.drop_constraint('upvote_pitch_id_fkey', 'upvote', type_='foreignkey')
    # Created unnamed: the database assigns its default constraint names
    # (presumably PostgreSQL's '<table>_<column>_fkey', matching the names
    # dropped above -- TODO confirm the backend).
    op.create_foreign_key(None, 'upvote', 'pitches', ['pitch_id'], ['id'], ondelete='SET NULL')
    op.create_foreign_key(None, 'upvote', 'users', ['user_id'], ['id'], ondelete='SET NULL')
    # ### end Alembic commands ###
def downgrade():
    """Revert upgrade(): restore the original foreign keys without ON DELETE.

    Bug fix: the autogenerated ``op.drop_constraint(None, ...)`` raises
    ``ValueError`` because alembic requires a constraint name to drop.  The
    FKs created unnamed in upgrade() receive the database's default names --
    PostgreSQL uses '<table>_<column>_fkey' (assumed backend, matching the
    names used elsewhere in this migration; TODO confirm) -- so we drop them
    by those names here.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('upvote_user_id_fkey', 'upvote', type_='foreignkey')
    op.drop_constraint('upvote_pitch_id_fkey', 'upvote', type_='foreignkey')
    op.create_foreign_key('upvote_pitch_id_fkey', 'upvote', 'pitches', ['pitch_id'], ['id'])
    op.create_foreign_key('upvote_user_id_fkey', 'upvote', 'users', ['user_id'], ['id'])
    # ### end Alembic commands ###
| kagus-code/Pitch-Piper | migrations/versions/528c1a134c57_upvote3_migration.py | 528c1a134c57_upvote3_migration.py | py | 1,181 | python | en | code | 0 | github-code | 13 |
16773949833 | # dfs__BOJ_21609_상어중학교
# NxN grid
# block : {black(-1), rainbow(0), ordinary(M)}
# === input ===
EDGE, NUM_OF_COLOR = map(int, input().split())
EMPTY = -2
BLACK, RAINBOW = -1, 0
board = [list(map(int, input().split())) for _ in range(EDGE)]
# === algorithm ===
def rotate_counterclockwise(arr: list) -> list:
    """Return a new matrix: *arr* rotated 90 degrees counterclockwise.

    Generalised to work on any square matrix -- the size is taken from the
    argument itself instead of the module-level EDGE constant (behaviour is
    unchanged for the game board, which is always EDGE x EDGE).
    """
    size = len(arr)
    new_arr = [[0] * size for _ in range(size)]
    for r in range(size):
        for c in range(size):
            # Column c of the source becomes (reversed) row size-1-c.
            new_arr[size - 1 - c][r] = arr[r][c]
    return new_arr
def gravity(arr: list):
new_arr = [[EMPTY]*EDGE for _ in range(EDGE)]
for c in range(EDGE):
new_r = EDGE-1
for r in range(EDGE-1, -1, -1):
if arr[r][c] == BLACK:
new_arr[r][c] = arr[r][c]
new_r = r-1
elif arr[r][c] != EMPTY:
new_arr[new_r][c] = arr[r][c]
new_r -= 1
return new_arr
# block group: a set of 2 or more connected blocks
# - base block: among the non-rainbow blocks, the one with the smallest row index, then the smallest column index
# - must contain at least one ordinary block
# - all ordinary blocks share the same colour
# - black blocks: not allowed
# - rainbow blocks: allowed
def find_biggest_group(arr: list):
    """Return the cell list of the best removable block group, or [] if none.

    "Best" is the largest group; ties are broken by rainbow-block count
    (>= means the later-scanned group wins exact ties -- NOTE(review):
    confirm this matches the problem's base-block tie-break rule).
    Groups of size 1 are not removable and yield [].
    """
    ret_group = []
    num_rainbow = 0
    visited = [[False]*EDGE for _ in range(EDGE)]
    def dfs(start) -> list:
        """Iterative DFS from *start*; returns [cells, rainbow_count]."""
        ret_visited_list = []
        will_visited_stack = [start]
        visited[start[0]][start[1]] = True
        # The colour of the group is fixed by its starting (ordinary) block.
        color = arr[start[0]][start[1]]
        rainbow_list = []
        while will_visited_stack:
            curr = will_visited_stack.pop()
            ret_visited_list.append(curr)
            for dr, dc in [(-1, 0), (0, -1), (1, 0), (0, 1)]:
                nr, nc = curr[0]+dr, curr[1]+dc
                if 0 <= nr < EDGE and 0 <= nc < EDGE:
                    if (arr[nr][nc] == RAINBOW or arr[nr][nc] == color) and not visited[nr][nc]:
                        will_visited_stack.append([nr, nc])
                        visited[nr][nc] = True
                        if arr[nr][nc] == RAINBOW:
                            rainbow_list.append([nr, nc])
        # Rainbow blocks may belong to several groups, so un-mark them
        # for the next DFS.
        for rr, rc in rainbow_list:
            visited[rr][rc] = False
        return [ret_visited_list, len(rainbow_list)]
    for r in range(EDGE):
        for c in range(EDGE):
            # Only start a search from an ordinary (colour > 0) block.
            if arr[r][c] > 0 and not visited[r][c]:
                new_group, new_num_rainbow = dfs([r, c])
                if len(new_group) > len(ret_group) or\
                   (len(new_group) == len(ret_group) and new_num_rainbow >= num_rainbow):
                    ret_group = new_group
                    num_rainbow = new_num_rainbow
    if len(ret_group) == 1:
        ret_group.clear()
    return ret_group
def remove_group(arr, group: list, empty: int = -2) -> list:
    """Blank every (row, col) cell of *group* in place and return *arr*.

    The ``empty`` marker defaults to the module-level EMPTY constant's
    value, so existing two-argument calls behave exactly as before.
    """
    for r, c in group:
        arr[r][c] = empty
    return arr
def print_arr(arr):
    """Debug helper: print the board row by row, showing empty cells as '_'.

    Iterates the rows of *arr* directly instead of indexing via the global
    EDGE constant, so it works on any rectangular board.
    """
    for row in arr:
        # "-2" is the EMPTY sentinel's textual form.
        print(" ".join(map(str, row)).replace("-2", "_"))
# === output ===
answer = 0
while True:
    # 1. Find the largest removable block group.
    biggest_group = find_biggest_group(board)
    if not biggest_group:
        break
    # 2. Remove the group; score B^2 points for a group of B blocks.
    board = remove_group(board, biggest_group)
    answer += len(biggest_group) ** 2
    # 3. Apply gravity.
    board = gravity(board)
    # 4. Rotate the board 90 degrees counterclockwise.
    board = rotate_counterclockwise(board)
    # 5. Apply gravity again.
    board = gravity(board)
print(answer)
8658469917 |
VERSION = "Cam_display v0.10"
import sys, time, threading, cv2
import numpy as np
from flirpy.camera.lepton import Lepton
from tifffile import imsave
import helperFunctions.skin_detector
import time
import h5py
from helperFunctions.spo2Functions import face_detect_and_thresh,spartialAverage,MeanRGB,SPooEsitmate,preprocess
from helperFunctions.leptonFunctions import grabTempValue,rawFrame,generate_colour_map,raw_to_8bit,ktof,ktoc,getFrame
from helperFunctions.csvSaver import saveCSVFromFrame
try:
from PyQt5.QtCore import Qt
pyqt5 = True
except:
pyqt5 = False
if pyqt5:
from PyQt5.QtCore import QTimer, QPoint, pyqtSignal
from PyQt5.QtWidgets import QApplication, QMainWindow, QTextEdit, QLabel,QPushButton
from PyQt5.QtWidgets import QWidget, QAction, QVBoxLayout, QHBoxLayout
from PyQt5.QtGui import QFont, QPainter, QImage, QTextCursor
else:
from PyQt4.QtCore import Qt, pyqtSignal, QTimer, QPoint
from PyQt4.QtGui import QApplication, QMainWindow, QTextEdit, QLabel
from PyQt4.QtGui import QWidget, QAction, QVBoxLayout, QHBoxLayout
from PyQt4.QtGui import QFont, QPainter, QImage, QTextCursor
try:
import Queue as Queue
except:
import queue as Queue
IMG_SIZE = 1280,720 # 640,480 or 1280,720 or 1920,1080
IMG_FORMAT = QImage.Format_RGB888
DISP_SCALE = 2 # Scaling factor for display image
DISP_MSEC = 50 # Delay between display cycles
CAP_API = cv2.CAP_ANY # API: CAP_ANY or CAP_DSHOW etc...
EXPOSURE = 0 # Zero for automatic exposure
TEXT_FONT = QFont("Courier", 10)
camera_num = 1 # Default camera (first in list)
image_queue = Queue.Queue() # Queue to hold images
capturing = True # Flag to indicate capturing
colorMapType = 1
frameCount=0 # Frames accumulated in the current SpO2 measurement
globalCount=0 # Total frames grabbed since startup
duration=10 # Measurement duration passed to SPooEsitmate -- units unverified, TODO confirm
totalFrame = 250 # Number of frames collected for one SpO2 estimate
# se;f.spo2Flag=0
final_sig=[] # Per-frame mean colour signal consumed by the SpO2 estimator
spo2_set=[]
vid= Lepton() # FLIR Lepton thermal camera handle, held for the whole run
# Grab images from the camera (separate thread)
camState = 'not_recording'
tiff_frame = 1
maxVal = 0
minVal = 0
def grab_images(cam_num, queue,self):
    """Capture-thread body: merge thermal (Lepton) and webcam frames, queue them.

    Runs until the module-level `capturing` flag goes False.  While
    `self.spo2Flag` is truthy it also accumulates a per-frame signal from a
    fixed face crop and, once `totalFrame` frames are collected, estimates
    SpO2 and pushes the result into the window's labels.
    """
    # global se;f.self.spo2Flagag
    global frameCount
    global globalCount
    global final_sig
    global totalFrame
    cap = cv2.VideoCapture(cam_num-1 + CAP_API)
    print(cam_num-1 + CAP_API)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, IMG_SIZE[0])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, IMG_SIZE[1])
    if EXPOSURE:
        cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0)
        cap.set(cv2.CAP_PROP_EXPOSURE, EXPOSURE)
    else:
        cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1)
    while capturing:
        if cap.grab():
            # Thermal frame, optionally differenced against the stored
            # reference frame (see MyWindow.frameDifference).
            if(self.frameDifferenceFlag):
                frame = np.subtract(getFrame(),self.firstFrame)
            else:
                frame = getFrame()
            # dataArray2d = data[40:120,25:60]
            cv2.rectangle(frame,(60,40),(100,80),(0,0,255),3)
            frame = cv2.resize(frame,(640,480))
            print(frame.shape)
            retval, image = cap.retrieve(0)
            print(image.shape)
            image = cv2.resize(image,(640,480))
            displayImage = image.copy()
            cv2.rectangle(displayImage,(240,160),(400,320),(0,0,255),3)
            # Fixed face crop used for the SpO2 signal -- presumably tuned
            # to the camera placement; TODO confirm it matches the drawn box.
            faceFrame = image[int(100*0.9):int(200*1.1),int(150*0.9):int(250*1.1)]
            finalFrame = cv2.hconcat([frame,displayImage])
            image = finalFrame
            # NOTE(review): `and` binds tighter than `or`, so an active
            # spo2Flag bypasses the queue-size throttle -- confirm intended.
            if image is not None and queue.qsize() < 2 or (self.spo2Flag):
                if frameCount ==0 and self.spo2Flag==1:
                    # First frame of a measurement: seed the signal.
                    print("working")
                    print(final_sig)
                    thresh,mask=face_detect_and_thresh(faceFrame)
                    temp,min_value,max_value=spartialAverage(mask,faceFrame)
                    # spartialAverage returning a plain int signals failure.
                    if(type(temp)==type(2)):
                        print("failed estimation, try again")
                        frameCount=totalFrame
                        # break
                        self.spo2Flag=2
                    final_sig.append(temp)
                elif (self.spo2Flag==1) and frameCount<totalFrame and frameCount>1:
                    # print("working")
                    # print(final_sig)
                    # NOTE(review): min_value/max_value are only bound in the
                    # frameCount==0 branch above -- relies on that branch
                    # having run first in this thread.
                    thresh,mask=face_detect_and_thresh(faceFrame)
                    final_sig.append(MeanRGB(thresh,faceFrame,final_sig[-1],min_value,max_value))
                if frameCount==totalFrame:
                    if self.spo2Flag==1:
                        result=SPooEsitmate(final_sig,totalFrame,totalFrame,duration) # the final signal list is sent to SPooEsitmate function with length of the video
                        try:
                            self.label_2.setText("Temp:"+str(grabTempValue()[0]))
                            self.label_3.setText("Min-Temp:"+str(grabTempValue()[1]))
                            self.label_4.setText("Max-Temp:"+str(grabTempValue()[2]))
                            self.label_1.setText("SPO2 Level:"+str(int(np.ceil(result))))
                        except:
                            self.label_2.setText("Temp:"+"NA")
                            self.label_3.setText("Min-Temp:"+"NA")
                            self.label_4.setText("Max-Temp:"+"NA")
                            self.label_1.setText("SPO2 Level:"+"NA")
                        self.spo2Flag=0
                    elif self.spo2Flag==2:
                        # Failed measurement: just disarm the flag.
                        self.spo2Flag=0
                if self.spo2Flag!=2:
                    queue.put(image)
                frameCount=frameCount+1
                globalCount=globalCount +1
            else:
                time.sleep(DISP_MSEC / 1000.0)
        else:
            print("Error: can't grab camera image")
            break
    cap.release()
# Image widget
class ImageWidget(QWidget):
    """Simple widget that repaints the most recently supplied QImage."""

    def __init__(self, parent=None):
        super(ImageWidget, self).__init__(parent)
        self.image = None

    def setImage(self, image):
        """Cache *image*, grow the widget to fit it and request a repaint."""
        self.image = image
        self.setMinimumSize(image.size())
        self.update()

    def paintEvent(self, event):
        """Blit the cached image (if any) at the widget's top-left corner."""
        painter = QPainter()
        painter.begin(self)
        if self.image:
            painter.drawImage(QPoint(0, 0), self.image)
        painter.end()
# Main window
class MyWindow(QMainWindow):
    """Main GUI window: video display, SpO2/temperature readouts and a log box."""

    # Cross-thread signal: any thread may emit text to append to the log box.
    text_update = pyqtSignal(str)

    # Create main window
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.central = QWidget(self)
        self.textbox = QTextEdit(self.central)
        self.textbox.setFont(TEXT_FONT)
        self.textbox.setMinimumSize(300, 100)
        self.text_update.connect(self.append_text)
        self.label_1 = QLabel('SPO2 Level:',self)
        self.label_1.setStyleSheet("background-color: white; border: 1px solid black;")
        self.label_2 = QLabel('Temperature:',self)
        self.label_2.setStyleSheet("background-color: white; border: 1px solid black;")
        self.label_3 = QLabel('Min-Temp:',self)
        self.label_3.setStyleSheet("background-color: white; border: 1px solid black;")
        self.label_4 = QLabel('Max-Temp:',self)
        self.label_4.setStyleSheet("background-color: white; border: 1px solid black;")
        self.button_1= QPushButton("Start",self)
        self.button_1.clicked.connect(self.getValues)
        self.button_2= QPushButton("Temperature",self)
        self.button_2.clicked.connect(self.temperatureUpdate)
        self.button_3= QPushButton("Frame Difference",self)
        self.button_3.clicked.connect(self.frameDifference)
        # Measurement state shared with the grab_images capture thread.
        self.spo2Flag=False
        self.frameDifferenceFlag = False
        self.firstFrame=[]
        # Redirect stdout into the GUI log box (see write()/append_text()).
        sys.stdout = self
        print("Camera number %u" % camera_num)
        print("Image size %u x %u" % IMG_SIZE)
        if DISP_SCALE > 1:
            print("Display scale %u:1" % DISP_SCALE)
        self.vlayout = QVBoxLayout() # Window layout
        self.displays = QHBoxLayout()
        self.displaysLabel = QHBoxLayout()
        self.displaysSecond = QHBoxLayout()
        self.disp = ImageWidget(self)
        self.displays.addWidget(self.disp)
        self.displaysLabel.addWidget(self.label_1)
        self.displaysLabel.addWidget(self.label_2)
        self.displaysSecond.addWidget(self.label_3)
        self.displaysSecond.addWidget(self.label_4)
        self.vlayout.addLayout(self.displays)
        self.vlayout.addLayout(self.displaysLabel)
        self.vlayout.addWidget(self.button_1)
        self.vlayout.addWidget(self.button_2)
        self.vlayout.addWidget(self.button_3)
        self.vlayout.addLayout(self.displaysSecond)
        # self.vlayout.addLayout(self.displays)
        self.label = QLabel(self)
        self.vlayout.addWidget(self.label)
        self.vlayout.addWidget(self.textbox)
        self.central.setLayout(self.vlayout)
        self.setCentralWidget(self.central)
        self.mainMenu = self.menuBar() # Menu bar
        exitAction = QAction('&Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.triggered.connect(self.close)
        self.fileMenu = self.mainMenu.addMenu('&File')
        self.fileMenu.addAction(exitAction)

    def frameDifference(self):
        """Toggle thermal frame differencing and capture the reference frame."""
        self.frameDifferenceFlag = not self.frameDifferenceFlag
        self.firstFrame = getFrame()

    def temperatureUpdate(self):
        """Refresh the current/min/max temperature labels from the camera."""
        # self.label_2.setText("Temp:"+str(grabTempValue()))
        self.label_2.setText("Temp:"+str(grabTempValue()[0]))
        self.label_3.setText("Min-Temp:"+str(grabTempValue()[1]))
        self.label_4.setText("Max-Temp:"+str(grabTempValue()[2]))

    def getValues(self):
        """Start a new SpO2 measurement: reset the signal buffer, arm the flag."""
        global frameCount,final_sig
        final_sig=[]
        frameCount = 0
        self.spo2Flag=True
        self.label_1.setText("SPO2:" +" ")
        # self.label_2.setText("Temp:"+str(grabTempValue()))
        self.label_2.setText("Temp:"+str(grabTempValue()[0]))
        self.label_3.setText("Min-Temp:"+str(grabTempValue()[1]))
        self.label_4.setText("Max-Temp:"+str(grabTempValue()[2]))
        # self.label_2.setText("Temperature:" +" ")

    # Start image capture & display
    def start(self):
        """Start the display refresh timer and the background capture thread."""
        self.timer = QTimer(self) # Timer to trigger display
        self.timer.timeout.connect(lambda:
                    self.show_image(image_queue, self.disp, DISP_SCALE))
        self.timer.start(DISP_MSEC)
        self.capture_thread = threading.Thread(target=grab_images,
                    args=(camera_num, image_queue,self))
        self.capture_thread.start() # Thread to grab images

    # Fetch camera image from queue, and display it
    def show_image(self, imageq, display, scale):
        """Pop one frame from the queue (if any), convert to RGB and render it."""
        if not imageq.empty():
            image = imageq.get()
            if image is not None and len(image) > 0:
                img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                self.display_image(img, display, scale)

    # Display an image, reduce size if required
    def display_image(self, img, display, scale=1):
        """Wrap an RGB numpy image in a QImage and hand it to the widget."""
        disp_size = img.shape[1]//scale, img.shape[0]//scale
        disp_bpl = disp_size[0] * 3
        if scale > 1:
            img = cv2.resize(img, disp_size,
                             interpolation=cv2.INTER_CUBIC)
        qimg = QImage(img.data, disp_size[0], disp_size[1],
                      disp_bpl, IMG_FORMAT)
        display.setImage(qimg)

    # Handle sys.stdout.write: update text display
    def write(self, text):
        self.text_update.emit(str(text))

    def flush(self):
        pass

    # Append to text display
    def append_text(self, text):
        """Append *text* to the log box, translating LFs into new blocks."""
        cur = self.textbox.textCursor() # Move cursor to end of text
        cur.movePosition(QTextCursor.End)
        s = str(text)
        while s:
            head,sep,s = s.partition("\n") # Split line at LF
            cur.insertText(head) # Insert text at cursor
            if sep: # New line if LF
                cur.insertBlock()
        self.textbox.setTextCursor(cur) # Update visible cursor

    # Window is closing: stop video capture
    def closeEvent(self, event):
        """Signal the capture thread to stop and wait for it to finish."""
        global capturing
        capturing = False
        self.capture_thread.join()
if __name__ == '__main__':
    # Optional CLI argument: 1-based camera index.  A non-numeric or
    # out-of-range value aborts startup with a message.
    if len(sys.argv) > 1:
        try:
            camera_num = int(sys.argv[1])
        except:
            camera_num = 0
    if camera_num < 1:
        print("Invalid camera number '%s'" % sys.argv[1])
    else:
        app = QApplication(sys.argv)
        win = MyWindow()
        win.showMaximized()
        win.setWindowTitle(VERSION)
        win.start()
        sys.exit(app.exec_())
| TheBluePhoenix10/multiparaOS | src/test.py | test.py | py | 12,695 | python | en | code | 0 | github-code | 13 |
27165492465 |
def _parse_range(spec):
    """Parse a 'start-end' spec into an inclusive (start, end) int tuple."""
    start, end = map(int, spec.split('-'))
    return start, end


def fully_contains(a, b):
    """True when one inclusive range fully contains the other."""
    return (a[0] <= b[0] and b[1] <= a[1]) or (b[0] <= a[0] and a[1] <= b[1])


def overlaps(a, b):
    """True when the two inclusive ranges share at least one value.

    Interval arithmetic instead of materialising two sets per line --
    same result, O(1) per pair.
    """
    return a[0] <= b[1] and b[0] <= a[1]


def solve(sections):
    """Count (fully-contained pairs, overlapping pairs) over 'a-b,c-d' lines."""
    total = 0
    total_part2 = 0
    for sec in sections:
        left, right = sec.split(',')
        a = _parse_range(left)
        b = _parse_range(right)
        if fully_contains(a, b):
            total += 1
        if overlaps(a, b):
            total_part2 += 1
    return total, total_part2


if __name__ == '__main__':
    with open('input.txt', 'r') as file:
        sections = file.read().strip().split('\n')
    total, total_part2 = solve(sections)
    print(f"Number of assignments that are fully covered by the second elf:", total)
    print(f"Overlapping assignments:", total_part2)
31318752744 | """Module 7 - Lab 2
We're going to practice installing, importing and using external python modules while learning out how to scrape web pages.
"""
# Import libraries
import requests
from bs4 import BeautifulSoup
# Scraping the web
# Set up our url as a string
url = "https://wiki.python.org/moin/IntroductoryBooks"
# Pull down all the html and store it in a page variable
response = requests.get(url)
# Returns a requests object
#print(response.headers)
# Raw response body as bytes (BeautifulSoup accepts bytes or str).
content = response.content
# Parsing webpages
# Convert to beautifulsoup format
soup = BeautifulSoup(content, "html.parser")
# prettify to convert html to a more readable format
#print(soup.prettify())
# title tags
#print(soup.title)
# convert to a string
#print(soup.title.string)
# find all a tags
all_a = soup.find_all("a")
# use a for loop
#for x in all_a:
#    print(x)
# The second positional argument to find_all filters on the CSS class,
# so this matches <a class="https"> tags (the wiki's external links).
all_a_https = soup.find_all("a", "https")
#for x in all_a_https:
#    print(x)
# index this item like a list
print(all_a_https[0])
# loop through other metadata (attributes) that are nested inside the dev tag that we pulled out
for x in all_a_https:
    print(x.attrs["href"])
# use this info to make a dictionary of books and their links (Question)
data = {}
for a in all_a_https:
    # get_text(strip=True) instead of a.string.strip(): Tag.string is None
    # when the tag has nested children, which would raise AttributeError.
    title = a.get_text(strip=True)
    data[title] = a.attrs["href"]
print(data)
| dtingg/IntroToPython | Module 7/mod7_lab2.py | mod7_lab2.py | py | 1,348 | python | en | code | 0 | github-code | 13 |
19304981596 | # Write a program to accept a number from 1 to 7 and display the name of the day like 1
# for Sunday , 2 for Monday and so on.
# Map day numbers 1-7 to their names (1 = Sunday ... 7 = Saturday).
# (Also fixes the original "Sturday" typo and stops shadowing the
# builtin `dict`.)
DAY_NAMES = {1: "Sunday", 2: "Monday", 3: "Tuesday", 4: "Wednesday",
             5: "Thursday", 6: "Friday", 7: "Saturday"}


def day_name(num):
    """Return the day name for *num* (1-7), or None when out of range."""
    return DAY_NAMES.get(num)


if __name__ == '__main__':
    num = int(input("Input the number of the day you will like to see. Note(between 1-7): "))
    name = day_name(num)
    if name is not None:
        print(num, 'for', name)
    else:
        print("An Error Occured: Input a valid number from 1-7.")
71540701457 | from random import randint
# Guess-the-number game: keep prompting until the hidden number is found.
secret = randint(0, 100)

guess = int(input("Entrez un nombre: "))
while guess != secret:
    # Tell the player which direction to adjust.
    if guess < secret:
        print("trop petit")
    else:
        print("trop grand")
    guess = int(input("Entrez un nombre: "))
print(f"Vous avez trouvé le nombre caché était donc bien: {secret}")
| Fixer38/University-Notes | semester-1/progra-ex/manip2/ex12-random.py | ex12-random.py | py | 327 | python | fr | code | 0 | github-code | 13 |
71082097937 | import numpy as np
from activation import sigmoid_derivative, sigmoid_function
class MLP:
    """Minimal fully-connected network: input -> hidden -> output layer."""

    def __init__(
        self,
        input_size,
        hidden_size,
        output_size,
        activation,
        derivative_activation,
    ) -> None:
        """
        This is a three layer neural network with:
            input layer:
            hidden layer:
            output layer:
        input_size: number of input neurons
        hidden_size: number of hidden neurons
        output_size: number of output neurons
        activation: element-wise activation callable
        derivative_activation: derivative of the activation; it is called on
            the *activated* output (as sigmoid_derivative expects -- TODO
            confirm against activation.py)
        """
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.activation = activation
        self.derivative_activation = derivative_activation
        # Weight matrices drawn from a standard normal distribution.
        self.weights = np.random.randn(
            self.input_size, self.hidden_size
        )  # rowsXcolumns
        self.bias_hidden = np.zeros((1, self.hidden_size))  # rowsXcolumns
        self.bias_outputs = np.zeros((1, self.output_size))  # rowsXcolumns
        self.weights_outputs = np.random.randn(
            self.hidden_size,
            self.output_size,
        )
        # NOTE(review): unused -- train() takes its own lr argument.
        self.learning_rate = 0.001

    def forward(self, X):
        """Forward pass; caches layer activations for backpropagation."""
        self.hidden_activation = self.activation(
            np.dot(X, self.weights) + self.bias_hidden
        )
        self.output_activation = self.activation(
            np.dot(self.hidden_activation, self.weights_outputs) + self.bias_outputs
        )
        return self.output_activation

    def backpropergation(self, x, y, y_, lr):
        """One gradient-descent step from a single (x, y) pair and prediction y_.

        (The method name keeps the original spelling: renaming would change
        the public interface.)
        """
        error = y - y_
        delta_output = error * self.derivative_activation(
            y_,
        )
        # Propagate the output delta back through the output weights.
        error_hidden = delta_output.dot(self.weights_outputs.T)
        delta_hidden = error_hidden * self.derivative_activation(self.hidden_activation)
        self.weights_outputs += self.hidden_activation.T.dot(delta_output) * lr
        self.bias_outputs += np.sum(delta_output) * lr
        self.weights += x.T.dot(delta_hidden) * lr
        self.bias_hidden += np.sum(delta_hidden) * lr

    def train(self, X, y, epochs, lr):
        """Stochastic training: one backprop step per sample, `epochs` passes."""
        for _ in range(epochs):
            for i in range(len(X)):
                x = X[i]
                target = y[i]
                y_ = self.forward(x)
                self.backpropergation(x, target, y_, lr)

    def predict(self, x):
        """Return the network's output for input x."""
        return self.forward(x)
# Example usage
if __name__ == "__main__":
    # XOR truth table; each sample is shaped (1, 2) so the matmuls stay 2-D.
    X = np.array([[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]])
    y = np.array([[0], [1], [1], [0]])
    mlp = MLP(
        input_size=2,
        hidden_size=4,
        output_size=1,
        activation=sigmoid_function,
        derivative_activation=sigmoid_derivative,
    )
    mlp.train(X, y, epochs=10000, lr=0.1)
    for x in X:
        print(f"Input: {x}, Predicted: {mlp.predict(x)}")
| wayneotemah/ML-from-scratch | perceptron/multilayerpercepton.py | multilayerpercepton.py | py | 2,788 | python | en | code | 0 | github-code | 13 |
1486440233 | import random
import time
import os
from pprint import pprint
class Pokemon:
    """A battling pokemon: base stats plus a list of [name, power, effect] attacks."""

    def __init__(self, name, hp, attack, defense, speed, attaks) -> None:
        self.name = name
        self.hp = hp
        self.attack = attack
        self.defense = defense
        self.speed = speed
        self.attacks = attaks

    def turn(self):
        """Interactively ask the player to pick one of this pokemon's attacks.

        Re-prompts on non-numeric or out-of-range input; returns the chosen
        attack index.
        """
        print("----------------\n")
        print("Seleccióna un ataque de {}\n".format(self.name))
        print("----------------\n")
        text_attacks = ""
        i = 0
        for x in self.attacks:
            text_attacks = text_attacks + \
                "{}: {} ({} ATTK) \n".format(i, x[0], x[1])
            i += 1
        attack = -1
        len_attacks = len(self.attacks)
        while attack < 0 or attack >= len_attacks:
            try:
                attack = int(input(text_attacks + "\n - "))
            except ValueError:
                attack = -1
        return int(attack)

    def summary(self):
        """Pretty-print this pokemon's current attributes.

        Bug fix: the original wrapped pprint() in print(), which printed a
        spurious "None" line after the attributes (pprint returns None).
        """
        pprint(vars(self))
# Roster: arguments are (name, hp, attack, defense, speed, attacks);
# each attack is [name, power, optional effect string parsed by attack_effect].
pikachu = Pokemon("Pikachu",
                  35,
                  55,
                  40,
                  90,
                  [
                      ["Moflete estático", 20, "enemy_speed_down_1.2"],
                      ['Atizar', 80, None],
                      ['Trueno', 110, None],
                      ['Agilidad', 0, "self_speed_up_1.2"]
                  ])
squirtle = Pokemon("Squirtle",
                   44,
                   48,
                   65,
                   43,
                   [
                       ["Placaje", 40, None],
                       ['Pistola agua', 40, None],
                       ['Defensa férrea', 0, "self_defense_up_1.2"],
                       ['Hidrobomba', 110, None]
                   ])
# Battle-loop scratch state: turn counter, who acts first, chosen attack.
turno = 1
first = ""
attack = ""
def checkSpeed(pokemon_1, pokemon_2):
    """Return the name of the pokemon that acts first: the faster one,
    or a coin flip when both have the same speed."""
    if pokemon_1.speed > pokemon_2.speed:
        return pokemon_1.name
    if pokemon_1.speed < pokemon_2.speed:
        return pokemon_2.name
    # Tie: decide by the parity of a small random number.
    if random.randint(0, 10) % 2 == 0:
        return pokemon_1.name
    return pokemon_2.name
def attack_turn(attack, pokemon_1, pokemon_2):
    """Resolve attack number *attack* of pokemon_1 against pokemon_2.

    Applies damage (hp is clamped at 0).  If the attack carries an effect
    string, the effect is applied (and prints its own message); otherwise
    a damage summary is printed here.  Always pauses ~2s for pacing.
    """
    name = pokemon_1.attacks[attack][0]
    dmg = pokemon_1.attacks[attack][1]
    effect = pokemon_1.attacks[attack][2]
    if(dmg > 0):
        damage = attack_defense(dmg, pokemon_1.attack, pokemon_2.defense)
        if pokemon_2.hp - damage < 0:
            pokemon_2.hp = 0
        else:
            pokemon_2.hp = pokemon_2.hp - damage
    if(effect != None):
        attack_effect(effect, pokemon_1, pokemon_2)
    else:
        print("----------------\n")
        # `damage` is only bound when dmg > 0; the dmg check below guards it.
        if(dmg > 0):
            print("{} ha usado el ataque {}, {} ha recibido {} de daño\n".format(
                pokemon_1.name, name, pokemon_2.name, damage))
        else:
            print("{} ha usado el ataque {}, {} no ha recibido daño\n".format(
                pokemon_1.name, name, pokemon_2.name))
        print("----------------\n")
    time.sleep(2)
def attack_defense(dmg, attack, defense):
    """Compute attack damage with a simplified pokemon formula.

    A random 85-100% spread scales the base damage; the result is at
    least 1.
    """
    spread = random.randint(85, 100)
    # Base ratio: (level-scaled) attack * move power over 25 * defense.
    base = (0.2 * 1 + 1) * attack * dmg / (25 * defense)
    damage = round(0.01 * 1 * 1 * spread * (base + 2))
    return damage if damage else 1
def attack_effect(effect, pokemon_1, pokemon_2):
    """Apply a stat-change effect string of the form 'who_attribute_direction_value'.

    'self' targets the attacker, anything else the defender.  Returns False
    for unknown attributes; otherwise announces the change and pauses 2s.
    """
    who, attribute, direction, raw_value = effect.split("_")
    value = float(raw_value)
    target = pokemon_1 if who == 'self' else pokemon_2
    if attribute == "speed":
        speed(direction, target, value)
    elif attribute == "defense":
        defense(direction, target, value)
    else:
        return False
    print("----------------\n")
    print("* La {} de {} ha cambiado \n ".format(
        attribute.upper(), target.name))
    time.sleep(2)
def speed(direction, pokemon, value):
    """Scale a pokemon's speed by *value* (multiply for 'up', divide
    otherwise), rounding the result to an int."""
    if direction == "up":
        new_speed = pokemon.speed * value
    else:
        new_speed = pokemon.speed / value
    pokemon.speed = round(new_speed)
def defense(direction, pokemon, value):
    """Scale a pokemon's defense by *value* (multiply for 'up', divide
    otherwise), rounding the result to an int."""
    if direction == "up":
        new_defense = pokemon.defense * value
    else:
        new_defense = pokemon.defense / value
    pokemon.defense = round(new_defense)
def resumen(pokemon_1, pokemon_2, turno):
    """Print an end-of-turn summary for both pokemon, pause, clear screen."""
    print("----------------\n")
    print("FIN TURNO {}\n".format(turno))
    for combatant in (pokemon_1, pokemon_2):
        print("*{}\n".format(combatant.name))
        combatant.summary()
    print("----------------\n")
    time.sleep(3)
    os.system("clear")
def endGame(pokemon_1, pokemon_2):
    """Build the end-of-combat message: the survivor wins, otherwise a draw."""
    if pokemon_1.hp <= 0:
        return "{} ha ganado el combate".format(pokemon_2.name)
    if pokemon_2.hp <= 0:
        return "{} ha ganado el combate".format(pokemon_1.name)
    return "Empate entre {} y {}".format(pokemon_1.name, pokemon_2.name)
# Battle loop: the faster pokemon (coin flip on ties) attacks first each
# turn; the battle stops the moment either side's hp reaches 0.
while pikachu.hp > 0 and squirtle.hp > 0:
    print("----------------\n")
    print("INICIO DEL TURNO {}\n".format(turno))
    print("----------------\n")
    first = checkSpeed(pikachu, squirtle)
    if first == pikachu.name:
        attack = pikachu.turn()
        attack_turn(attack, pikachu, squirtle)
        if(squirtle.hp == 0):
            break
        attack = squirtle.turn()
        attack_turn(attack, squirtle, pikachu)
        if(pikachu.hp == 0):
            break
    elif first == squirtle.name:
        attack = squirtle.turn()
        attack_turn(attack, squirtle, pikachu)
        if(pikachu.hp == 0):
            break
        attack = pikachu.turn()
        attack_turn(attack, pikachu, squirtle)
        if(squirtle.hp == 0):
            break
    # End-of-turn summary is skipped when the battle ends mid-turn (break).
    resumen(pikachu, squirtle, turno)
    turno += 1
print(endGame(pikachu, squirtle))
| AdrianVillamayor/Python_First_Try | pokemon_fight.py | pokemon_fight.py | py | 5,959 | python | en | code | 0 | github-code | 13 |
5792350135 | from threading import Thread
from queue import Queue
from message import Message, MessageType
import socket
import select
class Connection:
def __init__(self, _logger, socket, address="unknown", fileroot='/tmp'):
self._logger = _logger
self.socket = socket
self.address = address
# Create a buffer byte array for our client
self.buffer = b""
self.responses = {}
self.file = False
self.fileroot = fileroot
# Close our socket and cleanup
def close(self):
self.socket.close()
# Close any open files
if self.fileIsOpen():
self.file.close()
def processMessage(self, message):
# ### Process the message depending on what type of message it is
if message.type == MessageType.FileStart:
# If FileStart, open a new file for writing to
self.file = open(
"{0}/{1}".format(self.fileroot, message.filename), "wb")
self._logger.debug(
"Opened: {}".format(message.filename))
if message.type in MessageType.File:
# Check if a file was never opened for this connection
if not self.fileIsOpen():
raise RuntimeError("No file opened")
# All File message types have a content, lets write that to the
# file.
self.file.write(message.content)
self._logger.debug("Wrote {}.".format(
message.content))
# We can go ahead and close the file if we receive a FileEnd message
if message.type == MessageType.FileEnd:
self.file.close()
def processBuffer(self, buffer):
# Add this events buffer to our overall buffer
self.buffer += buffer
# Our packets are terminated with a null terminator (\0).
# If we find one we know we have received a whole packet.
packetMarkerPosition = self.buffer.find(b'\0')
while packetMarkerPosition != -1:
try:
# Extract our packet from the buffer
messageBuffer = self.buffer[:packetMarkerPosition]
# Attempt to convert our packet into a message
message = Message.fromBytes(messageBuffer)
self._logger.debug("Got a {} message!".format(message.type.name))
self.processMessage(message)
# If we have any issues running the above code, such as failing to
# parse the incoming bytes into a valid message, we should log that
# error.
except RuntimeError as err:
self._logger.error(err)
finally:
# Trim the buffer of packet we just processed
self.buffer = self.buffer[packetMarkerPosition + 1:]
# Check if there is another whole packet in the buffer
packetMarkerPosition = self.buffer.find(
b'\0')
def recv(self, bufferSize):
buffer = self.socket.recv(bufferSize)
# If we get an empty message, when know the communication channel
# has been closed
if len(buffer) == 0:
self.shutdown()
return 0
self._logger.debug("Got bytes: {0}".format(buffer))
self.processBuffer(buffer)
return None
def fileIsOpen(self):
return self.file and not self.file.closed
def shutdown(self):
self.socket.shutdown(socket.SHUT_RDWR)
class Server:
    """Epoll-driven TCP server that runs its accept/read loop on a
    background thread and delegates per-client protocol handling to
    ``Connection`` objects.

    NOTE(review): ``select.epoll`` is Linux-only, so this server will not
    run on macOS/Windows.
    """
    def __init__(self, _logger, config):
        """Store the logger, overlay *config* on the defaults, and create
        the (not yet bound) listening socket."""
        self._logger = _logger
        # Setup config with defaults
        self.config = {
            'file_root': '/tmp',
            'event_timeout': 0.2,
            'internal_recv_size': 8192
        }
        self.config.update(config)
        # This msgQueue can be used to communicate messages to the server thread
        # See the commented out section in run for more info
        self.msgQueue = Queue()
        # Open a new socket to listen on
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Set the socket to reuse old port if server is restarted
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # This is set to true when we want to end the server loop
        self.done = False
    def listen(self, port, addr='0.0.0.0'):
        """Bind to (*addr*, *port*), start the event loop on a daemonless
        worker thread, and return the thread handle."""
        # Sets the interface and port number for the socket to listen for connections
        # on.
        self.socket.bind((addr, port))
        self.socket.listen(1)
        # In order to prevent locking up the main thread, we start a new child thread.
        # This child thread will continously run the server's loop function and
        # check self.done periodically if to see if it should end
        thread = Thread(target=self.loop, args=())
        thread.start()
        self._logger.debug("Server listening on {}".format(port))
        # We will return the thread handle so that it can be acted on in the future
        return thread
    def close(self):
        """Ask the loop thread to exit; it notices on its next epoll timeout."""
        self.done = True
    def loop(self):
        """Event loop: accept clients, dispatch read/write/hang-up events,
        and tear everything down when ``self.done`` becomes true."""
        connections = {}
        # See http://scotdoyle.com/python-epoll-howto.html for a detailed
        # explination on the epoll interface
        epoll = select.epoll()
        # We register our socket server in EPOLLIN mode to watch for incomming
        # connections.
        epoll.register(self.socket.fileno(), select.EPOLLIN)
        try:
            # Check if we should end our loop
            while not self.done:
                # This will return any new events
                events = epoll.poll(self.config['event_timeout'])
                # Process any new events
                for fileno, event in events:
                    # This handles a new connection
                    if fileno == self.socket.fileno():
                        client, address = self.socket.accept()
                        client.setblocking(0)
                        self._logger.info(
                            "New connection from {0}".format(address))
                        # Store our client in a connections dictionary
                        connections[client.fileno()] = Connection(self._logger, client, address, self.config['file_root'])
                        # Register incomming client connection with our epoll interface
                        epoll.register(client.fileno(), select.EPOLLIN)
                    # This event is called when there is data to be read in
                    elif event & select.EPOLLIN:
                        # Try to receive data from our client
                        # NOTE(review): Connection.recv returns 0 after the
                        # peer closes; epoll.modify(fileno, 0) then clears
                        # the event mask -- confirm intended.
                        mode = connections[fileno].recv(self.config['internal_recv_size'])
                        if mode is not None:
                            epoll.modify(fileno, mode)
                        # Check if transmission is complete. In our case we are
                        # using an NULL termination (\0)
                        # Now that we know the transmission is complete, we should
                        # send a response (switch to send mode)
                        # if not buffers[fileno].endswith(b'\\\\0') and buffers[fileno].endswith(b'\\0'):
                        #     responses[fileno] = b'HTTP/1.0 200 OK\r\nDate: Mon, 1 Jan 1996 01:01:01 GMT\r\n'
                        #     responses[fileno] += b'Content-Type: text/plain\r\nContent-Length: 13\r\n\r\n'
                        #     responses[fileno] += b'Hello, world!'
                        #     epoll.modify(fileno, select.EPOLLOUT)
                        # Server.parse_message(buffers[fileno])
                    # This event is called when there is data to be written out
                    elif event & select.EPOLLOUT:
                        pass
                        # # Send out our response
                        # numBytesWritten = connections[fileno].send(
                        #     responses[fileno])
                        # self._logger.debug("Sent response: {0}".format(
                        #     responses[fileno][:numBytesWritten]))
                        #
                        # # Truncate our response buffer (remove the part that is
                        # # already sent)
                        # responses[fileno] = responses[fileno][numBytesWritten:]
                        # self._logger.debug(responses[fileno])
                        #
                        # if len(responses[fileno]) == 0:
                        #     epoll.modify(fileno, select.EPOLLIN)
                    # Endpoint has closed the connection (No need to send shutdown)
                    elif event & select.EPOLLHUP:
                        self._logger.debug("Connection to [{}] closed!".format(connections[fileno].address))
                        epoll.unregister(fileno)
                        connections[fileno].close()
                        del connections[fileno]
        finally:
            # Close all open connections
            self._logger.debug("Closing all connections...")
            for fileno in connections:
                epoll.unregister(fileno)
                connections[fileno].close()
            # Unregister our server socket with our epoll
            epoll.unregister(self.socket.fileno())
            # Close our epoll
            epoll.close()
            # Close our socket server
            self.socket.close()
            self._logger.info("Server shutdown")
| FallingSnow/simplified-ftp | src/simplified_ftp/server.py | server.py | py | 9,440 | python | en | code | 0 | github-code | 13 |
72137408017 | from math import sqrt
from matplotlib import pyplot
import numpy as np
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_percentage_error
# Evaluation driver: for each of 30 CNYUSD test splits, load the original
# series and the GNSNP predictions, print error metrics, and plot both the
# prediction overlay and the absolute-error curve.
count=1
count1=1
dataset_name="USDCNY"
for i in range(30):
    # Files are 1-indexed on disk: 1_cnyusd.csv ... 30_cnyusd.csv
    origin = read_csv('./GNSNP/CNYUSD/origin/' + str(i+1) + '_cnyusd.csv', delimiter=',',header=None).values
    predictions=read_csv('./GNSNP/CNYUSD/prediction/' + str(i+1) + '_cnyusd.csv', delimiter=',',header=None).values
    rmse = sqrt(mean_squared_error(origin, predictions))
    mse = mean_squared_error(origin, predictions)
    mae = mean_absolute_error(origin, predictions)
    mape = mean_absolute_percentage_error(origin, predictions)
    # NOTE(review): meanV is computed but never used; count/count1 are only
    # incremented, never read.
    meanV = np.mean(origin)
    error = abs(origin - predictions)
    print(dataset_name + ' Test RMSE: %.15f ,MAE: %.15f ,MSE: %.15f ,MAPE: %.15f ' % (rmse, mae,mse, mape))
    # predicted
    fig4 = pyplot.figure()
    ax41 = fig4.add_subplot(111)
    pyplot.xticks(fontsize=15)
    pyplot.yticks(fontsize=15)
    ax41.set_xlabel("Time", fontsize=15)
    ax41.set_ylabel("Magnitude", fontsize=15)
    pyplot.plot(origin, '-', label='the original data')
    pyplot.plot(predictions, '--', label='the GNSNP predicted data')
    pyplot.legend()
    pyplot.title(dataset_name)
    count = count + 1
    pyplot.show()
    # error
    fig1 = pyplot.figure()
    ax42 = fig1.add_subplot(111)
    pyplot.xticks(fontsize=15)
    pyplot.yticks(fontsize=15)
    ax42.set_xlabel("Time", fontsize=15)
    ax42.set_ylabel("Magnitude", fontsize=15)
    pyplot.plot(error, '-', label='the GNSNP error data')
    pyplot.legend()
    pyplot.title(dataset_name)
    count1 = count1 + 1
    pyplot.show()
| xhuph66/GNSNP | compute.py | compute.py | py | 1,749 | python | en | code | 0 | github-code | 13 |
14326520580 | import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.Qt import Qt
from PyQt5.QtWidgets import *
class KeyboardWidget(QDialog):
    """On-screen keyboard dialog.

    Renders a grid of character buttons plus control keys (Clear, Space,
    Back, Enter, Done, Caps/Small, Sym).  All buttons route through a
    QSignalMapper that delivers a key code (ord of the character, or a
    Qt.Key_* constant for control keys) to ``buttonClicked``.  The text is
    edited in an internal preview box and copied into ``currentTextBox``
    when Done is pressed.
    """
    def __init__(self, parent=None):
        super(KeyboardWidget, self).__init__(parent)
        self.currentTextBox = None
        self.signalMapper = QSignalMapper(self)
        self.signalMapper.mapped[int].connect(self.buttonClicked)
        self.initUI()
    @pyqtSlot()
    def do_caps(self):
        """Switch the character grid to upper-case and flip the toggle button."""
        # self.timer.start()
        self.names = self.names_caps
        self.buttonAdd()
        self.cap_button.setText("Caps")
        self.cap_button.clicked.disconnect()
        self.cap_button.clicked.connect(self.do_small)
    @pyqtSlot()
    def do_small(self):
        """Switch the character grid to lower-case and flip the toggle button."""
        # self.timer.stop()
        self.names = self.names_small
        self.buttonAdd()
        self.cap_button.setText("Small")
        self.cap_button.clicked.disconnect()
        self.cap_button.clicked.connect(self.do_caps)
    def initUI(self):
        """Build the preview box, the three key layouts and the control row."""
        self.layout = QGridLayout()
        # p = self.palette()
        # p.setColor(self.backgroundRole(),Qt.white)
        # self.setPalette(p)
        self.setAutoFillBackground(True)
        self.text_box = QTextEdit()
        self.text_box.setFont(QFont('Arial', 12))
        # text_box.setFixedHeight(50)
        # self.text_box.setFixedWidth(300)
        self.layout.addWidget(self.text_box, 0, 0, 1, 10)
        self.names_caps = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '.', '(', ')',
                           'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
                           'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
        self.names_small = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '.', '(', ')',
                            'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
                            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # NOTE(review): '&&' renders a literal '&' on a QPushButton ('&' marks
        # a mnemonic); buttonAdd maps ord(name[-1]) so the key still emits '&'.
        self.names_sym = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '.',
                          '~', '`', '@', '#', '$', '%', '^', '&&', '*', '(',
                          ')', '_', '-', '+', '=', '|', '[', ']', '{', '}', "'",
                          '"', '<', '>', '?', '\\', '/', '!']
        self.names = self.names_small
        self.buttonAdd()
        # Cancel button
        clear_button = QPushButton('Clear')
        # clear_button.setFixedHeight(25)
        clear_button.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
        clear_button.setFont(QFont('Arial', 12))
        clear_button.KEY_CHAR = Qt.Key_Clear
        self.layout.addWidget(clear_button, 5, 0, 1, 2)
        # self.layout.addWidget(clear_button, 8, 2, 1, 2)
        clear_button.clicked.connect(self.signalMapper.map)
        self.signalMapper.setMapping(clear_button, clear_button.KEY_CHAR)
        # clear_button.setFixedWidth(60)
        # Space button
        space_button = QPushButton('Space')
        # space_button.setFixedHeight(25)
        space_button.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
        space_button.setFont(QFont('Arial', 12))
        space_button.KEY_CHAR = Qt.Key_Space
        self.layout.addWidget(space_button, 5, 2, 1, 2)
        # self.layout.addWidget(space_button, 5, 4, 1, 3)
        space_button.clicked.connect(self.signalMapper.map)
        self.signalMapper.setMapping(space_button, space_button.KEY_CHAR)
        # space_button.setFixedWidth(85)
        # Back button
        back_button = QPushButton('Back')
        # back_button.setFixedHeight(25)
        back_button.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
        back_button.setFont(QFont('Arial', 12))
        back_button.KEY_CHAR = Qt.Key_Backspace
        self.layout.addWidget(back_button, 5, 4, 1, 2)
        # self.layout.addWidget(back_button, 5, 7, 1, 2)
        back_button.clicked.connect(self.signalMapper.map)
        self.signalMapper.setMapping(back_button, back_button.KEY_CHAR)
        # back_button.setFixedWidth(60)
        # Enter button
        enter_button = QPushButton('Enter')
        # enter_button.setFixedHeight(25)
        enter_button.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
        enter_button.setFont(QFont('Arial', 12))
        enter_button.KEY_CHAR = Qt.Key_Enter
        self.layout.addWidget(enter_button, 5, 6, 1, 2)
        # self.layout.addWidget(enter_button, 5, 9, 1, 2)
        enter_button.clicked.connect(self.signalMapper.map)
        self.signalMapper.setMapping(enter_button, enter_button.KEY_CHAR)
        # enter_button.setFixedWidth(60)
        # Done button
        done_button = QPushButton('Done')
        # done_button.setFixedHeight(25)
        done_button.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
        done_button.setFont(QFont('Arial', 12))
        done_button.KEY_CHAR = Qt.Key_Home
        self.layout.addWidget(done_button, 4, 9, 1, 1)
        # self.layout.addWidget(done_button, 5, 11, 1, 2)
        done_button.clicked.connect(self.signalMapper.map)
        self.signalMapper.setMapping(done_button, done_button.KEY_CHAR)
        # done_button.setFixedWidth(60)
        # Caps/Small toggle button
        self.cap_button = QPushButton('Caps')
        # self.cap_button.setFixedHeight(25)
        self.cap_button.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
        self.cap_button.setFont(QFont('Arial', 12))
        self.cap_button.KEY_CHAR = Qt.Key_Up
        self.layout.addWidget(self.cap_button, 5, 8, 1, 1)
        # self.layout.addWidget(self.cap_button, 5, 13, 1, 2)
        self.cap_button.clicked.connect(self.signalMapper.map)
        self.signalMapper.setMapping(self.cap_button, self.cap_button.KEY_CHAR)
        # self.cap_button.setFixedWidth(60)
        self.cap_button.clicked.connect(self.do_caps)
        # Symbols button
        sym_button = QPushButton('Sym')
        # sym_button.setFixedHeight(25)
        # sym_button.setFixedWidth(60)
        sym_button.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
        sym_button.setFont(QFont('Arial', 12))
        sym_button.KEY_CHAR = Qt.Key_Down
        self.layout.addWidget(sym_button, 5, 9, 1, 1)
        # self.layout.addWidget(sym_button, 5, 15, 1, 2)
        sym_button.clicked.connect(self.signalMapper.map)
        self.signalMapper.setMapping(sym_button, sym_button.KEY_CHAR)
        self.setGeometry(0, 0, 480, 300)
        self.setLayout(self.layout)
    def buttonAdd(self):
        """(Re)populate the 6x10 character grid from ``self.names``."""
        # self.names = self.names_small
        # NOTE(review): stray debug print -- consider removing.
        print("loe")
        positions = [(i + 1, j) for i in range(6) for j in range(10)]
        for position, name in zip(positions, self.names):
            if name == '':
                continue
            button = QPushButton(name)
            button.setFont(QFont('Arial', 12))
            button.KEY_CHAR = ord(name[-1])
            button.clicked.connect(self.signalMapper.map)
            self.signalMapper.setMapping(button, button.KEY_CHAR)
            self.layout.addWidget(button, *position)
    def buttonClicked(self, char_ord):
        """Apply the key identified by *char_ord* to the preview text box."""
        txt = self.text_box.toPlainText()
        if char_ord == Qt.Key_Up:
            # Caps toggle is handled by the direct do_caps/do_small connection.
            pass
        elif char_ord == Qt.Key_Down:
            self.names = self.names_sym
            self.buttonAdd()
        elif char_ord == Qt.Key_Backspace:
            txt = txt[:-1]
        elif char_ord == Qt.Key_Enter:
            txt += chr(10)
        elif char_ord == Qt.Key_Home:
            # Done: flush the preview text into the target widget and hide.
            self.currentTextBox.setText(txt)
            # self.text_box.setText("")
            self.hide()
            return
        elif char_ord == Qt.Key_Clear:
            txt = ""
        elif char_ord == Qt.Key_Space:
            txt += ' '
        else:
            txt += chr(char_ord)
        self.text_box.setText(txt)
class Communicate(QObject):
    """Signal holder; NOTE(review): not referenced by the code visible here."""
    mousePressSignal = pyqtSignal()
class cQLineEdit(QTextEdit):
    """QTextEdit that pops up its own on-screen KeyboardWidget when clicked.

    Each instance owns a KeyboardWidget whose ``currentTextBox`` points back
    at this widget, so pressing Done on the keyboard writes the text here.
    """
    clicked = pyqtSignal()
    def __init__(self, widget, name):
        super().__init__(widget)
        # self.name = name
        self.ex = KeyboardWidget()
        self.ex.currentTextBox = self
        # Seed both the target widget and the keyboard preview with *name*.
        self.ex.currentTextBox.setText(name)
        self.ex.text_box.setText(name)
    # def mouseDoubleClickEvent(self, QMouseEvent):
    def mousePressEvent(self, QMouseEvent):
        """Show the keyboard and emit ``clicked`` on any mouse press."""
        self.ex.show()
        self.clicked.emit()
'''
class cQLineEdit(QTextEdit):
clicked = pyqtSignal()
def __init__(self, widget):
super().__init__(widget)
self.ex = KeyboardWidget()
def mousePressEvent(self, QMouseEvent):
self.ex.currentTextBox = self
self.ex.show()
self.clicked.emit()
'''
| eurosa/DigilineSystem-new | virtual_keyboard_qtextedit.py | virtual_keyboard_qtextedit.py | py | 8,578 | python | en | code | 0 | github-code | 13 |
25055574735 | import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
# Exploratory analysis: compare Rotten Tomatoes critic (tomatometer) and
# audience ratings for Timothée Chalamet vs. Tom Holland movies, with a
# simple least-squares regression per actor.
# read the csv file into a dataframe
df = pd.read_csv('rotten_tomatoes_movies.csv', engine='python')
print(df.head())
# filter out movies without a numeric rating
df.dropna(subset=['tomatometer_rating', 'audience_rating', 'actors'], inplace=True)
# get data frame of movies with Timmothée Chalamet and Tom Holland
df_chalamet = df[df['actors'].str.contains('Chalamet')]
df_tom = df[df['actors'].str.contains('Tom Holland')]
# describe the tomatometer ratings for Timmothée Chalamet and Tom Holland movies
print(df_chalamet['tomatometer_rating'].describe())
print(df_tom['tomatometer_rating'].describe())
#create a boxplot of tomato meter ratings for Timmothée Chalamet and Tom Holland movies
plt.boxplot([df_chalamet['tomatometer_rating'], df_tom['tomatometer_rating']], labels=['Timmothée Chalamet', 'Tom Holland'])
plt.title('Tomatometer Rating (Timmothée Chalamet vs. Tom Holland movies)')
plt.ylabel('Rating')
plt.show()
# create a histogram of tomato meter ratings for Timmothée Chalamet movies
plt.hist(df_chalamet['tomatometer_rating'], bins=8)
plt.title('Tomatometer Rating (Timmothée Chalamet movies)')
plt.xlabel('Rating')
plt.ylabel('Number of Movies')
plt.show()
# create a scatter plot of tomato meter ratings vs. audience ratings for Timmothée Chalamet movies
plt.scatter(df_chalamet['tomatometer_rating'], df_chalamet['audience_rating'], s=8)
plt.xlabel('Tomatometer Rating')
plt.ylabel('Audience Rating')
plt.title('Tomatometer Rating vs. Audience Rating (Timmothée Chalamet movies)')
# Least-squares fit by hand: slope a = (sy/sx) * r, intercept from the means.
r = np.corrcoef(df_chalamet['tomatometer_rating'], df_chalamet['audience_rating'])[0, 1]
a = df_chalamet['audience_rating'].std()/df_chalamet['tomatometer_rating'].std() * r
b = df_chalamet['audience_rating'].mean() - a * df_chalamet['tomatometer_rating'].mean()
print(f'R-squared: {r**2}')
print(f'y = {b} + {a}x')
# plot the regression line
x = df_chalamet['tomatometer_rating']
y = a * x + b
plt.plot(x, y, color='red')
plt.show()
# total sum of squares for Timmothée Chalamet movies
tss = ((df_chalamet['audience_rating'] - df_chalamet['audience_rating'].mean())**2).sum()
print(f'TSS: {tss}')
# residual sum of squares for Timmothée Chalamet movies
rss = ((df_chalamet['audience_rating'] - y)**2).sum()
print(f'RSS: {rss}')
# create a residual plot
df_chalamet['residuals'] = df_chalamet['audience_rating'] - (a * df_chalamet['tomatometer_rating'] + b)
plt.scatter(df_chalamet['tomatometer_rating'], df_chalamet['residuals'], s=8)
plt.title('Residuals vs. Tomatometer Rating (Timmothée Chalamet movies)')
plt.xlabel('Tomatometer Rating')
# NOTE(review): ratings are percentages, not pounds -- axis label looks copied
# from another assignment.
plt.ylabel('Residuals (pounds)')
plt.show()
#create a historgram of tomato meter ratings for Tom Holland movies
plt.hist(df_tom['tomatometer_rating'], bins=10)
plt.title('Tomatometer Rating (Tom Holland movies)')
plt.xlabel('Rating')
plt.ylabel('Number of Movies')
plt.show()
# create a scatter plot of tomato meter ratings vs. audience ratings for Tom Holland movies
plt.scatter(df_tom['tomatometer_rating'], df_tom['audience_rating'], s=8)
plt.xlabel('Tomatometer Rating')
plt.ylabel('Audience Rating')
plt.title('Tomatometer Rating vs. Audience Rating (Tom Holland movies)')
r = np.corrcoef(df_tom['tomatometer_rating'], df_tom['audience_rating'])[0, 1]
a = df_tom['audience_rating'].std()/df_tom['tomatometer_rating'].std() * r
b = df_tom['audience_rating'].mean() - a * df_tom['tomatometer_rating'].mean()
print(f'R-squared {r**2}')
print(f'y = {b} + {a}x')
# plot the regression line for Tom Holland movies
x = df_tom['tomatometer_rating']
y = a * x + b
plt.plot(x, y, color='red')
plt.show()
# total sum of squares for Tom Holland movies
tss = ((df_tom['audience_rating'] - df_tom['audience_rating'].mean())**2).sum()
print(f'TSS: {tss}')
# residual sum of squares for Tom Holland movies
rss = ((df_tom['audience_rating'] - y)**2).sum()
print(f'RSS: {rss}')
# create a residual plot for Tom Holland movies
df_tom['residuals'] = df_tom['audience_rating'] - (a * df_tom['tomatometer_rating'] + b)
plt.scatter(df_tom['tomatometer_rating'], df_tom['residuals'], s=8)
plt.title('Residuals vs. Tomatometer Rating (Tom Holland movies)')
plt.xlabel('Tomatometer Rating')
plt.ylabel('Residuals (pounds)')
plt.show()
| tinuh/applied-statistics | U2 Data Project.py | U2 Data Project.py | py | 4,267 | python | en | code | 0 | github-code | 13 |
41713625591 | import requests
import json
class sudoku():
    """Stub board wrapper.

    NOTE(review): the constructor only mutates the caller's grid (sets the
    top-left cell to 1) and stores nothing on self -- looks unfinished; the
    instantiation in main() below is commented out.
    """
    def __init__(self, matrix):
        matrix[0][0] = 1
def main():
    """Fetch an easy sudoku board from the sugoku web API and print it."""
    print("Ciao!")
    response = requests.get("https://sugoku.herokuapp.com/board?difficulty=easy")
    """ sudoku = sudoku(response) """
    print(response.status_code)
    """ 200 means --> successful """
    response_json = response.json()
    print(response_json['board'][0][0])
    # NOTE(review): indexes [j][i], so the 9x9 grid is printed transposed --
    # confirm whether column-major output is intended.
    for i in range (len(response_json['board'][0])):
        for j in range (len(response_json['board'][0])):
            print(response_json['board'][j][i], end = " ")
        print("\n")

if __name__ == "__main__":
    main()
""" La memoria è una funzione psichica e neurale di assimilazione """ | DanieleCoppola/ProgettoSUDOKU | murgo_code.py | murgo_code.py | py | 704 | python | en | code | 0 | github-code | 13 |
15032884806 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import smtplib
import requests
from dotenv import load_dotenv
from flask import Flask, request
from email import message_from_string
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.utils import parseaddr, make_msgid
from hashlib import md5
from langdetect import detect
from email_reply_parser import EmailReplyParser
# Retrieve environment variables (SMTP credentials, language fallback,
# optional catch-all address, and the Dialogflow-style agent endpoint).
load_dotenv()
host = os.environ.get('INBOX_HOST')
user = os.environ.get('INBOX_USER')
password = os.environ.get('INBOX_PASSWORD')
fallback_lang = os.environ.get('FALLBACK_LANG')
catchall = os.environ.get('CATCHALL')
endpoint = os.environ.get('ENDPOINT')
debug = os.environ.get('DEBUG') == 'true'
# NOTE(review): raises TypeError when PORT is unset -- confirm deployment
# always provides it.
port = int(os.environ.get('PORT'))

app = Flask(__name__)
# Handle POST request from Webhook
@app.route('/', methods=['POST'])
def inbox():
# Parse E-Mail
parsed_email = message_from_string(request.form.to_dict()['email'])
parsed_email_from = parseaddr(parsed_email['From'])[1]
parsed_email_to = parseaddr(parsed_email['To'])[1]
parsed_email_to_domain = parsed_email_to.split('@')[1]
parsed_email_session = md5(bytes(parsed_email.get('Subject', '').replace('Re: ', '') + parsed_email_from, encoding='utf8')).hexdigest()
parsed_email_body = ''
for part in parsed_email.walk():
if part.get_content_type() == 'text/plain':
parsed_email_body += part.get_payload()
parsed_email_body = EmailReplyParser.parse_reply(parsed_email_body)
parsed_email_lang = fallback_lang
try:
parsed_email_lang = detect(parsed_email_body)
except:
pass
# Log E-Mail
app.logger.info('Received new E-Mail')
app.logger.info('From: ' + parsed_email_from)
app.logger.info('To: ' + parsed_email_to)
app.logger.info('Text: ' + parsed_email_body)
app.logger.info('Message ID: ' + parsed_email['Message-ID'])
app.logger.info('Session ID: ' + parsed_email_session)
# Build Request
agent_id = parsed_email_to.split('@')[0]
req = {
'session': parsed_email_session,
'queryInput': {
'text': {
'text': parsed_email_body,
'languageCode': parsed_email_lang
}
},
'queryParams': {
'payload': {
'email': {
'from': parsed_email_from,
'to': parsed_email_to,
'subject': parsed_email['Subject'],
'body': parsed_email_body
}
}
}
}
# Make the request
agent = requests.get(endpoint.replace('*', agent_id))
r = requests.post(endpoint.replace('*', agent_id), json=req)
if r.status_code == 200:
# Make new E-Mail for the response
message = MIMEMultipart()
message['Message-ID'] = make_msgid()
message['In-Reply-To'] = parsed_email['Message-ID']
message['References'] = parsed_email['Message-ID']
message['From'] = agent.json()['displayName'] + ' <' + parsed_email_to + '>' if agent.json().get('displayName') else parsed_email['To']
message['To'] = parsed_email['From']
message['Subject'] = parsed_email['Subject']
# Attach the components
result = r.json()['queryResult']
if 'fulfillmentMessages' in result:
for component in result['fulfillmentMessages']:
if 'text' in component:
message.attach(MIMEText(component['text']['text'][0], 'plain'))
elif 'simpleResponses' in component:
message.attach(MIMEText(component['simpleResponses']['simpleResponses'][0]['textToSpeech'], 'plain'))
if 'webhookPayload' in result:
if 'google' in result['webhookPayload']:
for component in result['webhookPayload']['google']['richResponse']['items']:
if 'simpleResponse' in component:
message.attach(MIMEText(component['simpleResponse']['textToSpeech'], 'plain'))
# Send the E-Mail
session = smtplib.SMTP(host, 587)
session.ehlo()
session.starttls()
session.ehlo()
session.login(user, password)
session.sendmail(message['From'], message['To'], message.as_string())
# Log response status
app.logger.info('E-Mail response sent to ' + parsed_email_from)
elif r.status_code == 404 and catchall:
# Make new E-Mail for the response
message = MIMEMultipart()
message['Message-ID'] = make_msgid()
message['In-Reply-To'] = parsed_email['Message-ID']
message['Reply-To'] = parsed_email['From']
message['References'] = parsed_email['Message-ID']
message['From'] = 'no-reply@' + parsed_email_to_domain
message['To'] = catchall
message['Subject'] = parsed_email['Subject']
message.attach(MIMEText(parsed_email_body, 'plain'))
# Send the E-Mail
session = smtplib.SMTP(host, 587)
session.ehlo()
session.starttls()
session.ehlo()
session.login(user, password)
session.sendmail(message['From'], message['To'], message.as_string())
# Log response status
app.logger.info('E-Mail response sent to ' + parsed_email_from)
else:
# Log request error
app.logger.error('Request failed')
app.logger.error('Status: ' + str(r.status_code))
app.logger.error(str(r.json()))
return "OK", 200
if __name__ == '__main__':
app.run(debug=debug, host='0.0.0.0', port=port) | mishushakov/dialogflow-sendgrid | inbox.py | inbox.py | py | 5,624 | python | en | code | 34 | github-code | 13 |
74324875537 | import sevseg
import sys, time
def format_digit(digit_str):
    """Wrap *digit_str* in ANSI escape codes for bold blue terminal text."""
    return '\033[1;34m' + digit_str + '\033[0m'
def format_separator():
    """Return the clock's ':' separator wrapped in bold-red ANSI codes."""
    colon = ':'
    return f'\033[1;31m{colon}\033[0m'
# Main display loop: repaint a seven-segment 12-hour clock once per second
# until the user presses Ctrl-C.
try:
    while True:
        print('\n' * 60)  # crude screen clear
        current_time = time.localtime()
        # 12-hour display: tm_hour is 0-23, and 0 must render as 12.
        hours = str(current_time.tm_hour % 12)
        if hours == '0':
            hours = '12'
        minutes = str(current_time.tm_min)
        seconds = str(current_time.tm_sec)
        # sevseg (project module) renders a zero-padded 2-digit value as
        # three rows of seven-segment ASCII art.
        hDigits = sevseg.getSevSegStr(hours, 2)
        mDigits = sevseg.getSevSegStr(minutes, 2)
        sDigits = sevseg.getSevSegStr(seconds, 2)
        hTopRow, hMiddleRow, hBottomRow = hDigits.splitlines()
        mTopRow, mMiddleRow, mBottomRow = mDigits.splitlines()
        sTopRow, sMiddleRow, sBottomRow = sDigits.splitlines()
        # Format and print the clock
        print(f'         \033[1;33m+---------------------+\033[0m         ')
        print(f'         | {format_digit(hTopRow)} * {format_digit(mTopRow)} * {format_digit(sTopRow)} |         ')
        print(f'         | {format_digit(hMiddleRow)} {format_separator()} {format_digit(mMiddleRow)} {format_separator()} {format_digit(sMiddleRow)} |         ')
        print(f'         | {format_digit(hBottomRow)} {format_separator()} {format_digit(mBottomRow)} {format_separator()} {format_digit(sBottomRow)} |         ')
        print(f'         \033[1;33m+---------------------+\033[0m         ')
        print()
        print('Press Ctrl-C to quit.')
        # Busy-wait (with short sleeps) until the wall-clock second changes.
        while True:
            time.sleep(0.01)
            if time.localtime().tm_sec != current_time.tm_sec:
                break
except KeyboardInterrupt:
    print('Digital clock')
    sys.exit()
| KradThed/python_projects | digital_clock.py | digital_clock.py | py | 1,582 | python | en | code | 1 | github-code | 13 |
6234852632 |
from django.http import HttpRequest
# Create your views here.
from common import error
from lib.http import render_json
from lib.sms import send_verify_code, check_verify_code
from user.logic import save_avatar_to_location, save_avatar_to_remote
from user.models import User
from user.models import UserForm
def get_verify_code(request:HttpRequest):
    '''Send an SMS verification code to the phone number in the query string.'''
    phonenum=request.GET.get('phonenum')
    send_verify_code(phonenum)
    return render_json(None)
def login(request:HttpRequest):
    '''Log a user in after checking the SMS verify code; the account is
    created on first login (nickname defaults to the phone number).'''
    phonenum=request.POST.get('phonenum')
    verify_code=request.POST.get('verify_code')
    # SMS service already succeeded upstream; only validate the code here.
    if not check_verify_code(phonenum,verify_code):
        raise error.UserVerifyFail()
    default_data={
        'nickname':phonenum,
        'phonenum':phonenum,
    }
    # Keyed on phonenum, so repeat logins reuse the same user row.
    user,created=User.objects.get_or_create(phonenum=phonenum,defaults=default_data)
    request.session['uid'] = user.pk
    # Return the birth fields plus the derived age to the client.
    attr_dict=user.to_dict(('birth_year','birth_month','birth_day'))
    attr_dict['age']=user.age
    return render_json(attr_dict)
def show_profile(request):
    '''Return the current user's profile as JSON.'''
    user=request.user
    profile=user.profile
    # NOTE(review): saving on a read path looks odd -- presumably forces
    # creation of a missing profile row; confirm.
    profile.save()
    return render_json(profile.to_dict())
def modify_profile(request):
    '''Modify profile of user -- NOTE(review): unimplemented stub, returns null.'''
    return render_json(None)
def upload_avatar(request:HttpRequest):
    '''Persist the uploaded avatar locally, then mirror it to remote storage.'''
    #Save image to location store
    save_path,file_name=save_avatar_to_location(request)
    #Save image to remote store
    save_avatar_to_remote(request,save_path,file_name)
    return render_json(None)
def modify_user(request:HttpRequest):
    '''Validate posted user fields via UserForm and echo the cleaned data.

    Raises error.UserHttpBad on invalid input.  NOTE(review): nothing is
    persisted here -- confirm whether a save step is missing.
    '''
    user_form=UserForm(request.POST)
    if not user_form.is_valid():
        raise error.UserHttpBad()
    return render_json(user_form.cleaned_data)
19019123171 | from django.contrib.auth.decorators import login_required
from annoying.decorators import render_to
from forms import ImportForm
from share.decorator import no_share
from import_class import PreviewImport, DatabaseImport, BaseImport
from handle_uploads import save_php, save_upload, get_last
from backupfromphp import PHPBackup, InvalidToken, InvalidURL
@render_to('export.html')
def export(request):
    """Render the export page; locals() supplies the template context."""
    return locals()
@render_to('import.html')
@no_share('NEVER')
def import_v(request):
    """Logbook import view.

    GET renders the upload form.  POST accepts either a file upload or a
    PHP-backup URL, saves it, and either previews or commits the import
    depending on which submit button was pressed.  Errors are surfaced via
    the template variable ``Error``; locals() is the template context.
    """
    fileform = ImportForm()
    ## get the previous uploaded file
    ## p=file, previous=dict with some metadata
    p, previous = get_last(request.display_user.id)
    if not request.method == 'POST':
        return locals()
    # whether or not we just display the contents, or actually commit the
    # changes depending on which submit button was clicked
    preview = request.POST.get('preview_p') or\
              request.POST.get('preview_u') or\
              request.POST.get('preview_f')
    if request.POST.get('import_f') or request.POST.get('preview_f'):
        #the file form was used
        f = request.FILES.get('file')
        url = None
        force_tsv = False
    elif request.POST.get('import_u') or request.POST.get('preview_u'):
        # the PHP backup thing was used, may throw errors
        url = request.POST.get('url')
        ba = PHPBackup(url)
        f = None
        force_tsv = True
    else:
        url = None
        f = None
        force_tsv = False
    locs = {}
    try:
        if f:
            save_upload(f, request.display_user)
        elif url:
            f = ba.get_file()
            save_php(f, request.display_user)
        #now it's go time
        locs = do_import(preview, request.display_user, force_tsv)
    except BaseImport.InvalidCSVError:
        Error = "Not a valid CSV file"
    except BaseImport.NoFileError:
        Error = "Could not find File"
    except InvalidToken:
        Error = "Invalid Token"
    except InvalidURL:
        Error = "Invalid URL"
    else:
        # the import finished and there are no errors
        if not preview:
            from backup.models import edit_logbook
            edit_logbook.send(sender=request.display_user)
    # Merge do_import's template variables into this view's locals.
    locs2 = locals()
    locs2.update(locs)
    return locs2
def do_import(preview, user, force_tsv):
    """
    An auxiliary view function. Returns variables that will be added to the
    template context, but this function does not take a request object.

    When *preview* is falsy the import is committed (DatabaseImport);
    otherwise it only builds preview tables (PreviewImport).
    """
    f, previous = get_last(user.id)
    #######################################################
    if not preview:
        im = DatabaseImport(user, f, force_tsv)
    else:
        im = PreviewImport(user, f, force_tsv)
    # do the actual import routine now
    im.action()
    from badges.utils import new_badge2
    new_badge2(users=[user])
    # Copy the importer's output tables into locals for the template.
    flight_out = im.flight_out
    non_out = im.non_out
    records_out = im.records_out
    plane_out = im.plane_out
    flight_header = im.flight_header
    non_flight_header = im.non_flight_header
    plane_header = im.plane_header
    # Drop the importer itself so it does not leak into locals().
    del im
    return locals()
| priestc/flightloggin2 | manage/views.py | views.py | py | 3,259 | python | en | code | 18 | github-code | 13 |
1942747725 | from flask import Flask, request, jsonify
class NetManager:
    """Thin Flask wrapper exposing the game's rank endpoint on port 8000."""

    def __init__(self):
        self.app = Flask(__name__)
        # Bind on all interfaces; port is fixed for the game client.
        self.ip = "0.0.0.0"
        self.port = 8000
        self.setup_routes()

    def run(self):
        """Start the blocking Flask development server."""
        self.app.run(host = self.ip, port = self.port)

    def setup_routes(self):
        """Register URL rules on the wrapped Flask app."""
        self.app.add_url_rule('/rank', 'rank', self.rank, methods=['POST'])

    def rank(self):
        """Handle POST /rank.

        Expects a JSON body with an 'action' key.  For
        'send_get_rank_data_c2s' it echoes the userId with a (currently
        hard-coded) rank index of 50.

        Bug fix: the original fell through with ``pass`` for any other
        action, returning None and making Flask raise a 500; unknown
        actions now get an explicit 400 JSON error.
        NOTE(review): request.get_json() returns None for non-JSON bodies,
        so data.get would still raise -- confirm clients always send JSON.
        """
        data = request.get_json()
        action = data.get('action')
        if action == "send_get_rank_data_c2s":
            userId = data.get('userId')
            response = {
                'userId': userId,
                'rankIndex': 50
            }
            return jsonify(response)
        else:
            return jsonify({'error': 'unknown action', 'action': action}), 400
| PeiXinHuang/MiniGamePro | Server/NetManager.py | NetManager.py | py | 738 | python | en | code | 0 | github-code | 13 |
class Solution(object):
    def reverseVowels(self, s):
        """
        Reverse only the vowels of *s*, leaving all other characters in place.

        :type s: str
        :rtype: str

        Classic two-pointer sweep: advance each side to the next vowel,
        swap the pair, and move inward.  O(n) time / O(n) buffer.  (The
        previous half-splitting version was O(n^2) because of repeated
        ``list.insert(0, ...)`` calls.)
        """
        vowels = set('aeiouAEIOU')
        chars = list(s)
        left, right = 0, len(chars) - 1
        while left < right:
            if chars[left] not in vowels:
                left += 1
            elif chars[right] not in vowels:
                right -= 1
            else:
                chars[left], chars[right] = chars[right], chars[left]
                left += 1
                right -= 1
        return ''.join(chars)
print(Solution().reverseVowels("leetocode"))
| yoonhoohwang/Algorithm | LeetCode/345. Reverse Vowels of a String.py | 345. Reverse Vowels of a String.py | py | 887 | python | en | code | 2 | github-code | 13 |
def binarySearch(nums, target):
    """Iterative binary search over a sorted sequence.

    Returns the index of *target* in *nums*, or -1 when it is absent.
    """
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        probe = nums[mid]
        if probe < target:
            lo = mid + 1
        elif probe > target:
            hi = mid - 1
        else:
            return mid
    return -1
# Pattern:
# Two pointers. Look at the middle of the array, then make a comparison with target and visited value.
# Move the pointers accordingly.
| dbasso98/LeetCode-Grind | python/binary_search.py | binary_search.py | py | 406 | python | en | code | 0 | github-code | 13 |
8965442042 | import routeros_api
import pandas as pd
def connect(host, username, password):
    """Open a RouterOS API session on port 8740; returns (api, connection pool)."""
    connection = routeros_api.RouterOsApiPool(host,
                                              username=username,
                                              password=password,
                                              port=8740,
                                              plaintext_login=True)
    api = connection.get_api()
    print('Conexión Establecida')
    return api, connection
def disconnect(connection):
    """Close the RouterOS API pool.

    NOTE(review): the success message prints before disconnect() runs, so
    it appears even when disconnection fails.
    """
    print("Desconexión Exitosa")
    return connection.disconnect()
def obteneripinfo(app,ip):
    """Look *ip* up in the router's ARP table.

    Returns (matching entries, arp resource); the entry list is empty when
    the address is unknown.
    """
    lista = app.get_resource('/ip/arp')
    user = lista.get(address=ip)
    return user, lista
def desactivarfile(file, app):
    """Disable every ARP entry whose address appears in the 'DIRECCION IP'
    column of the Excel workbook *file*.

    Returns the (id, disabled-flag) pair of the LAST processed address,
    matching the historical interface, or ('', '') for an empty sheet.
    Bug fix: the original left _id/enable undefined when the sheet was
    empty, raising UnboundLocalError at the return.
    """
    file = pd.read_excel(file)
    ip = file['DIRECCION IP']
    print(len(ip))
    _id, enable = '', ''  # defined up front so an empty sheet cannot crash
    for dire in ip:
        print(dire)
        _id, enable = desactivar(app, dire)
    return _id, enable
def activarfile(file, app):
    """Enable every ARP entry whose address appears in the 'DIRECCION IP'
    column of the Excel workbook *file*.

    Returns the (id, disabled-flag) pair of the LAST processed address,
    matching the historical interface, or ('', '') for an empty sheet.
    Bug fix: the original left _id/enable undefined when the sheet was
    empty, raising UnboundLocalError at the return.
    """
    file = pd.read_excel(file)
    ip = file['DIRECCION IP']
    print(len(ip))
    _id, enable = '', ''  # defined up front so an empty sheet cannot crash
    for dire in ip:
        print(dire)
        _id, enable = activar(app, dire)
    return _id, enable
def activar(app, address):
    """Re-enable the ARP entry for *address*.

    Returns (entry id, refreshed 'disabled' flag), or ('', '') when the
    address is not present in the ARP table.
    """
    entry, arp = obteneripinfo(app, address)
    if not entry:
        return '', ''
    entry_id = entry[0]['id']
    arp.set(id=entry_id, address=address, disabled='false')
    return entry_id, arp.get(address=address)[0]['disabled']
def desactivar(app, address):
    """Disable the ARP entry for *address*.

    Returns (entry id, refreshed 'disabled' flag), or ('', '') when the
    address is not present in the ARP table.
    """
    entry, arp = obteneripinfo(app, address)
    if not entry:
        return '', ''
    entry_id = entry[0]['id']
    arp.set(id=entry_id, address=address, disabled='true')
    return entry_id, arp.get(address=address)[0]['disabled']
| edopore/api-graphic | api.py | api.py | py | 1,745 | python | en | code | 0 | github-code | 13 |
22248105122 | import speech_recognition as sr
from DataTrain import DataTrain
import pyttsx3
import time
class VoiceRecognisation:
    """Speaks a prompt via pyttsx3, records a reply from the microphone,
    transcribes it with Google speech recognition, and feeds the text to
    the DataTrain classifier."""
    def speech(self, string):
        """Speak *string*, listen for the user's answer, and return the
        space-joined filtered query produced by DataTrain."""
        engine = pyttsx3.init()
        query = string
        r = sr.Recognizer()
        with sr.Microphone() as source:
            rate = engine.getProperty('rate')
            # NOTE(review): setProperty(rate, 1) passes the current rate
            # VALUE as the property name -- presumably 'rate' was meant.
            engine.setProperty(rate, 1)
            engine.setProperty('voice','HKEY_LOCAL_MACHINE/SOFTWArE/Microsoft/Speech/Voices/Tokens/TTS_MS_EN-US_ZIRA_11.0')
            engine.runAndWait()
            time.sleep(2)
            engine.say(string)
            print(string)
            engine.runAndWait()
            audio = r.listen(source)
            try:
                text = r.recognize_google(audio)
                print("You said : {}".format(text))
            except:
                print("Sorry i could not recognised your voice")
                # NOTE(review): this calls the UNBOUND method with "hello"
                # as self and query as string -- a retry was probably
                # intended (self.speech(query)); confirm.
                text = VoiceRecognisation.speech("hello",query)
            text = text.title()
            FilteredQuery=DataTrain.traindata("Data Train",text)
            str_join = " ".join(FilteredQuery)
            print(str_join)
            return str_join
| bornwinner54/HelloQuery | HelloQuery/VoiceRecognisation.py | VoiceRecognisation.py | py | 1,135 | python | en | code | 0 | github-code | 13 |
def p_function(word):
    """Compute the KMP prefix (failure) function of *word*.

    Element i is the length of the longest proper prefix of word[:i + 1]
    that is also a suffix of it.
    """
    n = len(word)
    prefix = [0] * n
    for i in range(1, n):
        candidate = prefix[i - 1]
        # Fall back along shorter borders until one can be extended.
        while candidate > 0 and word[i] != word[candidate]:
            candidate = prefix[candidate - 1]
        if word[i] == word[candidate]:
            candidate += 1
        prefix[i] = candidate
    return prefix
def kmp(word, string):
    """Return the 1-based start index of every occurrence of *word* in
    *string*, found with Knuth-Morris-Pratt over word + '#' + string."""
    combined = word + '#' + string
    prefix_values = p_function(combined)
    target = len(word)
    matches = []
    for pos, border in enumerate(prefix_values):
        if border == target:
            # pos is an index into combined; shift back to a 1-based
            # position inside string.
            matches.append(pos - 2 * target + 1)
    return matches
# Read the pattern and the text from stdin, then report every match.
inp_word = input()
inp_string = input()
answer = kmp(inp_word, inp_string)
# Output: the number of occurrences, then their 1-based start positions.
print(len(answer))
print(*answer, sep=' ')
| StepDan23/MADE_algorithms | hw_14/c.py | c.py | py | 665 | python | en | code | 0 | github-code | 13 |
71084000979 | courses = {}
# Map course name -> list of enrolled student names, read from stdin as
# "course : student" lines until the sentinel "end".
while True:
    command = input()
    if command == 'end':
        break
    data = command.split(' : ')
    course_name = data[0]
    student_name = data[1]
    if course_name not in courses.keys():
        courses[course_name]=[]
    courses[course_name].append(student_name)
# Report every course with its enrolment count and its students.
for course in courses.keys():
    print(f'{course}: {len(courses[course])}')
    for student in courses[course]:
        print(f'-- {student}')
70838594579 | import os
import logging
import gin
import typing
import cv2
import numpy as np
import pandas as pd
import torch
import matplotlib.pyplot as plt
from soccer_robot_perception.utils.metrics import (
calculate_metrics,
get_confusion_matrix,
calculate_iou,
calculate_det_metrics,
iou_metrics_preprocess,
)
from soccer_robot_perception.utils.detection_utils import center_of_shape, plot_blobs
LOGGER = logging.getLogger(__name__)
@gin.configurable
def evaluate_model_old(
    model_path: str,
    report_output_path: str,
    seg_criterion: torch.nn,
    det_criterion: typing.Callable,
    net: torch.nn.Module,
    data_loaders: typing.Tuple[
        torch.utils.data.DataLoader,
        torch.utils.data.DataLoader,
        torch.utils.data.DataLoader,
    ],
    wandb_key,
    loss_factor: float = 0.5,
    num_classes: int = 3,
    visualize: bool = False,
    input_width: int = 640,
    input_height: int = 480,
    run_name="soccer-robot",
) -> None:
    """Evaluate a trained perception model on the test split and write an
    Excel report (precision/recall/F1/accuracy, per-class IoU, confusion
    matrix, per-object detection metrics) to *report_output_path*.

    Args:
        model_path: path of the trained state dict to load into *net*.
        report_output_path: directory the report.xlsx (and, when
            *visualize* is True, side-by-side prediction images) is
            written to.
        seg_criterion: loss applied to the segmentation head.
        det_criterion: loss applied to the detection head.
        net: network architecture the state dict is loaded into; must
            return (detection_out, segmentation_out).
        data_loaders: (train, valid, test) loaders; only the test part
            is evaluated here.
        wandb_key: W&B credentials — not used by this function.
        loss_factor: not used by this function.
        num_classes: number of segmentation classes.
        visualize: when True, saves a 2x3 matplotlib figure per sample.
        input_width: network input width; detection/blob maps are at
            input_width / 4.
        input_height: network input height; detection/blob maps are at
            input_height / 4.
        run_name: not used by this function.
    """
    # Create the report (and optional visualization) directories up front.
    if not os.path.exists(os.path.dirname(report_output_path)):
        LOGGER.info(
            "Output directory does not exist. Creating directory %s",
            os.path.dirname(report_output_path),
        )
        os.makedirs(os.path.dirname(report_output_path))
    if visualize and (
        not os.path.exists(os.path.join(report_output_path, "output_images"))
    ):
        os.makedirs(os.path.join(report_output_path, "output_images"))
        LOGGER.info(
            "Saving images in the directory: %s",
            os.path.join(report_output_path, "output_images"),
        )
    # Evaluation runs entirely on CPU.
    device = torch.device("cpu")
    state_test = torch.load(model_path, map_location=device)
    net.load_state_dict(state_test)
    net.eval()
    # instantiate dataset
    train_loader, valid_loader, test_loader = data_loaders
    LOGGER.info("Evaluating Soccer Robot Perception using the model, %s", model_path)
    LOGGER.info("Results will be written to the path, %s", report_output_path)
    LOGGER.info("Ready to start evaluating!")
    # Per-image metric tables; a "mean" row is appended after the loop.
    df_seg_columns = [
        "seg loss",
        "precision",
        "recall",
        "f1-score",
        "accuracy",
    ]
    df_det_columns = [
        "det loss",
        "tp",
        "fp",
        "tn",
        "fn",
        "precision",
        "recall",
        "f1-score",
        "accuracy",
        "fdr",
    ]
    df_micro = pd.DataFrame(columns=df_seg_columns)
    df_macro = pd.DataFrame(columns=df_seg_columns)
    df_iou = pd.DataFrame(columns=["bg", "field", "lines"])
    confusion_matrix_array = np.zeros((num_classes, num_classes))
    precision_per_class = np.zeros((num_classes))
    recall_per_class = np.zeros((num_classes))
    f1score_per_class = np.zeros((num_classes))
    accuracy_per_class = np.zeros((num_classes))
    df_det_ball = pd.DataFrame(columns=df_det_columns)
    df_det_robot = pd.DataFrame(columns=df_det_columns)
    df_det_goalpost = pd.DataFrame(columns=df_det_columns)
    # NOTE(review): test_loader is iterated as a collection of loaders —
    # confirm it is a tuple/list of DataLoaders, not a single DataLoader.
    for loader in test_loader:
        for data in loader:
            LOGGER.info("Predicting on image: %d", len(df_micro) + 1)
            input_image = data["image"]
            det_out, seg_out = net(input_image)
            det_out_collected = []
            det_target_collected = []
            seg_out_collected = []
            seg_target_collected = []
            # To calculate loss for each data
            # (split the batch by task so each head is scored on its own data)
            for n, i in enumerate(data["dataset_class"]):
                if i == "detection":
                    det_target_collected.append(data["det_target"][n].unsqueeze_(0))
                    det_out_collected.append(det_out[n].unsqueeze_(0))
                else:
                    seg_target_collected.append(data["seg_target"][n].unsqueeze_(0))
                    seg_out_collected.append(seg_out[n].unsqueeze_(0))
            if len(det_target_collected) != 0:
                det_target_tensor = torch.cat(det_target_collected, dim=0)
                det_out_tensor = torch.cat(det_out_collected, dim=0)
                det_loss = det_criterion(det_out_tensor, det_target_tensor)
            # Blob centers per channel: 1 = ball, 2 = robot, 3 = goalpost.
            ball_points = center_of_shape(det_out[0][0].detach().numpy(), 1)
            robot_points = center_of_shape(det_out[0][1].detach().numpy(), 2)
            goalpost_points = center_of_shape(det_out[0][2].detach().numpy(), 3)
            # ball_points = [
            #     [53.0, 91.0, 1.0],
            #     [61, 81, 1],
            #     [0, 19, 1],
            # ]
            #
            # robot_points = [
            #     [50, 50, 2],
            #     [98.0, 12.0, 2.0],
            #     [14.0, 65.0, 2.0],
            #     [89.0, 87.0, 2.0],
            # ]
            # goalpost_points = [[13., 109., 3.],
            #                    [13., 112., 3.],
            #                    [13., 113., 3.],
            #                    ]
            # goalpost_points = []
            # Rendered blob map (at 1/4 the input resolution) used for the
            # visualization panel below.
            blob_map = np.zeros(
                (3, int(input_height / 4), int(input_width / 4))
            )
            ball_map = plot_blobs(ball_points, 6)
            robot_map = plot_blobs(robot_points, 12)
            goalpost_map = plot_blobs(goalpost_points, 6)
            blob_map[0] = ball_map
            blob_map[1] = robot_map
            blob_map[2] = goalpost_map
            # Detection metrics per object class against the annotated
            # blob centers.
            if len(det_target_collected) != 0:
                (
                    tp,
                    fp,
                    tn,
                    fn,
                    precision,
                    recall,
                    f1,
                    accuracy,
                    fdr,
                ) = calculate_det_metrics(ball_points, data["blob_centers"][0], 1)
                df_det_ball.loc[len(df_det_ball)] = [
                    det_loss.detach().numpy(),
                    tp,
                    fp,
                    tn,
                    fn,
                    precision,
                    recall,
                    f1,
                    accuracy,
                    fdr,
                ]
                (
                    tp,
                    fp,
                    tn,
                    fn,
                    precision,
                    recall,
                    f1,
                    accuracy,
                    fdr,
                ) = calculate_det_metrics(robot_points, data["blob_centers"][0], 2)
                df_det_robot.loc[len(df_det_robot)] = [
                    det_loss.detach().numpy(),
                    tp,
                    fp,
                    tn,
                    fn,
                    precision,
                    recall,
                    f1,
                    accuracy,
                    fdr,
                ]
                (
                    tp,
                    fp,
                    tn,
                    fn,
                    precision,
                    recall,
                    f1,
                    accuracy,
                    fdr,
                ) = calculate_det_metrics(goalpost_points, data["blob_centers"][0], 3)
                df_det_goalpost.loc[len(df_det_goalpost)] = [
                    det_loss.detach().numpy(),
                    tp,
                    fp,
                    tn,
                    fn,
                    precision,
                    recall,
                    f1,
                    accuracy,
                    fdr,
                ]
            else:
                # No detection samples in this batch: record zero rows so the
                # per-image tables stay aligned.
                det_loss = torch.tensor(
                    0, dtype=torch.float32, requires_grad=True, device=device
                )
                df_det_ball.loc[len(df_det_ball)] = [
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                ]
                df_det_robot.loc[len(df_det_robot)] = [
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                ]
                df_det_goalpost.loc[len(df_det_goalpost)] = [
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                    0,
                ]
            # Segmentation branch: pixel-wise micro/macro metrics plus
            # per-class IoU and an accumulated confusion matrix.
            if len(seg_target_collected) != 0:
                seg_target_tensor = torch.cat(seg_target_collected, dim=0)
                seg_out_tensor = torch.cat(seg_out_collected, dim=0)
                seg_loss = seg_criterion(seg_out_tensor, seg_target_tensor.long())
                seg_out_max = torch.argmax(seg_out_tensor, dim=1)
                outputs_seg_flatten = torch.flatten(seg_out_max, start_dim=0).unsqueeze_(0)
                labels_seg_flatten = torch.flatten(
                    seg_target_tensor, start_dim=0
                ).unsqueeze_(0)
                (
                    target_bg_iou_map,
                    target_field_iou_map,
                    target_lines_iou_map,
                ) = iou_metrics_preprocess(seg_target_tensor)
                (
                    output_bg_iou_map,
                    output_field_iou_map,
                    output_lines_iou_map,
                ) = iou_metrics_preprocess(seg_out_max)
                iou_bg = calculate_iou(target_bg_iou_map, output_bg_iou_map)
                iou_field = calculate_iou(target_field_iou_map, output_field_iou_map)
                iou_lines = calculate_iou(target_lines_iou_map, output_lines_iou_map)
                df_iou.loc[len(df_iou)] = [
                    iou_bg.detach().item(),
                    iou_field.detach().item(),
                    iou_lines.detach().item(),
                ]
                precision, recall, f1score, accuracy = calculate_metrics(
                    labels_seg_flatten.detach().numpy(),
                    outputs_seg_flatten.detach().numpy(),
                    False,
                    "micro",
                )
                df_micro.loc[len(df_micro)] = [
                    seg_loss.detach().numpy(),
                    precision,
                    recall,
                    f1score,
                    accuracy,
                ]
                precision, recall, f1score, accuracy = calculate_metrics(
                    labels_seg_flatten.detach().numpy(),
                    outputs_seg_flatten.detach().numpy(),
                    False,
                    "macro",
                )
                df_macro.loc[len(df_macro)] = [
                    seg_loss.detach().numpy(),
                    precision,
                    recall,
                    f1score,
                    accuracy,
                ]
                image_precision, image_recall, image_f1score, _ = calculate_metrics(
                    labels_seg_flatten.detach().numpy(),
                    outputs_seg_flatten.detach().numpy(),
                    True,
                )
                precision_per_class = precision_per_class + image_precision
                recall_per_class = recall_per_class + image_recall
                f1score_per_class = f1score_per_class + image_f1score
                confusion_matrix_array = confusion_matrix_array + get_confusion_matrix(
                    labels_seg_flatten.detach().numpy(),
                    outputs_seg_flatten.detach().numpy(),
                )
                accuracy_per_class = accuracy_per_class + (
                    confusion_matrix_array.diagonal() / confusion_matrix_array.sum(axis=1)
                )
            else:
                # No segmentation samples in this batch: record zero rows.
                seg_loss = torch.tensor(
                    0, dtype=torch.float32, requires_grad=True, device=device
                )
                df_iou.loc[len(df_iou)] = [0, 0, 0]
                df_micro.loc[len(df_micro)] = [
                    seg_loss.detach().numpy(),
                    0,
                    0,
                    0,
                    0,
                ]
                df_macro.loc[len(df_macro)] = [
                    seg_loss.detach().numpy(),
                    0,
                    0,
                    0,
                    0,
                ]
                precision_per_class = precision_per_class + 0
                recall_per_class = recall_per_class + 0
                f1score_per_class = f1score_per_class + 0
                accuracy_per_class = accuracy_per_class + 0
                confusion_matrix_array = confusion_matrix_array + 0
            loss = seg_loss + det_loss
            LOGGER.info(
                "image: %d, loss: %f, segment loss: %f, regression loss: %f",
                len(df_micro),
                loss.item(),
                seg_loss.item(),
                det_loss.item(),
            )
            # Save a 2x3 panel: input / det out / seg out over
            # det target / seg target / rendered blobs.
            # NOTE(review): data[...][n] reuses the last n from the batch
            # loop above — confirm that is the intended sample.
            if visualize:
                new_image = input_image[0].permute(1, 2, 0).detach().numpy()
                plt.subplot(231)
                plt.imshow(cv2.resize(new_image, (160, 120), cv2.INTER_NEAREST))
                plt.title("Input")
                plt.subplot(232)
                plt.imshow((det_out[0].detach().permute(1, 2, 0).numpy() * 255).astype(np.uint8))
                plt.title("Det out")
                plt.subplot(233)
                plt.imshow((torch.argmax(seg_out, dim=1)[0].detach().numpy()), cmap="gray")
                plt.title("Seg out")
                if len(det_target_collected) != 0:
                    plt.subplot(234)
                    plt.imshow((data["det_target"][n][0].detach().permute(1, 2, 0).numpy() * 255).astype(np.uint8))
                    plt.title("Det tar")
                else:
                    plt.subplot(234)
                    plt.imshow(np.zeros((120, 160)), cmap='gray')
                    plt.title("Det tar")
                if len(seg_target_collected) != 0:
                    plt.subplot(235)
                    plt.imshow(data["seg_target"][n][0].numpy(), cmap="gray")
                    plt.title("Seg tar")
                else:
                    plt.subplot(235)
                    plt.imshow(np.zeros((120, 160)), cmap='gray')
                    plt.title("Seg tar")
                plt.subplot(236)
                plt.imshow((np.transpose(blob_map, (1, 2, 0)) * 255).astype(np.uint8))
                plt.title("Blobs")
                plt.savefig(
                    report_output_path
                    + "/output_images/"
                    + str(len(df_micro) + 1)
                    + "_pred.jpg"
                )
                plt.close()
    # Append mean rows and write every metric table to one Excel workbook.
    df_iou.loc["mean"] = df_iou.mean()
    df_micro.loc["mean"] = df_micro.mean()
    df_macro.loc["mean"] = df_macro.mean()
    df_confusion_matrix = pd.DataFrame(confusion_matrix_array / len(df_micro))
    df_precision_per_class = pd.DataFrame(precision_per_class / len(df_micro))
    df_recall_per_class = pd.DataFrame(recall_per_class / len(df_micro))
    df_f1score_per_class = pd.DataFrame(f1score_per_class / len(df_micro))
    df_accuracy_per_class = pd.DataFrame(accuracy_per_class / len(df_micro))
    df_det_ball.loc["mean"] = df_det_ball.mean()
    df_det_robot.loc["mean"] = df_det_robot.mean()
    df_det_goalpost.loc["mean"] = df_det_goalpost.mean()
    excel_writer = pd.ExcelWriter(
        os.path.join(report_output_path, "report.xlsx"), engine="xlsxwriter"
    )
    df_micro.to_excel(excel_writer, sheet_name="micro")
    df_macro.to_excel(excel_writer, sheet_name="macro")
    df_iou.to_excel(excel_writer, sheet_name="iou")
    df_confusion_matrix.to_excel(excel_writer, sheet_name="normalized_confusion_matrix")
    df_precision_per_class.to_excel(excel_writer, sheet_name="precision_per_class")
    df_recall_per_class.to_excel(excel_writer, sheet_name="recall_per_class")
    df_f1score_per_class.to_excel(excel_writer, sheet_name="f1score_per_class")
    df_accuracy_per_class.to_excel(excel_writer, sheet_name="accuracy_per_class")
    df_det_ball.to_excel(excel_writer, sheet_name="ball_det")
    df_det_robot.to_excel(excel_writer, sheet_name="robot_det")
    df_det_goalpost.to_excel(excel_writer, sheet_name="goalpost_det")
    excel_writer.save()
    LOGGER.info("Results were written to %s", report_output_path)
| DeepanChakravarthiPadmanabhan/Soccer_Robot_Perception | soccer_robot_perception/evaluate/evaluate_model_concatenated_dataset.py | evaluate_model_concatenated_dataset.py | py | 17,452 | python | en | code | 1 | github-code | 13 |
7383216239 | from aws_cdk import (
Duration,
Stack,
aws_s3 as s3,
aws_cloudfront as cloudfront,
aws_cloudfront_origins as origins,
aws_certificatemanager as acm,
aws_route53 as rt53,
aws_route53_targets as targets,
aws_codestarconnections as codestar,
aws_codepipeline as codepipeline,
aws_codepipeline_actions as cp_actions,
aws_codebuild as codebuild,
aws_codestarnotifications as notifications,
aws_iam as iam,
aws_sns as sns,
RemovalPolicy,
)
from constructs import Construct
class InfrastructureStack(Stack):
    """CDK stack for a Jekyll static blog.

    Provisions the S3 website bucket (plus a private media bucket behind a
    CloudFront OAI), an ACM certificate with Route 53 DNS validation and
    alias records, two CloudFront distributions, and a CodePipeline
    (GitHub source -> Jekyll CodeBuild -> S3 deploy -> CloudFront
    invalidation).
    """
    def __init__(self, scope: Construct, construct_id: str, domain_name: str,base_domain:str, repo_owner: str, repo_name: str, repo_branch: str, **kwargs) -> None:
        """Build every construct of the stack.

        :param domain_name: fully qualified site domain (apex); a
            ``www.`` alternative name is added automatically.
        :param base_domain: name of the existing Route 53 hosted zone
            the records/validation go into.
        :param repo_owner: GitHub organization/user of the source repo.
        :param repo_name: GitHub repository name.
        :param repo_branch: branch the pipeline builds from.
        """
        super().__init__(scope, construct_id, **kwargs)
        # Create an S3 bucket for the Static Site
        # NOTE(review): the bucket is fully public (S3 website-hosting
        # origin) — confirm this exposure is intended.
        static_bucket = s3.Bucket(
            self,'static-site-bucket',
            encryption=s3.BucketEncryption.S3_MANAGED,
            versioned=True,
            removal_policy=RemovalPolicy.DESTROY,
            block_public_access=s3.BlockPublicAccess(block_public_acls=False,block_public_policy=False,ignore_public_acls=False,restrict_public_buckets=False),
            website_index_document="index.html",
            website_error_document="404.html",
            public_read_access=True
        )
        # Expire old object versions so the versioned bucket does not grow
        # without bound.
        static_bucket.add_lifecycle_rule(
            enabled=True,
            expired_object_delete_marker=True,
            abort_incomplete_multipart_upload_after=Duration.days(10),
            noncurrent_versions_to_retain=5,
            noncurrent_version_expiration=Duration.days(60)
        )
        # Create an S3 bucket for temp media resource
        media_bucket = s3.Bucket(
            self,'media-bucket',
            encryption=s3.BucketEncryption.S3_MANAGED,
            versioned=True,
            removal_policy=RemovalPolicy.DESTROY,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL
        )
        media_bucket.add_lifecycle_rule(
            enabled=True,
            expired_object_delete_marker=True,
            abort_incomplete_multipart_upload_after=Duration.days(10),
            noncurrent_versions_to_retain=5,
            noncurrent_version_expiration=Duration.days(60)
        )
        # Lookup Route53 Hosted Zone
        hz = rt53.HostedZone.from_lookup(self,
            "hosted_zone",
            domain_name = base_domain
        )
        # Create ACM Cert & DNS Validation
        # https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_certificatemanager/Certificate.html
        acm_static_site_cert = acm.Certificate(self,"ACMStaticSiteCert",
            domain_name=domain_name,
            subject_alternative_names=[f"www.{domain_name}"],
            validation=acm.CertificateValidation.from_dns(hz)
        )
        # Create CloudFront OAI
        # https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_cloudfront/OriginAccessIdentity.html
        cf_oai = cloudfront.OriginAccessIdentity(self, "OAI", comment="access to static site media bucket")
        # Create CloudFront Distribution
        # https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_cloudfront/Distribution.html
        cf_distro = cloudfront.Distribution(self,'distro',
            enabled= True,
            certificate=acm_static_site_cert,
            comment="Distro to host the static site",
            domain_names= [
                domain_name,
                f"www.{domain_name}"
            ],
            price_class=cloudfront.PriceClass.PRICE_CLASS_100,
            default_root_object="index.html",
            default_behavior=cloudfront.BehaviorOptions(
                allowed_methods=cloudfront.AllowedMethods.ALLOW_GET_HEAD_OPTIONS,
                viewer_protocol_policy=cloudfront.ViewerProtocolPolicy.REDIRECT_TO_HTTPS,
                origin=origins.S3Origin(
                    origin_path="/",
                    bucket=static_bucket
                )
            ),
            minimum_protocol_version=cloudfront.SecurityPolicyProtocol.TLS_V1_2_2018,
            http_version=cloudfront.HttpVersion.HTTP2_AND_3
        )
        # Second distribution serving the private media bucket through the
        # OAI (no custom domain attached).
        media_distro = cloudfront.Distribution(self,'mediaDistro',
            enabled= True,
            comment="Distro to host the media for the static site",
            price_class=cloudfront.PriceClass.PRICE_CLASS_100,
            default_root_object="index.html",
            default_behavior=cloudfront.BehaviorOptions(
                allowed_methods=cloudfront.AllowedMethods.ALLOW_GET_HEAD_OPTIONS,
                viewer_protocol_policy=cloudfront.ViewerProtocolPolicy.REDIRECT_TO_HTTPS,
                origin=origins.S3Origin(
                    origin_access_identity=cf_oai,
                    origin_path="/",
                    bucket=media_bucket
                )
            ),
            minimum_protocol_version=cloudfront.SecurityPolicyProtocol.TLS_V1_2_2018,
            http_version=cloudfront.HttpVersion.HTTP2_AND_3
        )
        # Create Route 53 record for CloudFront Distro
        # https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_route53/ARecord.html
        # Route 53 record for apex record
        rt53.ARecord(self,"CFAliasRecord",
            zone=hz,
            target=rt53.RecordTarget.from_alias(targets.CloudFrontTarget(cf_distro)),
            record_name=f"{domain_name}"
        )
        # Route 53 record to for www
        rt53.ARecord(self,"CfWWWAliasRecord",
            zone=hz,
            target=rt53.RecordTarget.from_alias(targets.CloudFrontTarget(cf_distro)),
            record_name=f"www.{domain_name}"
        )
        # Create CodeStar Connection for GitHub integration
        # https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_codestarconnections/CfnConnection.html
        # Note: This connection will be created in a pending state and must be completed on the AWS Console
        codestar_github_connection = codestar.CfnConnection(self,"GithubConnection",
            connection_name="jekyll-static-site",
            provider_type="GitHub"
        )
        # Create CodePipeline for CI/CD
        # https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_codepipeline/Pipeline.html
        # Create the pipeline
        pipeline = codepipeline.Pipeline(self,"pipeline",
            pipeline_name="static-blog",
        )
        # Apply a removal policy to destroy the S3 artifacts bucket when the pipeline is destroyed
        pipeline.apply_removal_policy(RemovalPolicy.DESTROY)
        # Stage: Pull repo from Github
        source_artifact = codepipeline.Artifact()
        source_stage = pipeline.add_stage(stage_name="Source")
        source_stage.add_action(cp_actions.CodeStarConnectionsSourceAction(
            action_name="Github-Source",
            owner=repo_owner,
            repo=repo_name,
            connection_arn=codestar_github_connection.get_att("ConnectionArn").to_string(),
            branch=repo_branch,
            output=source_artifact
        ))
        # Stage: Build Static Site and push to artifact
        build_artifact = codepipeline.Artifact()
        build_jekyll_site = codebuild.PipelineProject(
            scope=self,
            id="BuildJekyllSite",
            build_spec=codebuild.BuildSpec.from_object(
                dict(
                    version="0.2",
                    phases={
                        "install":{
                            "commands": [
                                "cd blog",
                                "gem install bundler jekyll:4.2.2",
                                "bundle install"
                            ]
                        },
                        "build": {
                            "commands": [
                                "JEKYLL_ENV=production bundle exec jekyll build"
                            ]
                        }
                    },
                    artifacts={
                        "files": ["**/*"],
                        "base-directory": "blog/_site",
                        "name": "jekyll-static-blog-$(date +%Y-%m-%d)",
                    },
                )
            ),
        )
        build_stage = pipeline.add_stage(stage_name="Build-Site")
        build_stage.add_action(cp_actions.CodeBuildAction(
            action_name="Build-Static-Site",
            project=build_jekyll_site,
            input=source_artifact,
            run_order=1,
            outputs=[build_artifact]
        ))
        # Stage: Deploy static site from artifact created in build stage
        deploy_stage = pipeline.add_stage(stage_name="Deploy-Site")
        # Action: Move media asset files from temp media bucket to static-site bucket
        # Action: Deploy Static Site code from build artficat to S3 origin bucket
        deploy_stage.add_action(cp_actions.S3DeployAction(
            bucket=static_bucket,
            input=build_artifact,
            action_name="Deploy-To-S3",
            run_order=1
        ))
        # Stage: Create invalidation in cloudfront to force it to recache files from origin
        # Role limited to CreateInvalidation on this one distribution.
        cf_invalidate_iam_role = iam.Role(
            scope=self,
            id="invalidationRole",
            assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"),
        )
        cf_invalidate_iam_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[
                    f"arn:aws:cloudfront::{self.account}:distribution/{cf_distro.distribution_id}"
                ],
                actions=["cloudfront:CreateInvalidation"],
            )
        )
        cloudfront_invalidate = codebuild.PipelineProject(
            scope=self,
            id="CloudFrontInvalidateProject",
            role=cf_invalidate_iam_role,
            build_spec=codebuild.BuildSpec.from_object(
                dict(
                    version="0.2",
                    phases={
                        "build": {
                            "commands": [
                                f"aws cloudfront create-invalidation --distribution-id '{cf_distro.distribution_id}' --paths '/*'"
                            ]
                        }
                    },
                )
            ),
        )
        update_cf_stage = pipeline.add_stage(stage_name="Update-Cloudfront")
        update_cf_stage.add_action(cp_actions.CodeBuildAction(
            action_name="Invalidate-CloudFront",
            project=cloudfront_invalidate,
            input=build_artifact,
            run_order=1
        ))
# Create SNS Topic for Deployment
# deployment_sns_topic = sns.Topic(self,"deployment_sns_topic",
# topic_name="jekyll-blog-deployment",
# display_name="jekyll-blog-deployment")
# Add a rule to the codepipeline to allow it to publish to the SNS Topic
# deployment_notification_rule = notifications.NotificationRule(self,
# "deployment_notification_rule",
# source=pipeline,
# events=["codepipeline-pipeline-pipeline-execution-failed",
# "codepipeline-pipeline-pipeline-execution-succeeded",
# "codepipeline-pipeline-pipeline-execution-started",
# "codepipeline-pipeline-pipeline-execution-canceled"],
# targets=[deployment_sns_topic]) | cleanslate-technology-group/indyaws-cdk-python-jekyll-blog | infrastructure/infrastructure/infrastructure_stack.py | infrastructure_stack.py | py | 11,422 | python | en | code | 0 | github-code | 13 |
37879076422 | from agentes import othello
import timeit
# Build the two players: minimax vs alpha-beta pruning, both searching to
# depth 3 with the same evaluation function, then time one full game.
black=othello.minimax_searcher(3, othello.score)
white=othello.alphabeta_searcher(3, othello.score)
startt = timeit.default_timer()
# black, white = get_players()
board, score = othello.play(black, white)
elapsed = timeit.default_timer() - startt  # in seconds
cl = elapsed  # NOTE(review): unused — kept for parity with the original
print('Tiempo de ejecucion: ', elapsed, 'segundos')
print('Tiempo de ejecucion: ', elapsed / 60, 'minutos') | Unnamed10110/AI | ia-t2-master/agentes/sub/othello/tests.py | tests.py | py | 429 | python | en | code | 0 | github-code | 13 |
9856577085 | # Задание 5**
# Создайте новый столбец в датафрейме authors_price под названием cover, в нем будут располагаться данные о том,
# какая обложка у данной книги - твердая или мягкая. В этот столбец поместите данные из следующего списка:
# ['твердая', 'мягкая', 'мягкая', 'твердая', 'твердая', 'мягкая', 'мягкая'].
# Просмотрите документацию по функции pd.pivot_table с помощью вопросительного знака.
# Для каждого автора посчитайте суммарную стоимость книг в твердой и мягкой обложке.
# Используйте для этого функцию pd.pivot_table. При этом столбцы должны называться "твердая" и "мягкая",
# а индексами должны быть фамилии авторов. Пропущенные значения стоимостей заполните нулями,
# при необходимости загрузите библиотеку Numpy.
# Назовите полученный датасет book_info и сохраните его в формат pickle под названием "book_info.pkl".
# Затем загрузите из этого файла датафрейм и назовите его book_info2. Удостоверьтесь,
# что датафреймы book_info и book_info2 идентичны.
import pandas as pd
import numpy as np
# Author and book tables, joined on author_id into one price table.
authors = pd.DataFrame({
    'author_id': [1, 2, 3],
    'author_name': ['Тургенев', 'Чехов', 'Островский'],
})
book = pd.DataFrame({
    'author_id': [1, 1, 1, 2, 2, 3, 3],
    'book_title': ['Отцы и дети', 'Рудин', 'Дворянское гнездо', 'Толстый и тонкий', 'Дама с собачкой', 'Гроза', 'Таланты и поклонники'],
    'price': [450, 300, 350, 500, 450, 370, 290],
})
authors_price = authors.merge(book, on='author_id')

# Cover type per row (hard/soft), then total price per author and cover.
authors_price['cover'] = ['твердая', 'мягкая', 'мягкая', 'твердая', 'твердая', 'мягкая', 'мягкая']
book_info = pd.pivot_table(
    authors_price,
    values='price',
    index=['author_name'],
    columns=['cover'],
    aggfunc=np.sum,
)
book_info = book_info.fillna(0)
print(book_info)

# Round-trip through pickle and compare the two frames element-wise.
book_info.to_pickle('book_info.pkl')
book_info2 = pd.read_pickle('book_info.pkl')
print('*' * 30)
print(book_info == book_info2)
| ZV8/GB-Python-libraries-for-DS | Lesson_2/Working with data in Pandas/5.py | 5.py | py | 2,682 | python | ru | code | 0 | github-code | 13 |
2625060935 | # -*- coding: utf-8 -*-
import pytz
import datetime
import json
from pyramid.view import view_config
from stalker import db, Project, Status, Entity, Invoice, Budget, Client, Payment
from stalker.db.session import DBSession
import transaction
from webob import Response
import stalker_pyramid
import logging
#logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
from stalker_pyramid import logger_name
logger = logging.getLogger(logger_name)
@view_config(
    route_name='create_invoice_dialog',
    renderer='templates/invoice/dialog/invoice_dialog.jinja2'
)
def create_invoice_dialog(request):
    """Render the 'create invoice' dialog for a budget.

    Returns the template context dict, or a 500 Response when the
    requested budget does not exist.
    """
    logger.debug('create_invoice_dialog')

    came_from = request.params.get('came_from', '/')

    # get logged in user
    from stalker_pyramid.views import get_logged_in_user, \
        milliseconds_since_epoch
    logged_in_user = get_logged_in_user(request)

    budget_id = request.params.get('budget_id', -1)
    budget = Budget.query.filter(Budget.id == budget_id).first()
    if budget is None:
        return Response('No budget found with id: %s' % budget_id, 500)

    from stalker_pyramid.views.auth import PermissionChecker
    context = {
        'has_permission': PermissionChecker(request),
        'logged_in_user': logged_in_user,
        'budget': budget,
        'came_from': came_from,
        'mode': 'Create',
        'milliseconds_since_epoch': milliseconds_since_epoch,
    }
    return context
@view_config(
    route_name='create_invoice'
)
def create_invoice(request):
    """Create an Invoice for a Budget from the request parameters.

    Expects ``description``, ``amount``, ``unit``, ``budget_id`` and
    ``client_id`` in ``request.params``.  Returns a 500 Response when a
    required parameter is missing or refers to a non-existent entity,
    otherwise persists the new Invoice and returns a success Response.
    """
    from stalker_pyramid.views import get_logged_in_user, milliseconds_since_epoch
    logged_in_user = get_logged_in_user(request)
    utc_now = datetime.datetime.now(pytz.utc)

    description = request.params.get('description')
    amount = request.params.get('amount')
    unit = request.params.get('unit')

    budget_id = request.params.get('budget_id', -1)
    budget = Budget.query.filter(Budget.id == budget_id).first()

    client_id = request.params.get('client_id', -1)
    client = Client.query.filter(Client.id == client_id).first()

    # Bug fix: validate *before* dereferencing — the original logged
    # ``client.id`` ahead of the None-check, raising AttributeError for
    # unknown clients instead of returning the intended 500 Response.
    if not client:
        return Response('Please supply a client', 500)
    logger.debug("client %s" % client.id)

    if not description:
        return Response('Please supply a description', 500)

    # int(amount) below would raise TypeError when amount is missing.
    if not amount:
        return Response('Please supply an amount', 500)

    if not budget:
        return Response('There is no budget with id: %s' % budget_id, 500)

    invoice = Invoice(
        budget=budget,
        client=client,
        amount=int(amount),
        unit=unit,
        description=description,
        created_by=logged_in_user,
        date_created=utc_now,
        date_updated=utc_now
    )
    DBSession.add(invoice)

    return Response('Invoice Created successfully')
@view_config(
    route_name='create_invoice_dialog',
    renderer='templates/invoice/dialog/invoice_dialog.jinja2'
)
def create_invoice_dialog(request):
    """called when creating invoice

    NOTE(review): this is a byte-for-byte duplicate of the
    create_invoice_dialog defined earlier in this module; this second
    definition rebinds the name at import time.  One of the two should
    be removed.
    """
    logger.debug(
        'create_invoice_dialog'
    )
    came_from = request.params.get('came_from', '/')
    # logger.debug('came_from %s: '% came_from)
    # get logged in user
    from stalker_pyramid.views import get_logged_in_user,\
        milliseconds_since_epoch
    logged_in_user = get_logged_in_user(request)
    budget_id = request.params.get('budget_id', -1)
    budget = Budget.query.filter(Budget.id == budget_id).first()
    if not budget:
        return Response('No budget found with id: %s' % budget_id, 500)
    from stalker_pyramid.views.auth import PermissionChecker
    return {
        'has_permission': PermissionChecker(request),
        'logged_in_user': logged_in_user,
        'budget': budget,
        'came_from': came_from,
        'mode': 'Create',
        'milliseconds_since_epoch': milliseconds_since_epoch
    }
@view_config(
    route_name='create_payment_dialog',
    renderer='templates/invoice/dialog/payment_dialog.jinja2'
)
def create_payment_dialog(request):
    """Render the 'create payment' dialog for an invoice.

    Returns the template context dict, or a 500 Response when the
    requested invoice does not exist.
    """
    logger.debug('create_payment_dialog')

    came_from = request.params.get('came_from', '/')

    # get logged in user
    from stalker_pyramid.views import get_logged_in_user, \
        milliseconds_since_epoch
    logged_in_user = get_logged_in_user(request)

    invoice_id = request.params.get('invoice_id', -1)
    invoice = Invoice.query.filter(Invoice.id == invoice_id).first()
    if invoice is None:
        return Response('No invoice found with id: %s' % invoice_id, 500)

    from stalker_pyramid.views.auth import PermissionChecker
    context = {
        'has_permission': PermissionChecker(request),
        'logged_in_user': logged_in_user,
        'invoice': invoice,
        'came_from': came_from,
        'mode': 'Create',
        'milliseconds_since_epoch': milliseconds_since_epoch,
    }
    return context
@view_config(
    route_name='create_payment'
)
def create_payment(request):
    """Create a Payment for an Invoice from the request parameters.

    Expects ``description``, ``amount``, ``unit`` and ``invoice_id`` in
    ``request.params``.  Returns a 500 Response when a required
    parameter is missing or the invoice does not exist, otherwise
    persists the new Payment and returns a success Response.
    """
    from stalker_pyramid.views import get_logged_in_user, milliseconds_since_epoch
    logged_in_user = get_logged_in_user(request)
    utc_now = datetime.datetime.now(pytz.utc)

    description = request.params.get('description')
    amount = request.params.get('amount')
    unit = request.params.get('unit')

    invoice_id = request.params.get('invoice_id', -1)
    invoice = Invoice.query.filter(Invoice.id == invoice_id).first()

    if not invoice:
        return Response('Please supply a invoice', 500)
    if not description:
        return Response('Please supply a description', 500)

    # int(amount) below would raise TypeError when amount is missing.
    if not amount:
        return Response('Please supply an amount', 500)

    payment = Payment(
        invoice=invoice,
        amount=int(amount),
        unit=unit,
        description=description,
        created_by=logged_in_user,
        date_created=utc_now,
        date_updated=utc_now
    )
    # Bug fix: the freshly created Payment was never persisted -- the
    # original added ``invoice`` (already managed) to the session instead.
    DBSession.add(payment)

    return Response('Payment Created successfully')
@view_config(
    route_name='update_payment'
)
def update_payment(request):
    """Update an existing Payment from the request parameters.

    Expects ``id`` plus ``amount``, ``unit`` and ``description`` in
    ``request.params``.  Returns a 500 Response when the Payment does
    not exist; otherwise updates it and returns a success Response.
    """
    logger.debug('***edit edit_payment method starts ***')
    from stalker_pyramid.views import get_logged_in_user
    logged_in_user = get_logged_in_user(request)
    utc_now = datetime.datetime.now(pytz.utc)

    payment_id = request.params.get('id')
    payment = Payment.query.filter_by(id=payment_id).first()

    if not payment:
        transaction.abort()
        # Bug fix: the original interpolated the *builtin* ``id`` function
        # into the message instead of the requested payment id.
        return Response('There is no payment with id %s' % payment_id, 500)

    amount = request.params.get('amount', None)
    unit = request.params.get('unit', None)
    description = request.params.get('description', None)

    # Only update when all three fields were supplied.
    # NOTE(review): when any field is missing this silently skips the
    # update but still reports success below — confirm this is intended.
    if amount and unit and description:
        payment.amount = int(amount)
        payment.unit = unit
        payment.description = description
        payment.date_updated = utc_now
        payment.updated_by = logged_in_user

    request.session.flash(
        'success:updated %s payment!' % payment.id
    )
    return Response('successfully updated %s payment!' % payment.id)
@view_config(
    route_name='list_payment_dialog',
    renderer='templates/invoice/dialog/list_payment_dialog.jinja2'
)
def list_payment_dialog(request):
    """Render the payment list dialog for an invoice.

    Returns the template context dict, or a 500 Response when the
    requested invoice does not exist.
    """
    logger.debug('list_payment_dialog')

    came_from = request.params.get('came_from', '/')

    # get logged in user
    from stalker_pyramid.views import get_logged_in_user, \
        milliseconds_since_epoch
    logged_in_user = get_logged_in_user(request)

    invoice_id = request.params.get('invoice_id', -1)
    invoice = Invoice.query.filter(Invoice.id == invoice_id).first()
    if invoice is None:
        return Response('No invoice found with id: %s' % invoice_id, 500)

    from stalker_pyramid.views.auth import PermissionChecker
    context = {
        'has_permission': PermissionChecker(request),
        'logged_in_user': logged_in_user,
        'invoice': invoice,
        'came_from': came_from,
        'mode': 'Update',
        'milliseconds_since_epoch': milliseconds_since_epoch,
    }
    return context
@view_config(
    route_name='get_entity_invoices',
    renderer='json'
)
@view_config(
    route_name='get_budget_invoices',
    renderer='json'
)
def get_invoices(request):
    """Return, as JSON, the invoices of the Budget or Project whose id is
    in the route matchdict, including per-invoice payment percentages and
    view/update/remove/duplicate action links.
    """
    entity_id = request.matchdict.get('id')
    entity = Entity.query.filter(Entity.id == entity_id).first()
    # NOTE(review): entity may be None for an unknown id, which makes the
    # entity_type access below raise AttributeError — confirm routing
    # guarantees a valid id.
    logger.debug(
        'get_invoices is working for the entity which id is: %s' % entity_id
    )
    # percent = sum of payments over the invoice amount, as a percentage;
    # payments holds each individual payment's percentage.
    sql_query = """
    select
        "Invoices".id,
        "Invoice_SimpleEntities".name,
        "Invoices".amount,
        "Invoices".unit,
        "Invoice_SimpleEntities".description,
        (extract(epoch from "Invoice_SimpleEntities".date_created) * 1000)::bigint as date_created,
        "Clients".id,
        "Client_SimpleEntities".name,
        array_agg("Payments".amount::float/"Invoices".amount*100),
        sum("Payments".amount)::float/"Invoices".amount *100 as percent
    from "Invoices"
    join "SimpleEntities" as "Invoice_SimpleEntities" on "Invoice_SimpleEntities".id = "Invoices".id
    join "Budgets" on "Budgets".id = "Invoices".budget_id
    join "Projects" on "Projects".id = "Budgets".project_id
    join "Clients" on "Clients".id = "Invoices".client_id
    join "SimpleEntities" as "Client_SimpleEntities" on "Client_SimpleEntities".id = "Clients".id
    left outer join "Payments" on "Payments".invoice_id = "Invoices".id
    where %(where_condition)s
    group by
        "Invoices".id,
        "Invoice_SimpleEntities".name,
        "Invoices".amount,
        "Invoices".unit,
        "Invoice_SimpleEntities".description,
        "Invoice_SimpleEntities".date_created,
        "Clients".id,
        "Client_SimpleEntities".name
    """
    # NOTE(review): when entity is neither a Budget nor a Project the
    # where clause stays empty and the query is invalid SQL — confirm only
    # those two types can reach this view.
    where_condition = ''
    if entity.entity_type == "Budget":
        where_condition = '"Budgets".id=%s' % entity.id
    if entity.entity_type == "Project":
        where_condition = '"Projects".id=%s' % entity.id
    invoices = []
    sql_query = sql_query % {
        'where_condition': where_condition
    }
    from stalker_pyramid.views.auth import PermissionChecker
    result = DBSession.connection().execute(sql_query)
    update_budget_permission = \
        PermissionChecker(request)('Update_Budget')
    for r in result.fetchall():
        invoice = {
            'id': r[0],
            'name': r[1],
            'amount': r[2],
            'unit': r[3],
            'description': r[4],
            'date_created': r[5],
            'client_id': r[6],
            'client_name': r[7],
            'payments': r[8],
            'percent': r[9]
        }
        # if update_budget_permission:
        # NOTE(review): the permission check above is commented out, so
        # action links are emitted for every caller — confirm intended.
        invoice['item_view_link'] = \
            '/invoices/%s/view' % invoice['id']
        invoice['item_update_link'] = \
            '/invoices/%s/update/dialog' % invoice['id']
        invoice['item_remove_link'] =\
            '/entities/%s/delete/dialog?came_from=%s' % (
                invoice['id'],
                request.current_route_path()
            )
        invoice['item_duplicate_link'] =\
            '/invoices/%s/duplicate/dialog?came_from=%s' % (
                invoice['id'],
                request.current_route_path()
            )
        invoices.append(invoice)
    resp = Response(
        json_body=invoices
    )
    return resp
@view_config(
    route_name='get_entity_invoices_count',
    renderer='json'
)
@view_config(
    route_name='get_budget_invoices_count',
    renderer='json'
)
def get_invoices_count(request):
    """Return the number of Invoices attached to the given Budget as JSON."""
    budget_id = request.matchdict.get('id')
    logger.debug(
        'get_invoices_count is working for the budget which id is %s' %
        budget_id
    )
    # Count invoice rows that belong to this budget.
    sql_query = """
    select count(1) from (
        select
            "Invoices".id
        from "Invoices"
        join "Budgets" on "Budgets".id = "Invoices".budget_id
        where "Budgets".id = %(budget_id)s
    ) as data
    """
    # text() lets the pre-rendered query (with literal "%" signs) pass
    # through SQLAlchemy untouched.
    from sqlalchemy import text
    rendered_query = sql_query % {'budget_id': budget_id}
    return DBSession.connection().execute(text(rendered_query)).fetchone()[0]
@view_config(
    route_name='get_invoice_payments',
    renderer='json'
)
def get_payments(request):
    """Return the Payments of the given Invoice as a JSON list."""
    entity_id = request.matchdict.get('id')
    entity = Entity.query.filter(Entity.id == entity_id).first()
    logger.debug(
        'get_payments is working for the entity which id is: %s' % entity_id
    )
    # One row per payment; date_created is exported as epoch milliseconds.
    sql_query = """
    select
        "Payments".id,
        "Payments".amount,
        "Payments".unit,
        "Payment_SimpleEntities".description,
        (extract(epoch from "Payment_SimpleEntities".date_created) * 1000)::bigint as date_created
    from "Payments"
    join "SimpleEntities" as "Payment_SimpleEntities" on "Payment_SimpleEntities".id = "Payments".id
    join "Invoices" on "Invoices".id = "Payments".invoice_id
    where %(where_condition)s
    """
    # NOTE(review): entity.id comes from the ORM (an int), so the raw "%s"
    # interpolation is not directly user-controlled here.
    where_condition = ''
    if entity.entity_type == "Invoice":
        where_condition = '"Invoices".id=%s' % entity.id
    payments = []
    sql_query = sql_query % {
        'where_condition': where_condition
    }
    from stalker_pyramid.views.auth import PermissionChecker
    result = DBSession.connection().execute(sql_query)
    # NOTE(review): computed but never used below -- the permission check
    # that consumed it is commented out.
    update_budget_permission = \
        PermissionChecker(request)('Update_Budget')
    for r in result.fetchall():
        # Map the positional SQL columns to a JSON-friendly dict.
        payment = {
            'id': r[0],
            'amount': r[1],
            'unit': r[2],
            'description': r[3],
            'date_created': r[4]
        }
        # if update_budget_permission:
        payment['item_update_link'] = \
            '/invoices/%s/update/dialog' % payment['id']
        payment['item_remove_link'] =\
            '/entities/%s/delete/dialog?came_from=%s' % (
                payment['id'],
                request.current_route_path()
            )
        payment['item_duplicate_link'] =\
            '/invoices/%s/duplicate/dialog?came_from=%s' % (
                payment['id'],
                request.current_route_path()
            )
        payments.append(payment)
    resp = Response(
        json_body=payments
    )
    return resp
| eoyilmaz/stalker_pyramid | stalker_pyramid/views/invoice.py | invoice.py | py | 14,596 | python | en | code | 6 | github-code | 13 |
7315471364 |
# Swap a and b.
def swap(a, b):
    """Return the two arguments in reversed order.

    Uses Python tuple packing/unpacking instead of a temporary variable.
    """
    return b, a
# +:0, -:1, *:2, /:3
# in: two numbers
# out: a list of (result, operator) tuples
def f(a, b):
    """Return every result of combining a and b with +, -, * and /.

    The operands are first ordered so that a >= b, which keeps a - b
    non-negative for non-negative inputs.  A zero divisor contributes a
    (0, '/') placeholder instead of a real quotient, so the downstream
    search still sees a '/' candidate without dividing by zero.
    """
    # let a be the larger operand
    if a < b:
        a, b = b, a
    res = [(a * b, '*'), (a + b, '+'), (a - b, '-')]
    if b != 0:
        res.append((a / b, '/'))
        if a != 0:
            res.append((b / a, '/'))
        else:
            # a == 0 can only happen here when b < 0; guard the division
            res.append((0, '/'))
    else:
        res.append((0, '/'))
    return res
# a: list of operand numbers
# temp: list holding the solution trace, len = N+1
# N: size of the initial problem
# n: len(a) at the current recursion depth
def cal(a, n, temp, N):
    """Recursively search for a way to combine the numbers in *a* into 24.

    temp[0] is a found-flag; temp[1:] records one combination step per
    recursion level.  A successful derivation is printed when found.
    """
    if(n<=2):
        res=f(a[0],a[1])
        for i in res:
            # e.g. (c, '+'): i[0] is the value c, i[1] is the operator '+'
            temp[N+1-n]=(a[0],i[1],a[1],'=',24)
            if(abs(i[0]-24)<=1E-7):
                temp[0]=True
                # print the solution trace
                print(temp[1:])
                break
        return
    else:
        for i in range(0,n):
            # stop exploring this branch once a solution was found;
            # comment the `if` out to enumerate every 24-point solution
            if(temp[0]==True):
                break
            for j in range(i+1,n):
                res=f(a[i],a[j])
                for k in res:
                    temp[N+1-n]=(a[i],k[1],a[j],k[0])
                    # operand list for the next recursion step: the
                    # untouched numbers plus the combined result
                    new_a=[]
                    for m in range(0,n):
                        if(m!=i and m!=j):
                            new_a.append(a[m])
                    new_a.append(k[0])
                    # print (new_a,n,temp [0])
                    # input ()
                    cal(new_a,n-1,temp,N)
# Test case
N=4 # initial problem: make 24 from four numbers
temp=[False]*(N+1) # temp[0] flags whether a solution was found (initially False); temp[1:] stores the current derivation (initially empty)
# Example: for the input 3,8,8,9 one printed solution is [(9.0, '-', 8.0), ('*', 8.0), ('*', 3.0), '=', 24]
cal([3,8,8,9],N,temp,N)
| marcusadrian666/24- | 24.py | 24.py | py | 2,021 | python | zh | code | 1 | github-code | 13 |
20919783216 | #!/usr/bin/python3
import aocd
from icecream import ic
import itertools
import math
import operator
# Sample sonar depth readings from the Advent of Code 2021 day 1 statement.
TEST_INPUT = """199
200
208
210
200
207
240
269
260
263
"""
def test():
    """Check both puzzle parts against the sample data from the statement."""
    depths = [int(line) for line in TEST_INPUT.splitlines()]

    ### PART A ###
    # count strictly increasing consecutive pairs
    pair_increases = sum(1 for a, b in zip(depths, depths[1:]) if a < b)
    assert pair_increases == 7

    ### PART B ###
    # three-measurement sliding-window sums, then count increasing pairs
    window_sums = [a + b + c for a, b, c in zip(depths, depths[1:], depths[2:])]
    window_increases = sum(1 for a, b in zip(window_sums, window_sums[1:]) if a < b)
    assert window_increases == 5
def main():
    """Solve both parts on the real input and submit the answers via aocd."""
    depths = aocd.numbers

    ### PART A ###
    increases = sum(1 for a, b in zip(depths, depths[1:]) if a < b)
    aocd.submit(increases, year=2021, day=1, part='a')

    ### PART B ###
    window_sums = [a + b + c for a, b, c in zip(depths, depths[1:], depths[2:])]
    increases = sum(1 for a, b in zip(window_sums, window_sums[1:]) if a < b)
    aocd.submit(increases, year=2021, day=1, part='b')
# Run the sample-data checks first, then solve and submit the real input.
if __name__ == '__main__':
    test()
    main()
29712193280 | from django.conf import settings
import types
def log(method, **kwargs):
    """Pretty-print *method*'s qualified name and its keyword arguments.

    Output is only produced when ``settings.DEBUG`` is enabled.  Raises
    ``AssertionError`` when *method* is not a plain function or a method.
    """
    # isinstance accepts a tuple of types; no need for `is True` checks
    assert isinstance(method, (types.FunctionType, types.MethodType)), \
        "method == type : function or method"
    if settings.DEBUG is True:
        # one "\tkey : value" line per kwarg, without a trailing newline
        body = "\n".join("\t%s : %s" % (key, value)
                         for key, value in kwargs.items())
        print(method.__qualname__, "= {")
        print(body)
        print("}")
class LogUtil:
    """Helpers for colourised terminal output."""

    @staticmethod
    def red(plain):
        # Wrap the text in ANSI escape codes: bright-red on, reset off.
        prefix, suffix = "\033[91m", "\033[0m"
        print("%s%s%s" % (prefix, plain, suffix))
| yseiren87/jellicleSpace | server/utils/log.py | log.py | py | 586 | python | en | code | 0 | github-code | 13 |
15112705280 | # -*- coding: utf-8 -*-
"""
"""
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import lightgbm as lgb
from sklearn import metrics
# Importing the dataset
dataset = pd.read_csv('C:/Users//Ravi Keerthi//Desktop//Disserattion//Cleaned_KIVA_Data.csv')
# keep only the modelling columns
dataset=dataset[['backers_count', 'converted_pledged_amount', 'country', 'currency',
       'category', 'currency_trailing_code', 'current_currency', 'deadline',
       'disable_communication', 'goal', 'is_starrable', 'spotlight',
       'staff_pick', 'state', 'static_usd_rate',
       'name_len', 'name_exclaim', 'name_question', 'name_words',
       'name_is_upper']]
#K-fold Cross Validatin Technique
# features: every column except index 4 ('category') and 13 ('state');
# target: column 13 ('state')
X = dataset.iloc[:, [0,1,2,3,5,6,7,8,9,10,11,12,14,15,16,17,18,19]].values
y = dataset.iloc[:, 13].values
#K-fold Cross Validatin Technique
from sklearn.model_selection import train_test_split
# Prepare for LightGBM
# Parameters
N_FOLDS =10 #No. of Folds
MAX_BOOST_ROUNDS = 8000
LEARNING_RATE = .0022
#X_train = X_train.values.astype(np.float32, copy=False)
d_train = lgb.Dataset(X, label=y)
# 10-fold cross-validation with K=5 for KNN (the n_neigh bors parameter)
# k = 5 for KNeighborsClassifier
params = {}
params['learning_rate'] = 0.003
params['boosting_type'] = 'gbdt'
params['objective'] = 'binary'
params['metric'] = 'binary_logloss'
params['sub_feature'] = 0.5
params['num_leaves'] = 10
params['min_data'] = 50
params['max_depth'] = 10
# Cross-validate
cv_results = lgb.cv(params, d_train, num_boost_round=MAX_BOOST_ROUNDS, nfold=N_FOLDS,
                        verbose_eval=20, early_stopping_rounds=40)
# Display results
print('Current parameters:\n', params)
print('\nBest num_boost_round:', len(cv_results['binary_logloss-mean']))
print('Best CV score:', cv_results['binary_logloss-mean'][-1])
#Testing the live projects data
#Confusion matrix
# NOTE(review): y_test and y_pred are never assigned anywhere in this
# script -- everything from here on raises NameError when executed.
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
#Accuracy
from sklearn.metrics import accuracy_score
accuracy=accuracy_score(y_pred,y_test)
print("Accuracy Score using LightGBM:", metrics.accuracy_score(y_test, y_pred) )
print("  Result ")
print(metrics.classification_report(y_test, y_pred))
#Prediction
# NOTE(review): clf and X_Live are also undefined in this script.
y_pred_Live=clf.predict(X_Live)
#convert into binary values
for i in range(0,len(y_pred_Live)):
    if y_pred_Live[i]>=.5:       # setting threshold to .5
       y_pred_Live[i]=1
    else:
       y_pred_Live[i]=0
unique_elements, counts_elements = np.unique(y_pred_Live, return_counts=True)
print("Frequency of successful(1) and unsuccesful (0) values for LiveProjects :")
print(np.asarray((unique_elements, counts_elements)))
1586731608 | import datetime
import threading
from django.core.cache.backends.locmem import LocMemCache
from django.utils import timezone
class LocMemCacheBackend(LocMemCache):
    """LocMemCache backend with a "smooth" (stale-while-revalidate) refresh.

    Entries carry a soft expiry (`smoothly_datetime`) in addition to the
    hard cache timeout: once the soft expiry passes, the stale value is
    still served while a background thread rebuilds it.
    """

    def _set_value(
        self,
        key,
        value_class,
        value_kwargs=None,
        timeout=60 * 60,
        smoothly_timeout=60 * 10,
    ):
        """Build a fresh value and store it with its soft-expiry timestamp."""
        value = value_class(**value_kwargs)
        self.set(
            key,
            {
                "value": value,
                "smoothly_datetime": (
                    timezone.now() + datetime.timedelta(seconds=smoothly_timeout)
                ).strftime("%Y-%m-%d %H:%M:%S"),
            },
            timeout,
        )
        return value

    def smooth(
        self,
        key,
        value_class,
        value_kwargs=None,
        timeout=60 * 60,
        smoothly_timeout=60 * 10,
    ):
        """Fetch a cached value, refreshing it smoothly when stale."""
        # look up the cached envelope; normalise anything unexpected
        data = self.get(key)
        if data is None or type(data) != dict:
            data = {}

        # unpack value and soft expiry
        value = data.get("value")
        smoothly_datetime = (
            datetime.datetime.strptime(
                data.get("smoothly_datetime"), "%Y-%m-%d %H:%M:%S"
            )
            if data.get("smoothly_datetime")
            else None
        )

        # 1. still inside the soft-expiry window: return as-is
        if smoothly_datetime is not None and smoothly_datetime >= timezone.now():
            return value

        args = [key, value_class, value_kwargs, timeout, smoothly_timeout]

        # 2. value exists but is stale
        if smoothly_datetime is not None and smoothly_datetime < timezone.now():
            # re-store the old value (resetting the soft expiry) so only one
            # caller triggers the rebuild; serve it while the thread works
            self._set_value(
                key=key,
                value_class=lambda v: v,
                value_kwargs={"v": value},
                timeout=timeout,
                smoothly_timeout=smoothly_timeout,
            )
            # asynchronous rebuild
            threading.Thread(target=self._set_value, args=args).start()

        # 3. nothing cached at all
        else:
            # synchronous build -- the caller has to wait this one time
            value = self._set_value(*args)
        return value
| lee-lou2/lee-lou2 | src/conf/caches.py | caches.py | py | 2,316 | python | ko | code | 1 | github-code | 13 |
13194754109 |
import csv
from numpy.linalg import norm
from scipy import *
import os
from pylab import plot, show, legend,xlim,ylim,savefig,title,xlabel,ylabel,clf, loglog
from Hamil import *
from numpy import tanh,arctanh
def makevar(sx, ex, dx, st, et, dt):
    """Build the spatial and temporal sample grids as two arange arrays."""
    return arange(sx, ex, dx), arange(st, et, dt)
def copyarraytoC(a):
    """Copy a Python sequence into a freshly allocated C array (via Hamil)."""
    c_arr = mallocPy(len(a))
    for idx, val in enumerate(a):
        writetomem(c_arr, idx, val)
    return c_arr
def copyarrayfromC(a, n):
    """Read n values out of the C array a into a new Python list."""
    return [readfrommem(a, i) for i in range(n)]
def HamilDB(alpha, dx):
    """Analytic Hamiltonian (energy) for the smoothed dam-break state."""
    linear_part = 10.3986 * (1000 + dx)
    correction = 0.7848 * ((2.0 / alpha) * tanh(alpha * (500.0 + 0.5 * dx)))
    return linear_part - correction
def SolE(xbeg, xend):
    """Closed-form energy integral between xbeg and xend.

    Evaluates three antiderivative terms at both endpoints and returns
    half the sum of their differences.
    """
    a_beg = 21.0068*tanh(0.555719*xbeg) - 19.2569*arctanh(0.641689*tanh(0.555719*xbeg))
    a_end = 21.0068*tanh(0.555719*xend) - 19.2569*arctanh(0.641689*tanh(0.555719*xend))

    b_beg = 9.81*(xbeg) + tanh(0.555719*xbeg)*(2.88329*sech(0.555719*xbeg)**2 + 30.4805)
    b_end = 9.81*(xend) + tanh(0.555719*xend)*(2.88329*sech(0.555719*xend)**2 + 30.4805)

    c_beg = 307.641*(tanh(0.555719*xbeg)*(0.049539 - 0.00937224*sech(0.555719*xbeg)**2) -0.0625954*arctanh(0.641689*(tanh(0.555719*xbeg))))
    c_end = 307.641*(tanh(0.555719*xend)*(0.049539 - 0.00937224*sech(0.555719*xend)**2) -0.0625954*arctanh(0.641689*(tanh(0.555719*xend))))

    # 1527.68293
    return 0.5*((a_end - a_beg) + (b_end - b_beg) + (c_end - c_beg))
def sech2(x):
    """Hyperbolic secant squared: sech(x) ** 2."""
    s = 2. / (exp(x) + exp(-x))
    return s * s
def soliton(x, t, g, a0, a1):
    """Sech^2 solitary-wave surface height at position x and time t.

    a0 is the still-water depth, a1 the wave amplitude, g gravity.
    """
    c = sqrt(g * (a0 + a1))        # wave speed
    phi = x - c * t                # co-moving coordinate
    k = sqrt(3.0 * a1) / (2.0 * a0 * sqrt(a0 + a1))
    return a0 + a1 * sech2(k * phi)
def solitoninit(n, a0, a1, g, x, t0, dx):
    """Sample the analytic soliton (h, u) at time t0 on the n grid points in x."""
    h = zeros(n)
    u = zeros(n)
    c = sqrt(g * (a0 + a1))
    for i in range(n):
        h[i] = soliton(x[i], t0, g, a0, a1)
        # velocity consistent with the travelling-wave solution
        u[i] = c * ((h[i] - a0) / h[i])
    return h, u
def dambreaksmooth(x, x0, base, eta0, diffuse, dx):
    """Smoothed dam-break initial condition: tanh step in h, fluid at rest."""
    from numpy import tanh
    n = len(x)
    h = zeros(n)
    u = zeros(n)   # velocity is identically zero
    for i, xi in enumerate(x):
        h[i] = base + 0.5 * eta0 * (1 + tanh(diffuse * (x0 - abs(xi))))
    return h, u
def interpquarticval(aj, bj, cj, dj, ej, xj, x):
    """Evaluate the quartic a*p^4 + b*p^3 + c*p^2 + d*p + e at p = x - xj."""
    p = x - xj
    return aj * p * p * p * p + bj * p * p * p + cj * p * p + dj * p + ej
def interpquarticgrad(aj, bj, cj, dj, ej, xj, x):
    """Derivative of the quartic interpolant at p = x - xj (ej is unused)."""
    p = x - xj
    return 4 * aj * p * p * p + 3 * bj * p * p + 2 * cj * p + dj
def interpquartcoeff(q, j, dx):
    """Finite-difference coefficients of the quartic interpolant centred at j.

    Uses the five-point stencil q[j-2..j+2] on a uniform grid of spacing dx.
    """
    i24 = 1.0 / 24.0
    i12 = 1.0 / 12.0
    idx = 1.0 / dx
    qm2, qm1, q0, qp1, qp2 = q[j - 2], q[j - 1], q[j], q[j + 1], q[j + 2]
    aj = i24 * idx * idx * idx * idx * (qp2 - 4 * qp1 + 6 * q0 - 4 * qm1 + qm2)
    bj = i12 * idx * idx * idx * (qp2 - 2 * qp1 + 2 * qm1 - qm2)
    cj = i24 * idx * idx * (-qp2 + 16 * qp1 - 30 * q0 + 16 * qm1 - qm2)
    dj = i12 * idx * (-qp2 + 8 * qp1 - 8 * qm1 + qm2)
    return aj, bj, cj, dj, q0
def interpquartic(u, h, x, xh, nBC):
    """Interpolate h and u from the grid x onto the staggered grid xh.

    The first and last nBC points are skipped (stencil needs 2 neighbours).
    NOTE(review): `dx` is not a parameter here -- it is read as a
    module-level global set by the driver script; calling this function
    without that global defined raises NameError.
    """
    n = len(x)
    nu = []
    nh = []
    nx = []
    for i in range(nBC,n-nBC):
        # quartic coefficients from the 5-point stencil around i
        aj,bj,cj,dj,ej = interpquartcoeff(h,i,dx)
        nh.append(interpquarticval(aj,bj,cj,dj,ej,x[i],xh[i]))
        aj,bj,cj,dj,ej = interpquartcoeff(u,i,dx)
        nu.append(interpquarticval(aj,bj,cj,dj,ej,x[i],xh[i]))
        nx.append(xh[i])
    return nh, nu,nx
"""
##### Soliton
sdir = "../../../../data/postprocessing/EnergyCheck/Interp/"
if not os.path.exists(sdir):
os.makedirs(sdir)
s = sdir + "Energy.txt"
with open(s,'a') as file2:
writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for k in range(6,17):
dx = (100.0 / (2**k))
l = 0.01
dt = l*dx
startx = -50
endx = 250 + dx
startt = 0.0
endt = 30.0+(dt*0.9)
szoomx = startx
ezoomx = endx
#number of boundary conditions (one side)
niBC = 3 #total
g = 9.81
gap = int(0.5/dt)
x,t = makevar(startx,endx,dx,startt,endt,dt)
xh,th = makevar(startx-0.5*dx,endx,dx,startt,endt,dt)
n = len(x)
nBC = 3
hm,um = solitoninit(n,1.0,1.0,g,x,0.0,dx)
hhm,uhm = solitoninit(len(xh),1.0,1.0,g,xh,0.0,dx)
nh, nu, nx = interpquartic(um,hm,x,xh,nBC)
nhh,nuh = solitoninit(len(nx),1.0,1.0,g,nx,0.0,dx)
L1h = norm(nh - nhh,ord=1) / norm(nh,ord=1)
L1u = norm(nu - nuh,ord=1) / norm(nuh,ord=1)
s = sdir + "InterpEh.dat"
with open(s,'a') as file3:
s ="%3.8f%5s%1.30f\n" %(dx," ",L1h)
file3.write(s)
s = sdir + "InterpEu.dat"
with open(s,'a') as file2:
s ="%3.8f%5s%1.30f\n" %(dx," ",L1u)
file2.write(s)
"""
"""
#### Dam Break
sdir = "../../../../data/postprocessing/EnergyCheckN/DB0p5/"
if not os.path.exists(sdir):
os.makedirs(sdir)
deltaxa = range(1,20)
#diffuses = [0.01,0.025,0.05,0.075,0.1,0.25,0.5,0.75,1.0,2.5,5.0,7.5,10.0,25.0,50.0,75.0,100.0,250.0,500.0,750.0,1000.0]
diffuses = [0.5]
s = sdir + "Energy.txt"
with open(s,'a') as file2:
writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writefile2.writerow(['dx' ,'alpha','Numerical','analytical', "Rel Err"])
for ll in deltaxa:
for lk in diffuses:
dx = (100.0 / (2**ll))
l = 0.01
dt = l*dx
startx = 0.0
endx = 1000.0 + dx
startt = 0.0
endt = 30.0+(dt*0.9)
szoomx = startx
ezoomx = endx
#number of boundary conditions (one side)
niBC = 3 #total
g = 9.81
gap = int(0.5/dt)
x,t = makevar(startx,endx,dx,startt,endt,dt)
n = len(x)
hf = 1.8
hl = 1.0
base = hl
eta0 = hf - hl
x0 = 500
diffuse = lk
hm,um = dambreaksmooth(x,x0,base,eta0,diffuse,dx)
umbegi = zeros(niBC)
umendi = zeros(niBC)
hmbegi = ones(niBC)
hmendi = ones(niBC)
for i in range(niBC):
umbegi[i] = um[0]
umendi[i] = um[-1]
hmbegi[i] = hm[0]
hmendi[i] = hm[-1]
umbeg = umbegi
umend = umendi
hmbeg = hmbegi
hmend = hmendi
xbeg = arange(startx - niBC*dx,startx,dx)
xend = arange(endx + dx,endx + (niBC+1)*dx,dx)
xbc = concatenate([xbeg,x,xend])
ubc = concatenate([umbeg,um,umend])
hbc = concatenate([hmbeg,hm,hmend])
xbc_c = copyarraytoC(xbc)
ubc_c = copyarraytoC(ubc)
hbc_c = copyarraytoC(hbc)
Evali = HankEnergyall(xbc_c,hbc_c,ubc_c,g,n + 2*niBC,niBC,dx)
H0 = HamilDB(diffuse,dx)
relErr = abs(H0 - Evali) / abs(H0)
s = sdir + "Energy.txt"
with open(s,'a') as file2:
writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writefile2.writerow([str(dx),str(diffuse),str(Evali),str(H0), str(relErr)])
s = sdir + "relE.dat"
with open(s,'a') as file3:
s ="%3.8f%5s%1.50f\n" %(dx," ",relErr)
file3.write(s)
"""
##### Soliton: compare the numerically integrated energy against the
##### analytic Hamiltonian for a sequence of grid refinements.
sdir = "../../../../data/postprocessing/EnergyCheck/SolitonN/"
if not os.path.exists(sdir):
    os.makedirs(sdir)

deltaxa = range(1,20)

# write the CSV header once
s = sdir + "Energy.txt"
with open(s,'a') as file2:
    writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
    writefile2.writerow(['dx','Numerical','analytical', "Rel Err"])

for ll in deltaxa:
    dx = (100.0 / (2**ll))
    l = 0.01
    dt = l*dx
    startx = -50
    endx = 250 + dx
    startt = 0.0
    endt = 30.0+(dt*0.9)

    szoomx = startx
    ezoomx = endx

    #number of boundary conditions (one side)
    niBC = 3 #total
    g = 9.81
    gap = int(0.5/dt)

    x,t = makevar(startx,endx,dx,startt,endt,dt)
    n = len(x)

    # analytic soliton initial condition on the grid
    hm,um = solitoninit(n,1.0,1.0,g,x,0.0,dx)

    # constant-extrapolation ghost cells on both sides
    umbegi = zeros(niBC)
    umendi = zeros(niBC)
    hmbegi = ones(niBC)
    hmendi = ones(niBC)

    for i in range(niBC):
        umbegi[i] = um[0]
        umendi[i] = um[-1]
        hmbegi[i] = hm[0]
        hmendi[i] = hm[-1]

    umbeg = umbegi
    umend = umendi
    hmbeg = hmbegi
    hmend = hmendi

    xbeg = arange(startx - niBC*dx,startx,dx)
    # BUGFIX: the step argument `dx` was missing here, so the ghost-cell
    # x coordinates were spaced 1.0 apart instead of dx (cf. xbeg above).
    xend = arange(endx + dx,endx + (niBC+1)*dx,dx)

    xbc = concatenate([xbeg,x,xend])
    ubc = concatenate([umbeg,um,umend])
    hbc = concatenate([hmbeg,hm,hmend])

    # hand the padded arrays to the C energy integrator
    xbc_c = copyarraytoC(xbc)
    ubc_c = copyarraytoC(ubc)
    hbc_c = copyarraytoC(hbc)

    Evali = HankEnergyall(xbc_c,hbc_c,ubc_c,g,n + 2*niBC,niBC,dx)
    # analytic reference value (Soliton comes from the Hamil module)
    H0 = Soliton(0.5*dx)
    relErr = abs(H0 - Evali) / abs(H0)

    s = sdir + "Energy.txt"
    with open(s,'a') as file2:
        writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        writefile2.writerow([str(dx),str(Evali),str(H0), str(relErr)])

    s = sdir + "relE.dat"
    with open(s,'a') as file3:
        s ="%3.8f%5s%1.15f\n" %(dx," ",relErr)
        file3.write(s)
| jordanpitt3141/collectedworks | postprocessing/makeup/HamiltonainCheck/Energies.py | Energies.py | py | 9,742 | python | en | code | 0 | github-code | 13 |
19933233597 | # 实现 int sqrt(int x) 函数。
# Compute and return the square root of x, where x is a non-negative integer.
# Since the return type is an integer, only the integer part of the result
# is kept; the fractional part is discarded.
# Example 1:
# Input: 4
# Output: 2
# Example 2:
# Input: 8
# Output: 2
# Note: the square root of 8 is 2.82842...; because the return type is an
# integer, the fractional part is dropped.
### Solution: binary-search approach -- first expand the bound exponentially,
### then binary-search within it.
### The integer square root must satisfy result^2 <= x < (result+1)^2.
class Solution:
    def mySqrt(self, x: int) -> int:
        """Integer square root: the largest r with r * r <= x."""
        # Exponential search for an upper bound hi with hi * hi >= x.
        hi = 1
        while hi * hi < x:
            hi <<= 1
        lo = hi >> 1
        # Binary search on [lo, hi].  Taking the *upper* midpoint is
        # essential: with `lo = mid` on success, a lower midpoint would
        # loop forever when hi == lo + 1.
        while lo < hi:
            mid = (lo + hi + 1) >> 1
            if mid * mid <= x:
                # result lies in [mid, hi]
                lo = mid
            else:
                # result lies in [lo, mid - 1]
                hi = mid - 1
        return lo
| Vivhchj/LeeeCode_Notes | 69.x的平方根_easy.py | 69.x的平方根_easy.py | py | 1,234 | python | zh | code | 0 | github-code | 13 |
16641389235 | # ---------------------------------------------------------------+
#
# Albert Negura
# 2-Dimensional Particle Swarm Optimization (PSO) with Python
# February, 2021
#
# ---------------------------------------------------------------+
# --- IMPORT DEPENDENCIES----------------------------------------+
# mathematics / algorithm imports
import math
import numpy as np
from functools import partial
# matplotlib for plotting
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.widgets import Slider, Button, RadioButtons
matplotlib.use("TkAgg")
# config parser for .ini
import configparser
# --- PSO CLASS--------------------------------------------------+
def rand_cmap(nlabels):
from matplotlib.colors import LinearSegmentedColormap
import colorsys
import numpy as np
# Generate color map for bright colors, based on hsv
randHSVcolors = [(np.random.uniform(low=0.0, high=1),
np.random.uniform(low=0.2, high=1),
np.random.uniform(low=0.9, high=1)) for i in range(nlabels)]
# Convert HSV list to RGB
randRGBcolors = []
for HSVcolor in randHSVcolors:
randRGBcolors.append(colorsys.hsv_to_rgb(HSVcolor[0], HSVcolor[1], HSVcolor[2]))
random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)
return random_colormap
class PSO():
# config-adjustable parameters
swarmsize = None
iterations = None
omega = None # inertia
c1 = None # cognitive constant
c2 = None # social constant
T1 = None
T2 = None
CONVERGENCE = None
PROCESSES = None
mp_pool = None
# function selector (6 implemented functions)
function = None
lower_bounds = None
upper_bounds = None
goal = None
# plotting lists
x_hist = None
v_hist = None
avg_cost_function = None
min_cost_function = None
# scale_factor = None
    def __init__(self, mode='config', swarmsize=100, iterations=100, omega=0.5, c1=0.5, c2=0.5,
                 T1=1e-10, T2=1e-10, CONVERGENCE=False, PROCESSES=1, function=0):
        """Configure the optimiser.

        With mode='config' (the default) all parameters are read from the
        [pso] and [functions] sections of ``config.ini``; otherwise the
        keyword arguments are used directly.
        """
        if mode != 'config':
            self.swarmsize = swarmsize
            self.iterations = iterations
            self.omega = omega
            self.c1 = c1
            self.c2 = c2
            self.T1 = T1
            self.T2 = T2
            self.CONVERGENCE = CONVERGENCE
            self.PROCESSES = PROCESSES
            self.function = function
        else:
            config = configparser.ConfigParser()
            config.read('config.ini')
            pso_config = config['pso']
            self.swarmsize = pso_config.getint("swarm_size")
            self.iterations = pso_config.getint("maximum_iterations")
            self.omega = pso_config.getfloat("inertia")
            self.c1 = pso_config.getfloat("cognitive_constant")
            self.c2 = pso_config.getfloat("social_constant")
            self.T1 = pso_config.getfloat("step_convergence_threshold")
            self.T2 = pso_config.getfloat("value_convergence_threshold")
            self.CONVERGENCE = pso_config.getboolean("converge_early")
            self.PROCESSES = pso_config.getint("PROCESSES")
            self.function = config['functions'].getint("function_selection")

        # history buffers (iterations x particles x 2 dimensions)
        self.x_hist = np.zeros((self.iterations, self.swarmsize, 2))
        self.v_hist = np.zeros((self.iterations, self.swarmsize, 2))
        self.avg_cost_function = np.zeros((self.iterations))
        self.min_cost_function = np.zeros((self.iterations))
        self.ani_list = [animation.FuncAnimation, animation.FuncAnimation]
        self.lower_bounds = [0, 0]
        self.upper_bounds = [10, 10]

        # initialize upper, lower bound and global minimum based on the function selector
        if self.function == 0:
            self.lower_bounds = [0, 0]
            self.upper_bounds = [10, 10]
            self.goal = [7.917, 7.917]
        elif self.function == 1:
            self.lower_bounds = [-4.5, -4.5]
            self.upper_bounds = [4.5, 4.5]
            self.goal = [3, 0.5]
        elif self.function == 2:
            self.lower_bounds = [-2 * math.pi, -2 * math.pi]
            self.upper_bounds = [2 * math.pi, 2 * math.pi]
            self.goal = [[4.70104, 3.15294], [-1.58214, -3.13024]]
        elif self.function == 3:
            self.lower_bounds = [-5.2, -5.2]
            self.upper_bounds = [5.2, 5.2]
            self.goal = [0, 0]
        elif self.function == 4:
            self.lower_bounds = [-5.12, -5.12]
            self.upper_bounds = [5.12, 5.12]
            self.goal = [0, 0]
        elif self.function == 5:
            self.lower_bounds = [-5, -5]
            self.upper_bounds = [10, 10]
            self.goal = [1, 1]

        # self.scale_factor = np.abs((np.max(self.upper_bounds) - np.min(self.lower_bounds))) * 2
# --- COST FUNCTION----------------------------------------------+
def error(self, x):
x1 = x[0]
x2 = x[1]
if self.function == 0: # alpine n.2
return -(np.sqrt(np.abs(x1)) * np.sin(x1) * np.sqrt(np.abs(x2)) * np.sin(x2))
elif self.function == 1: # beale
return (1.5 - x1 + x1 * x2) ** 2 + (2.25 - x1 + x1 * x2 ** 2) ** 2 + (2.625 - x1 + x1 * x2 ** 3) ** 2
elif self.function == 2: # bird
return np.sin(x1) * np.exp((1 - np.cos(x2)) ** 2) + np.cos(x2) * np.exp((1 - np.sin(x1)) ** 2)
elif self.function == 3: # drop-wave
return -(1 + np.cos(12 * np.sqrt(x1 ** 2 + x2 ** 2))) / (0.5 * (x1 ** 2 + x2 ** 2) + 2)
elif self.function == 4: # rastrigin
return 20 + (x1 ** 2 - 10 * np.cos(math.pi * 2 * x1)) + (x2 ** 2 - 10 * np.cos(math.pi * 2 * x2))
elif self.function == 5: # rosenbrock a=0,b=1
return (x2 - x1 ** 2) ** 2 + x1 ** 2
else:
return - (np.sqrt(x1) * np.sin(x1) * np.sqrt(x2) * np.sin(x2))
    def function_of(self, x, y):
        # Convenience wrapper: evaluate the selected benchmark at (x, y).
        return self.error([x, y])
def error_plot(self, values):
z = np.zeros(values.shape[0])
for i in range(values.shape[0]):
val = values[i]
z[i] = self.error(val)
return z
# --- ALGORITHMS-------------------------------------------------+
    def gradient_descent(self):
        """Placeholder for a gradient-descent optimiser.

        NOTE(review): not implemented -- the body only assigns a throwaway
        local and implicitly returns None.
        """
        x = 1
    def particle_swarm_optimization(self):
        """Run the PSO loop, filling the history/cost arrays for plotting."""
        # local copies of bounds for shorter calls
        lb = np.array(self.lower_bounds.copy())
        ub = np.array(self.upper_bounds.copy())
        assert np.all(ub > lb), 'All upper bound values must be greater than the corresponding lower bound values'

        # set lower and upper bounds to velocities based on position bounds
        upper_bound_velocity = np.abs(ub - lb)
        lower_bound_velocity = -upper_bound_velocity

        objective = self.error

        if self.PROCESSES > 1:
            import multiprocessing
            # NOTE(review): this binds a *local* name `mp_pool`, but the
            # map calls below read `self.mp_pool`, which the class leaves
            # as None -- running with PROCESSES > 1 will fail.
            mp_pool = multiprocessing.Pool(self.PROCESSES)

        # initialize a few arrays
        positions = np.random.rand(self.swarmsize, 2)  # particle position
        best_positions = np.zeros_like(positions)  # best known position per particle
        objectives = np.zeros(self.swarmsize)  # objective function value per particle
        best_objectives = np.ones(self.swarmsize) * np.inf  # best particle position objective function value
        best_swarm_positions = []
        best_swarm_objective = np.inf  # best swarm position

        # initialize particle positions randomly in the function bounds
        positions = lb + positions * (ub - lb)

        # calculate objectives for each particles
        if self.PROCESSES > 1:
            objectives = np.array(self.mp_pool.map(objective, positions))
        else:
            for i in range(self.swarmsize):
                objectives[i] = objective(positions[i, :])  # calculate objective function

        i_update = objectives < best_objectives  # selector to decide which particles to update
        best_positions[i_update, :] = positions[i_update, :].copy()
        best_objectives[i_update] = objectives[i_update]  # best particle position

        # index of best particle
        i_min = np.argmin(best_objectives)

        if best_objectives[i_min] < best_swarm_objective:  # if the best particle is in a better position than all other particles
            # NOTE(review): assigns `best_objective` (never read again);
            # `best_swarm_objective` therefore stays at +inf after init.
            best_objective = best_objectives[i_min]
            best_swarm_positions = best_positions[i_min, :].copy()  # best known swarm position
        else:
            best_swarm_positions = positions[0, :].copy()  # best known swarm position

        # calculate initial velocity vector
        velocities = lower_bound_velocity + np.random.rand(self.swarmsize, 2) * (
                upper_bound_velocity - lower_bound_velocity)

        # iterate over self.iterations
        it = 1
        while it <= self.iterations:
            # add position/velocity of all particles to history array for plotting
            self.x_hist[it - 1] = np.array(positions)
            self.v_hist[it - 1] = np.array(velocities)

            # update velocity vector with slight randomization to approach minimum
            rp = np.random.uniform(size=(self.swarmsize, 2))
            rg = np.random.uniform(size=(self.swarmsize, 2))
            velocities = self.omega * velocities + self.c1 * rp * (best_positions - positions) + self.c2 * rg * (
                    best_swarm_positions - positions)

            # update position vector
            positions = positions + velocities

            # prevent out of bounds: clamp out-of-range coordinates to the bound
            lower_mask = positions < lb
            upper_mask = positions > ub
            positions = positions * (~np.logical_or(lower_mask, upper_mask)) + lb * lower_mask + ub * upper_mask

            # update objective function
            if self.PROCESSES > 1:
                objectives = np.array(self.mp_pool.map(objective, positions))
            else:
                for i in range(self.swarmsize):
                    objectives[i] = objective(positions[i, :])

            # store best position
            i_update = objectives < best_objectives
            best_positions[i_update, :] = positions[i_update, :].copy()
            best_objectives[i_update] = objectives[i_update]

            # compare swarm best position with global best position
            i_min = np.argmin(best_objectives)
            self.min_cost_function[it - 1] = best_objectives[i_min]  # min cost function for plotting
            self.avg_cost_function[it - 1] = np.average(best_objectives)  # average cost function for plotting
            if best_objectives[i_min] < best_swarm_objective:
                best_particle_position = best_positions[i_min, :].copy()
                stepsize = np.sqrt(np.sum((best_swarm_positions - best_particle_position) ** 2))

                # converge early: truncate the history arrays and shrink
                # self.iterations so the while-condition fails next pass
                # if swarm objective change is too small
                if self.CONVERGENCE and np.abs(best_swarm_objective - best_objectives[i_min]) <= self.T2:
                    self.iterations = it
                    self.x_hist = self.x_hist[:it]
                    self.v_hist = self.v_hist[:it]
                    self.min_cost_function = self.min_cost_function[:it]
                    self.avg_cost_function = self.avg_cost_function[:it]
                # else if swarm best position change is too small
                elif self.CONVERGENCE and stepsize <= self.T1:
                    self.iterations = it
                    self.x_hist = self.x_hist[:it]
                    self.v_hist = self.v_hist[:it]
                    self.min_cost_function = self.min_cost_function[:it]
                    self.avg_cost_function = self.avg_cost_function[:it]
                # else do not converge early and iterate again
                else:
                    best_swarm_positions = best_particle_position.copy()
                    best_swarm_objective = best_objectives[i_min]
            it += 1
# --- PLOTTING---------------------------------------------------+
    def plot_all(self):
        """Draw the four result panels (min/avg cost, contour, 3D surface).

        NOTE(review): `Process(pso.animate2D(...))` calls the plot method
        immediately and passes its return value (None) as the Process
        constructor's first positional argument -- the started processes do
        no work of their own; the plotting happens in this process.  Also
        relies on a module-level global `pso` rather than `self`.
        """
        from multiprocessing import Process
        fig, ((ax1,ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=False, sharey=False)
        fig.set_figheight(15)
        fig.set_figwidth(15)
        p1 = Process(pso.animate2D(self.min_cost_function, "Min", fig, ax1, 1))
        p1.start()
        p2 = Process(pso.animate2D(self.avg_cost_function, "Average", fig, ax2, 2))
        p2.start()
        p3 = Process(pso.animate_contour(self.x_hist, self.v_hist, fig, ax3))
        p3.start()
        p4 = Process(pso.animate3D(self.x_hist, self.v_hist, fig, ax4))
        p4.start()
        plt.show()
    def animate2D(self, data_used, label, fig, ax1, ax):
        """Plot a cost-function curve into subplot slot *ax* (1-based)."""
        self.data = data_used.copy()
        self.stop = np.size(self.data)
        ax1.set(xlim=[0, self.stop], ylim=[np.min(self.data), np.max(self.data)])
        ax1.tick_params(axis='x', labelbottom=False)
        ax1.tick_params(axis='y', labelleft=False)
        indices = np.linspace(0, self.stop, self.stop - 1)
        axis = fig.add_subplot(2,2,ax)
        #ax.xlabel.set_text('Iterations')
        #ax.ylabel.set_text('Cost')
        axis.title.set_text(label + ' Cost Function')
        # static plot; the FuncAnimation-based version is disabled below
        axis.plot(self.data, lw=3)
        #line, = axis.plot([], [], lw=3)
        #self.ani_list[ax-1] = (animation.FuncAnimation(fig, self.animate, frames=self.iterations,fargs=[indices, self.data, line, ax], interval=20, blit=True))
    def animate(self, i, x, y, line, ax):
        """FuncAnimation callback: reveal the curve up to frame i.

        Stops the animation stored in ani_list[ax-1] once the last data
        point has been drawn.
        """
        if i >= self.stop - 1:
            self.ani_list[ax-1].event_source.stop()
        line.set_data(x[:i], y[:i])
        #line.axes.axis([0, np.size(self.data), np.min(self.data), np.max(self.data)])
        return line,
    def animate_contour(self, positions, velocities, fig, ax):
        """Animate the swarm over a 2D contour plot of the objective.

        :param positions: particle position history, shape
            (iterations, swarmsize, 2) — inferred from the indexing below
        :param velocities: matching velocity history (kept for the
            commented-out quiver overlay)
        :param fig: parent figure
        :param ax: axes to draw into
        """
        self.xs = positions.copy()
        self.vs = velocities.copy()
        self.stop = self.xs.shape[0]
        ax.set(xlim=[np.min(self.lower_bounds), np.max(self.upper_bounds)],
               ylim=[np.min(self.lower_bounds), np.max(self.upper_bounds)])
        # Pick a sampling range that comfortably covers the bound box for
        # each sign combination of the bounds.
        if np.max(self.upper_bounds) > 0 and np.min(self.lower_bounds) < 0:
            x = np.arange(np.min(self.lower_bounds) * 2, np.max(self.upper_bounds) * 2, 0.05)
            y = np.arange(np.min(self.lower_bounds) * 2, np.max(self.upper_bounds) * 2, 0.05)
        elif np.min(self.lower_bounds) < 0 and np.max(self.upper_bounds) < 0:
            x = np.arange(np.min(self.lower_bounds), 0 - np.max(self.upper_bounds), 0.05)
            y = np.arange(np.min(self.lower_bounds), 0 - np.max(self.upper_bounds), 0.05)
        elif np.min(self.lower_bounds) > 0 and np.max(self.upper_bounds) > 0:
            x = np.arange(abs(np.min(self.lower_bounds)) + np.min(self.lower_bounds), 2 * np.max(self.upper_bounds),
                          0.05)
            y = np.arange(abs(np.min(self.lower_bounds)) + np.min(self.lower_bounds), 2 * np.max(self.upper_bounds),
                          0.05)
        else:
            x = np.arange(2 * np.min(self.lower_bounds), abs(np.max(self.upper_bounds)) + np.max(self.upper_bounds),
                          0.05)
            y = np.arange(2 * np.min(self.lower_bounds), abs(np.max(self.upper_bounds)) + np.max(self.upper_bounds),
                          0.05)
        X, Y = np.meshgrid(x, y)
        # Evaluate the objective on the grid for the contour background.
        zs = np.array(self.function_of(np.ravel(X), np.ravel(Y)))
        Z = zs.reshape(X.shape)
        self.CS = ax.contour(X, Y, Z, levels = 20, cmap='viridis')
        ax.title.set_text("2D Contour Plot of Objective Function")
        Xs = self.xs[0]
        x_Xs = Xs[:, 0]
        y_Xs = Xs[:, 1]
        Vs = self.vs[0]
        x_Vs = Vs[:, 0]
        y_Vs = Vs[:, 1]
        cmap = rand_cmap(self.swarmsize)
        # Mark the goal: either a single (x, y) point or an array of them.
        if len(self.goal) == 2:
            goal_scatter = ax.scatter(self.goal[0], self.goal[1], s=self.swarmsize * 10, marker="x")
        else:
            goal_x = self.goal[:, 0]
            goal_y = self.goal[:, 1]
            goal_scatter = ax.scatter(goal_x, goal_y, s=self.swarmsize * 10, marker="x")
        # One color per particle so trails can be matched to dots.
        scatters = ax.scatter(x_Xs, y_Xs, c=[i for i in range(self.swarmsize)], cmap=cmap, marker="o", vmin=0,
                              vmax=self.swarmsize)
        # self.contour_vectors = self.ax2.quiver(x_Xs, y_Xs, x_Vs, y_Vs, scale=50)
        lines = []
        for i in range(self.swarmsize):
            line = ax.plot(self.xs[0, i, 0], self.xs[0, i, 1], c=cmap(i), alpha=0.3)
            lines.append(line)
        # Keep a reference on self so the animation is not garbage-collected.
        self.ani2 = animation.FuncAnimation(fig, self.animate2, frames=self.iterations, fargs=[scatters, lines],
                                            interval=100, blit=False, repeat=True)
    def animate2(self, i, scatters, lines):
        """FuncAnimation callback for the contour view: move the particle
        scatter to iteration *i* and draw a 5-iteration trailing path per
        particle."""
        # global contour_vectors
        plot_data = self.xs[i]
        v_plot_data = self.vs[i]
        # self.contour_vectors.remove()
        scatters.set_offsets(plot_data)
        if i > 5:
            for lnum, line in enumerate(lines):
                # Last 5 positions of particle `lnum` form its trail.
                data = self.xs[i - 5:i, lnum, :]
                line[0].set_data(data[:, 0], data[:, 1])
        # self.contour_vectors = self.ax2.quiver(plot_data[:, 0], plot_data[:, 1], v_plot_data[:, 0], v_plot_data[:, 1],scale=50)
        return scatters,
    def animate3D(self, positions, velocities, fig, ax):
        """Animate the swarm on a 3D surface plot of the objective.

        The passed-in *ax* is immediately replaced by a fresh 3D axes in
        grid slot 4; velocities are retained only for the commented-out
        quiver overlay.
        """
        self.xs = positions.copy()
        self.vs = velocities.copy()
        # Rebind ax: 3D plotting needs a projection='3d' axes.
        ax = fig.add_subplot(2,2,4, projection='3d')
        x = np.arange(np.min(self.lower_bounds), np.max(self.upper_bounds), 0.05)
        y = np.arange(np.min(self.lower_bounds), np.max(self.upper_bounds), 0.05)
        X, Y = np.meshgrid(x, y)
        zs = np.array(self.function_of(np.ravel(X), np.ravel(Y)))
        Z = zs.reshape(X.shape)
        ax.plot_surface(X, Y, Z, cmap='gray', edgecolor='none', alpha=0.2)
        ax.title.set_text("3D Plot of Objective Function")
        self.stop = self.xs.shape[0]
        # self.scale_factor /= self.stop
        Xs = self.xs[0]
        x_Xs = Xs[:, 0]
        y_Xs = Xs[:, 1]
        z_Xs = self.error_plot(Xs[:, :])
        Vs = self.vs[0]
        x_Vs = Vs[:, 0]  # * self.scale_factor
        y_Vs = Vs[:, 1]  # * self.scale_factor
        z_Vs = self.error_plot(Vs[:, :])  # * self.scale_factor
        # Mark the goal: either a single (x, y) point or an array of them.
        if len(self.goal) == 2:
            goal_z = self.error_plot(np.array([self.goal]))
            goal_scatter = ax.scatter(self.goal[0], self.goal[1], goal_z, s=self.swarmsize * 10, marker="x")
        else:
            goal_x = self.goal[:, 0]
            goal_y = self.goal[:, 1]
            goal_z = self.error_plot(np.array(self.goal))
            goal_scatter = ax.scatter(goal_x, goal_y, goal_z, s=self.swarmsize * 10, marker="x")
        cmap = rand_cmap(self.swarmsize)
        scatters = ax.scatter(x_Xs, y_Xs, z_Xs, c=[i for i in range(self.swarmsize)], cmap=cmap, marker="o",
                              vmin=0, vmax=self.swarmsize)
        # self.vectors = self.ax3.quiver(x_Xs, y_Xs, z_Xs, x_Vs, y_Vs, z_Vs)
        lines = []
        for i in range(self.swarmsize):
            line = ax.plot(self.xs[0, i, 0], self.xs[0, i, 1], z_Xs[i], c=cmap(i), alpha=0.5)
            lines.append(line)
        # Keep a reference on self so the animation is not garbage-collected.
        self.ani3 = animation.FuncAnimation(fig, self.animate3, frames=self.iterations, fargs=[scatters, lines],
                                            interval=100,
                                            blit=False, repeat=True)
    def animate3(self, i, scatters, lines):
        """FuncAnimation callback for the 3D view: move the scatter to
        iteration *i* (z lifted onto the objective surface) and draw a
        5-iteration trailing path per particle."""
        # global vectors, scale_factor
        plot_data = self.xs[i]
        v_plot_data = self.vs[i]
        z_Xs = self.error_plot(plot_data[:])
        # self.vectors.remove()
        if i > 5:
            for lnum, line in enumerate(lines):
                data = self.xs[i - 5:i, lnum, :]
                function_data = self.error_plot(data)
                line[0].set_data(data[:, 0], data[:, 1])
                line[0].set_3d_properties(function_data)
        # Mutating _offsets3d is the usual way to move a 3D scatter.
        scatters._offsets3d = (plot_data[:, 0], plot_data[:, 1], z_Xs)
        # self.vectors = self.ax3.quiver(plot_data[:, 0], plot_data[:, 1], z_Xs,v_plot_data[:, 0] * self.scale_factor, v_plot_data[:, 1] * self.scale_factor,z_Xs * self.scale_factor)
        return scatters,
# Script entry point: run a full optimization, then show all plots.
if __name__ == '__main__':
    pso = PSO()
    pso.particle_swarm_optimization()
    pso.plot_all()
| AlbertNegura/ParticleSwarmOptimization | particle_swarm_optimization.py | particle_swarm_optimization.py | py | 20,236 | python | en | code | 1 | github-code | 13 |
21774380069 | # -*- coding: utf-8 -*-
"""
XMS Client module
"""
from __future__ import absolute_import, division, print_function
try:
from urllib.parse import quote_plus, urlencode
except ImportError:
from urllib import quote_plus, urlencode
import logging
import requests
import clx.xms.__about__
from clx.xms import deserialize, serialize, api
class Client(object):
    """Client used to communicate with the XMS server.
    :param str service_plan_id: service plan identifier
    :param str token: authentication token
    :param str endpoint: URL to XMS endpoint
    :param float timeout: Connection and read timeout, in seconds
    This class will use the Requests_ library to communicate with XMS.
    It is intended as a long lived object and can handle multiple
    requests.
    For example, to send a simple parameterized text batch to three
    recipients we may use code such as::
        client = clx.xms.Client('{my-service-plan-id}', '{my-token}')
        try:
            batch_params = clx.xms.api.MtBatchTextSmsCreate()
            batch_params.sender = '12345'
            batch_params.recipients = ['987654321', '123456789', '567894321']
            batch_params.body = 'Hello, ${name}!'
            batch_params.parameters = {
                'name': {
                    '987654321': 'Mary',
                    '123456789': 'Joe',
                    'default': 'valued customer'
                }
            }
            batch = client.create_batch(batch_params)
            print('The batch was given ID %s' % batch.batch_id)
        except Exception as ex:
            print('Error creating batch: %s' % str(ex))
    and to fetch a batch we may use the code (with ``client`` being
    the same variable as above)::
        try:
            batch = client.fetch_batch('{a batch identifier}')
            print('The batch was sent from %s' % batch.sender())
        except Exception as ex:
            print('Error fetching batch: %s' % str(ex))
    .. _Requests: http://python-requests.org/
    """
    DEFAULT_ENDPOINT = "https://api.clxcommunications.com/xms"
    """The default XMS endpoint URL. This is the endpoint that will be
    used unless a custom one is specified in the :class:`Client`
    constructor.
    """
    DEFAULT_TIMEOUT = 30.0
    """The default timeout value in seconds. This is used unless a custom
    timeout value is specified in :attr:`timeout`.
    """
    _LOGGER = logging.getLogger('clx.xms.client')
    def __init__(self, service_plan_id, token,
                 endpoint=DEFAULT_ENDPOINT, timeout=DEFAULT_TIMEOUT):
        # A single Session is reused for connection pooling across calls.
        self._session = requests.Session()
        self._service_plan_id = service_plan_id
        self._token = token
        self._endpoint = endpoint
        self._timeout = timeout
    @property
    def service_plan_id(self):
        """The service plan identifier used for this client.
        :type: str
        """
        return self._service_plan_id
    @property
    def token(self):
        """The authentication token used for this client.
        :type: str
        """
        return self._token
    @property
    def endpoint(self):
        """The XMS endpoint used by this client.
        :type: str
        """
        return self._endpoint
    @property
    def timeout(self):
        """The timeout value used for this client. In seconds.
        The connection and read timeout, in seconds, used in
        communication with XMS. The default is specified by the constant
        :const:`DEFAULT_TIMEOUT`.
        :type: float
        """
        return self._timeout
    def _headers(self):
        """Build the HTTP headers sent with every request (bearer auth
        plus an SDK-identifying user agent)."""
        return {
            'Accept': 'application/json',
            'Authorization': 'Bearer ' + self._token,
            'User-Agent': "sdk-xms-python/%s; %s" %
            (clx.xms.__about__.__version__,
             requests.utils.default_user_agent())
        }
    def _url(self, sub_path):
        """Builds an endpoint URL for the given sub-path.
        :param str sub_path: the sub-path
        :return: an URL
        :rtype: str
        """
        return self._endpoint + '/v1/' + self._service_plan_id + sub_path
    def _batch_url(self, batch_id, sub_path=''):
        """Builds an endpoint URL for the given batch and sub-path.
        :param str batch_id: a batch identifier
        :param str sub_path: additional sub-path
        :returns: a complete URL
        :rtype: str
        :raises ValueError: if given an invalid batch ID
        """
        ebid = quote_plus(batch_id)
        if ebid == '':
            raise ValueError("Empty batch ID given")
        return self._url('/batches/' + ebid + sub_path)
    def _group_url(self, group_id, sub_path=''):
        """Builds an endpoint URL for the given group and sub-path.
        :param str group_id: a group identifier
        :param str sub_path: additional sub-path
        :returns: a complete URL
        :rtype: str
        :raises ValueError: if given an invalid group ID
        """
        egid = quote_plus(group_id)
        if egid == '':
            raise ValueError("Empty group ID given")
        return self._url('/groups/' + egid + sub_path)
    def _check_response(self, resp):
        """Checks the given HTTP response and returns it if OK.
        If any problem is found then a suitable exception is raised.
        This method also logs the request and response at the debug
        level.
        :param Response resp: HTTP response to check
        """
        Client._LOGGER.debug('Request{%s} Response(code %d){%s}',
                             resp.request.body, resp.status_code, resp.text)
        # If "200 OK" or "201 Created". We'll here assume any 2XX code is OK.
        if resp.status_code >= 200 and resp.status_code < 300:
            return resp
        # If "400 Bad Request" or "403 Forbidden".
        elif resp.status_code == 400 or resp.status_code == 403:
            error = deserialize.error(resp)
            raise clx.xms.exceptions.ErrorResponseException(
                error.text, error.code)
        # If "404 Not Found".
        elif resp.status_code == 404:
            raise clx.xms.exceptions.NotFoundException(resp.request.url)
        # If "401 Unauthorized"
        elif resp.status_code == 401:
            raise clx.xms.exceptions.UnauthorizedException(
                self._service_plan_id, self._token)
        else:
            raise clx.xms.exceptions.UnexpectedResponseException(
                "Unexpected HTTP status %s" % resp.status_code, resp.text)
    def _delete(self, url):
        """Issue a DELETE request and validate the response."""
        resp = self._session.delete(
            url, headers=self._headers(), timeout=self._timeout)
        return self._check_response(resp)
    def _get(self, url):
        """Issue a GET request and validate the response."""
        resp = self._session.get(
            url, headers=self._headers(), timeout=self._timeout)
        return self._check_response(resp)
    def _post(self, url, fields):
        """Issue a POST request with a JSON body and validate the response."""
        resp = self._session.post(
            url, json=fields, headers=self._headers(), timeout=self._timeout)
        return self._check_response(resp)
    def _put(self, url, fields):
        """Issue a PUT request with a JSON body and validate the response."""
        resp = self._session.put(
            url, json=fields, headers=self._headers(), timeout=self._timeout)
        return self._check_response(resp)
    def create_text_message(self, sender, recipient, body):
        """Creates a text message to a single recipient.
        This is a convenience method that creates a text batch having
        a single recipient.
        :param str sender: the message sender
        :param str recipient: the message recipient
        :param str body: the message body
        :returns: the created batch
        :rtype: MtBatchTextSmsResult
        """
        batch = api.MtBatchTextSmsCreate()
        batch.sender = sender
        batch.recipients = {recipient}
        batch.body = body
        return self.create_batch(batch)
    def create_binary_message(self, sender, recipient, udh, body):
        """Creates a binary message to a single recipient.
        This is a convenience method that creates a binary batch having
        a single recipient.
        :param str sender: the message sender
        :param str recipient: the message recipient
        :param binary udh: the message User Data Header
        :param binary body: the message binary body
        :returns: the created batch
        :rtype: MtBatchBinarySmsResult
        """
        batch = api.MtBatchBinarySmsCreate()
        batch.sender = sender
        batch.recipients = {recipient}
        batch.udh = udh
        batch.body = body
        return self.create_batch(batch)
    def create_batch(self, batch):
        """Creates the given batch.
        :param batch: the text or binary batch to create
        :type batch: MtBatchTextSmsCreate or MtBatchBinarySmsCreate
        :returns: the batch result
        :rtype: MtBatchTextSmsResult or MtBatchBinarySmsResult
        """
        # The presence of a `udh` attribute distinguishes binary batches.
        if hasattr(batch, 'udh'):
            fields = serialize.binary_batch(batch)
        else:
            fields = serialize.text_batch(batch)
        response = self._post(self._url('/batches'), fields)
        return deserialize.batch_result(response)
    def replace_batch(self, batch_id, batch):
        """Replaces the batch with the given ID with the given batch.
        :param str batch_id: identifier of the batch
        :param batch: the replacement batch
        :type batch: MtBatchTextSmsCreate or MtBatchBinarySmsCreate
        :return: the resulting batch
        :rtype: MtBatchTextSmsResult or MtBatchBinarySmsResult
        """
        if hasattr(batch, 'udh'):
            fields = serialize.binary_batch(batch)
        else:
            fields = serialize.text_batch(batch)
        response = self._put(self._batch_url(batch_id), fields)
        return deserialize.batch_result(response)
    def update_batch(self, batch_id, batch):
        """Updates the text or binary batch with the given identifier.
        :param str batch_id: identifier of the batch
        :param batch: the update description
        :type batch: MtBatchTextSmsUpdate or MtBatchBinarySmsUpdate
        :returns: the updated batch
        :rtype: MtBatchTextSmsResult or MtBatchBinarySmsResult
        """
        if hasattr(batch, 'udh'):
            fields = serialize.binary_batch_update(batch)
        else:
            fields = serialize.text_batch_update(batch)
        result = self._post(self._batch_url(batch_id), fields)
        return deserialize.batch_result(result)
    def cancel_batch(self, batch_id):
        """Cancels the batch with the given batch identifier.
        :param str batch_id: the batch identifier
        :returns: nothing
        """
        self._delete(self._batch_url(batch_id))
    def fetch_batch(self, batch_id):
        """Fetches the batch with the given batch identifier.
        :param str batch_id: batch identifier
        :returns: the corresponding batch
        :rtype: MtBatchSmsResult
        """
        result = self._get(self._batch_url(batch_id))
        return deserialize.batch_result(result)
    def fetch_batches(self,
                      page_size=None,
                      senders=None,
                      tags=None,
                      start_date=None,
                      end_date=None):
        """Fetch the batches matching the given filter.
        Note, calling this method does not actually cause any network
        traffic. Listing batches in XMS may return the result over
        multiple pages and this call therefore returns an object of
        the type :class:`.Pages`, which will fetch result pages as
        needed.
        :param int page_size: Maximum number of batches to retrieve per page.
        :param senders: Fetch only batches having one of these senders.
        :type senders: set[str] or None
        :param tags: Fetch only batches having one or more of these tags.
        :type tags: set[str] or None
        :param start_date: Fetch only batches sent at or after this date.
        :type start_date: date or None
        :param end_date: Fetch only batches sent before this date.
        :type end_date: date or None
        :returns: the result pages
        :rtype: Pages
        """
        def fetcher(page):
            """Helper"""
            params = {'page': page}
            if page_size:
                params['page_size'] = page_size
            if senders:
                # Sorted so the query string is deterministic.
                params['from'] = ','.join(sorted(senders))
            if tags:
                params['tags'] = ','.join(sorted(tags))
            if start_date:
                params['start_date'] = start_date.isoformat()
            if end_date:
                params['end_date'] = end_date.isoformat()
            query = urlencode(params)
            result = self._get(self._url('/batches?' + query))
            return deserialize.batches_page(result)
        return api.Pages(fetcher)
    def create_batch_dry_run(self, batch, num_recipients=None):
        """Simulates sending the given batch.
        The method takes an optional argument for instructing XMS to
        respond with per-recipient statistics, if non-null then this
        number of recipients will be returned in the result.
        :param MtBatchSmsCreate batch: the batch to simulate
        :param num_recipients:
            number of recipients to show in per-recipient result
        :type num_recipients: int or None
        :returns: result of dry-run
        :rtype: MtBatchDryRunResult
        """
        if hasattr(batch, 'udh'):
            fields = serialize.binary_batch(batch)
        else:
            fields = serialize.text_batch(batch)
        path = '/batches/dry_run'
        if num_recipients:
            path += '?per_recipient=true'
            path += '&number_of_recipients=%d' % num_recipients
        response = self._post(self._url(path), fields)
        return deserialize.batch_dry_run_result(response)
    def fetch_batch_tags(self, batch_id):
        """Fetches the tags associated with the given batch.
        :param str batch_id: the batch identifier
        :returns: a set of tags
        :rtype: set[str]
        """
        result = self._get(self._batch_url(batch_id, '/tags'))
        return deserialize.tags(result)
    def replace_batch_tags(self, batch_id, tags):
        """Replaces the tags of the given batch.
        :param str batch_id: identifier of the batch
        :param set[str] tags: the new set of batch tags
        :returns: the new batch tags
        :rtype: set[str]
        """
        fields = serialize.tags(tags)
        result = self._put(self._batch_url(batch_id, '/tags'), fields)
        return deserialize.tags(result)
    def update_batch_tags(self, batch_id, tags_to_add, tags_to_remove):
        """Updates the tags of the given batch.
        :param str batch_id: batch identifier
        :param set[str] tags_to_add: tags to add to batch
        :param set[str] tags_to_remove: tags to remove from batch
        :returns: the updated batch tags
        :rtype: set[str]
        """
        fields = serialize.tags_update(tags_to_add, tags_to_remove)
        result = self._post(self._batch_url(batch_id, '/tags'), fields)
        return deserialize.tags(result)
    def fetch_delivery_report(self, batch_id, kind=None,
                              status=None, code=None):
        """Fetches a delivery report for a batch.
        The report type can be one of ``None``, "full", or "summary".
        When "full" the report includes the individual recipients.
        When ``None`` then the XMS default value is used.
        The report can be further limited by status and code. For
        example, to retrieve a summary report limited to messages
        having delivery status "Delivered" or "Failed" and codes "0",
        "11", or "400", one could call::
            client.fetch_delivery_report(
                'MyBatchId',
                'summary',
                {'Delivered', 'Failed'},
                {0, 11, 400});
        If the non-identifier parameters are left unspecified then the
        XMS defaults are used. In particular, all statuses and codes
        are included in the report.
        :param str batch_id: identifier of the batch
        :param kind: delivery report type
        :type kind: str or None
        :param set[str] status: statuses to fetch
        :param set[int] code: codes to fetch
        :returns: the batch delivery report
        :rtype: BatchDeliveryReport
        """
        params = {}
        if kind:
            params['type'] = kind
        if status:
            params['status'] = ','.join(sorted(status))
        if code:
            params['code'] = ','.join([str(i) for i in sorted(code)])
        path = '/delivery_report'
        if params:
            path += '?' + urlencode(params)
        result = self._get(self._batch_url(batch_id, path))
        return deserialize.batch_delivery_report(result)
    def fetch_recipient_delivery_report(self, batch_id, recipient):
        """Fetches a delivery report for a specific batch recipient.
        :param str batch_id: the batch identifier
        :param str recipient: the batch recipient
        :returns: the delivery report
        :rtype: BatchRecipientDeliveryReport
        """
        path = '/delivery_report/' + quote_plus(recipient)
        result = self._get(self._batch_url(batch_id, path))
        return deserialize.batch_recipient_delivery_report(result)
    def create_group(self, group):
        """Creates the given group.
        :param GroupCreate group: group description
        :returns: the created group
        :rtype: GroupResult
        """
        fields = serialize.group_create(group)
        response = self._post(self._url('/groups'), fields)
        return deserialize.group_result(response)
    def replace_group(self, group_id, group):
        """Replaces the group with the given group identifier.
        :param str group_id: identifier of the group
        :param GroupCreate group: new group description
        :returns: the group after replacement
        :rtype: GroupResult
        """
        fields = serialize.group_create(group)
        result = self._put(self._group_url(group_id), fields)
        return deserialize.group_result(result)
    def update_group(self, group_id, group):
        """Updates the group with the given identifier.
        :param str group_id: identifier of the group
        :param GroupUpdate group: the update description
        :returns: the updated batch
        :rtype: GroupResult
        """
        fields = serialize.group_update(group)
        result = self._post(self._group_url(group_id), fields)
        return deserialize.group_result(result)
    def delete_group(self, group_id):
        """Deletes the group with the given group identifier.
        :param str group_id: the group identifier
        :returns: Nothing
        """
        self._delete(self._group_url(group_id))
    def fetch_group(self, group_id):
        """Fetches the group with the given group identifier.
        :param str group_id: group identifier
        :returns: the corresponding group
        :rtype: GroupResult
        """
        result = self._get(self._group_url(group_id))
        return deserialize.group_result(result)
    def fetch_groups(self, page_size=None, tags=None):
        """Fetch the groups matching the given filter.
        Note, calling this method does not actually cause any network
        traffic. Listing groups in XMS may return the result over
        multiple pages and this call therefore returns an object of
        the type :class:`.Pages`, which will fetch result pages as
        needed.
        :param page_size: Maximum number of groups to retrieve per page.
        :type page_size: int or None
        :param tags: Fetch only groups having one or more of these tags.
        :type tags: set[str] or None
        :returns: the result pages
        :rtype: Pages
        """
        def fetcher(page):
            """Helper"""
            params = {'page': page}
            if page_size:
                params['page_size'] = page_size
            if tags:
                params['tags'] = ','.join(sorted(tags))
            query = urlencode(params)
            result = self._get(self._url('/groups?' + query))
            return deserialize.groups_page(result)
        return api.Pages(fetcher)
    def fetch_group_members(self, group_id):
        """Fetches the members (MSISDNs) that belong to the given group.
        :param str group_id: the group identifier
        :returns: a set of MSISDNs
        :rtype: set[str]
        """
        result = self._get(self._group_url(group_id, '/members'))
        return deserialize.group_members(result)
    def fetch_group_tags(self, group_id):
        """Fetches the tags associated with the given group.
        :param str group_id: the group identifier
        :returns: a set of tags
        :rtype: set[str]
        """
        result = self._get(self._group_url(group_id, '/tags'))
        return deserialize.tags(result)
    def replace_group_tags(self, group_id, tags):
        """Replaces the tags of the given group.
        :param str group_id: identifier of the group
        :param set[str] tags: the new set of group tags
        :returns: the new group tags
        :rtype: set[str]
        """
        fields = serialize.tags(tags)
        result = self._put(self._group_url(group_id, '/tags'), fields)
        return deserialize.tags(result)
    def update_group_tags(self, group_id, tags_to_add, tags_to_remove):
        """Updates the tags of the given group.
        :param str group_id: group identifier
        :param set[str] tags_to_add: tags to add to group
        :param set[str] tags_to_remove: tags to remove from group
        :returns: the updated group tags
        :rtype: set[str]
        """
        fields = serialize.tags_update(tags_to_add, tags_to_remove)
        result = self._post(self._group_url(group_id, '/tags'), fields)
        return deserialize.tags(result)
    def fetch_inbound(self, inbound_id):
        """Fetches the inbound message with the given identifier.
        The returned message is either textual or binary.
        :param str inbound_id: message identifier
        :returns: the fetched message
        :rtype: MoTextSms or MoBinarySms
        """
        eiid = quote_plus(inbound_id)
        if not eiid:
            raise ValueError("Empty inbound ID given")
        result = self._get(self._url("/inbounds/" + eiid))
        return deserialize.mo_sms(result)
    def fetch_inbounds(self,
                       page_size=None,
                       recipients=None,
                       start_date=None,
                       end_date=None):
        """Fetch inbound messages matching the given filter.
        Note, calling this method does not actually cause any network
        traffic. Listing inbound messages in XMS may return the result
        over multiple pages and this call therefore returns an object
        of the type :class:`.Pages`, which will fetch result pages as
        needed.
        :param page_size: The maximum number of messages to retrieve per page.
        :type page_size: int or None
        :param recipients: Fetch only messages having one of these recipients.
        :type recipients: set[str] or None
        :param start_date: Fetch only messages received at or after this date.
        :type start_date: date or None
        :param end_date: Fetch only messages received before this date.
        :type end_date: date or None
        :returns: the result pages
        :rtype: Pages
        """
        def fetcher(page):
            """Helper"""
            params = {'page': page}
            if page_size:
                params['page_size'] = page_size
            if recipients:
                params['to'] = ','.join(sorted(recipients))
            if start_date:
                params['start_date'] = start_date.isoformat()
            if end_date:
                params['end_date'] = end_date.isoformat()
            query = urlencode(params)
            result = self._get(self._url('/inbounds?' + query))
            return deserialize.inbounds_page(result)
        return api.Pages(fetcher)
| clxcommunications/sdk-xms-python | clx/xms/client.py | client.py | py | 24,270 | python | en | code | 3 | github-code | 13 |
44178297742 | from selenium import webdriver
#from selenium.webdriver.common.keys import Keys
#from selenium.webdriver.support import expected_conditions as EC import re
import re
import os
import time
import subprocess
import io
from PIL import Image
import base64
# Session-level bookkeeping of already-processed users/post ids.
user_history = []
id_history = []
# Local HTTP(S) proxy; exported to subprocesses via the `env` copy below.
proxy = '127.0.0.1:7890'
env = os.environ.copy()
env.update({
    'http_proxy': proxy,
    'https_proxy': proxy
})
## utils
def unique(x: list):
    """Return the distinct elements of *x* as a list.

    Uses ``dict.fromkeys`` instead of ``set`` so that first-seen order is
    preserved and results are deterministic across runs (the previous
    ``list(set(x))`` returned elements in arbitrary order).  Elements must
    be hashable, as before.
    """
    return list(dict.fromkeys(x))
def save_base64(img_bs64: str, save_path: str):
    """Decode a base64-encoded image string and write it to *save_path*."""
    raw = base64.decodebytes(bytes(img_bs64, 'utf-8'))
    Image.open(io.BytesIO(raw)).save(save_path)
def good_face(img_path: str):
    """Placeholder face-quality filter: currently accepts every image.

    TODO: plug in a real face-detection / quality check on *img_path*.
    """
    return True
## get a chrome driver
def chrome_gen(proxy: str = '127.0.0.1:7890'):
    """Create a Chrome webdriver routed through *proxy* and open YouTube.

    :param proxy: host:port of the local HTTP(S) proxy
    :returns: a ready selenium Chrome driver
    """
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument(f'--proxy-server={proxy}')
    # NOTE(review): the `chrome_options=` keyword is deprecated in
    # Selenium 4 (newer releases expect `options=`) — confirm the pinned
    # selenium version before changing.
    chrome = webdriver.Chrome(chrome_options=chrome_options)
    chrome.get('https://youtube.com')
    ## cookie = {}
    #chrome.add_cookie(cookie)
    #chrome.refresh()
    return chrome
## given: id_list ['1287832478203748213'], save_path="./downloaded/tiktok/"
def download(id_list: list, save_path: str, cookie_path: str = "./cookies/tiktok.cookie", verbose: bool = True, proxy: str = "127.0.0.1:7890"):
    """Download each post id in *id_list* with the external ``instaloader``
    tool, recording successes in the module-level ``id_history``.

    Fix: the already-downloaded check now runs *before* logging and
    building the command, so skipped ids no longer produce misleading
    "Downloading ..." messages.

    :param id_list: post identifiers to fetch
    :param save_path: target directory (currently unused — instaloader
        writes to its default location; kept for interface compatibility)
    :param cookie_path: reserved for authenticated downloads (unused)
    :param verbose: print per-item progress
    :param proxy: reserved; the proxy actually comes from the module `env`
    """
    for i, post_id in enumerate(id_list):
        # Skip ids we already fetched this session.
        if post_id in id_history:
            continue
        if verbose:
            print(f"[ INFO ] Downloading {post_id} of task {i}/{len(id_list)}")
        # "--" stops option parsing so a leading "-" marks a single post.
        cmd = ["instaloader", "--", f"-{post_id}"]
        subprocess.run(cmd, env=env)
        id_history.append(post_id)
    return
## download videos from current page
def find_video_id(chrome, id_reg="https://www.instagram.com/p/(.*)", verbose: bool = True):
    """Collect Instagram post ids (and thumbnails) from the page currently
    loaded in *chrome*.

    :param chrome: selenium webdriver with the target page already open
    :param id_reg: regex a link's href must match to count as a post
    :param verbose: print progress information
    :returns: list of post ids that passed ``good_face`` and are not
        already in the global ``id_history``
    """
    links = chrome.find_elements(by='xpath', value="//a[@href]")
    id_img_list = []  # pairs of (post_id, base64-encoded thumbnail)
    ## summarized all links
    for link in links:
        id_url = link.get_attribute('href')
        if re.match(id_reg, id_url) is not None:
            img_url = link.find_element(by='xpath', value='//img[@src]').get_attribute('src')
            # [28:39]: the 11-char shortcode after
            # "https://www.instagram.com/p/" — assumes that exact prefix.
            id_img_list.append((id_url[28:39], img_url))
    id_img_list = unique(id_img_list)
    if verbose: print(f"[ INFO ] All {len(id_img_list)} pages for found")
    id_list = []
    for (id, img_bs64) in id_img_list:
        if verbose: print(f"[ INFO ] downloading and testing img id={id}")
        ## instagram use base64 encoding for imgs
        # [22:] assumes a "data:image/jpeg;base64," data-URI prefix —
        # TODO confirm prefix length for all thumbnail types.
        save_base64(img_bs64[22:], f'./imgs/{id}.png')
        #subprocess.run(['wget',f'{img_url}','-O', f'./imgs/{id}.img', '--quiet'], env=env)
        if good_face("temp.img") and id not in id_history:
            id_list.append(id)
    if verbose: print(f"[ INFO ] Will download video_id: {id_list}")
    return id_list
## scrape video by search
def by_search(chrome, keywords: list, scroll_times: int = 3):
    """Search each keyword, then repeatedly scroll the result page and
    download the videos found on each scroll step.

    NOTE(review): the search URL targets douyin.com while find_video_id
    matches instagram.com links — verify which site this path is for.
    """
    for keyword in keywords:
        if not os.path.exists(f"downloaded/{keyword}"): os.mkdir(f"downloaded/{keyword}")
        ## get douyin.com, filter options: type=video
        chrome.get(f"https://douyin.com/search/{keyword}?type=video")
        time.sleep(3)
        for scroll in range(scroll_times):
            id_list = find_video_id(chrome)
            download(id_list, save_path=f"downloaded/tiktok/{keyword}")
            chrome.execute_script("window.scrollTo(0,document.body.scrollHeight)")
            time.sleep(4)  ## wait for webscripts ready
    return
## download video from ID recommendation recursively
def by_ID(chrome, id_list: list, depth: int, verbose: bool = True):
    """Download the given videos, then recursively follow each video's
    recommendation page up to *depth* further levels.

    :param id_list: seed video identifiers
    :param depth: remaining recursion depth; 0 stops after this level
    """
    if verbose: print(f"[ INFO ] download by ID: Recursive-depth = {depth}")
    download(id_list, save_path=f"./downloaded/tiktok/")
    for id in id_list:
        chrome.get(f"https://www.douyin.com/video/{id}")
        time.sleep(2)
        id_list = find_video_id(chrome)
        # An empty result usually means a captcha page: pause for a
        # manual solve before continuing.
        if id_list == []: input("Please Verficate it...")
        if depth != 0:
            by_ID(chrome, id_list, depth - 1)
    return
if __name__ == "__main__":
chrome = chrome_gen()
#by_explore(chrome)
by_search(chrome,["vlog"])
chrome.quit()
| Vermillion-de/spider | src/youtube.py | youtube.py | py | 4,287 | python | en | code | 1 | github-code | 13 |
24601642704 | import random
from locust import HttpUser, task, between
from aueb_api.aueb_api.settings import STRESS_TEST_TOKEN
class AuebApiUser(HttpUser):
    """A simulated user querying the AUEB API."""
    # Pause 1 to 5 seconds between consecutive tasks.
    wait_time = between(1, 5)
    header = {
        'Authorization': 'Token {}'.format(STRESS_TEST_TOKEN),
    }
    # Departments used for the filtered exam queries.
    DEPARTMENTS = [
        'ΠΛΗΡ',
        'ΔΕΟΣ',
    ]
    @task(2)
    def query_exams(self):
        """Query the standard API endpoint, returning all exams."""
        self.client.get('/api/exams')
    @task
    def query_exams_by_department(self):
        """Query the exams endpoint filtered by one random department.

        Bug fix: ``random.choices`` (plural) returns a one-element *list*,
        which formatted into the URL as ``department=['ΠΛΗΡ']``.
        ``random.choice`` yields a single department string.
        """
        url = '/api/exams?department={}'.format(random.choice(self.DEPARTMENTS))
        self.client.get(url)
    def on_start(self):
        """Attach the auth-token header to every request of this user."""
        self.client.headers = self.header
| KonstantinosVasilopoulos/aueb_api | stress_test/locustfile.py | locustfile.py | py | 837 | python | en | code | 3 | github-code | 13 |
36331589455 | # -*- coding: utf -*-
# Create your views here.
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from osto.models import Barcode
from tilit.models import Account, AccountCode
def maybe_get_price(barcode):
    """Return the current price of the barcode's product, or 0 when the
    barcode has no associated product (or the product has no price)."""
    product = getattr(barcode, 'product', None)
    return getattr(product, 'current_price', 0)
def index(request):
    """Point-of-sale view: first input names the account, later inputs are
    product barcodes; 'Accept' debits the account by the items' total and
    'Clear' resets everything.

    Session keys: 'account' (raw code or None) and 'items' (raw barcode
    strings).
    """
    account = request.session.get('account', None)
    items = request.session.get('items', None)
    if items == None:
        items = request.session['items'] = []
    if request.method == 'POST' and request.POST['inputfield']:
        inputfield = request.POST['inputfield']
        if inputfield == 'Clear':
            # Reset the whole purchase session.
            items = request.session['items'] = []
            account = request.session['account'] = None
        elif inputfield == 'Accept':
            # Charge the account for all scanned items, then reset.
            account = AccountCode.get_or_code(account)
            if hasattr(account, "account") and len(items) > 0:
                account.account.debit(sum([maybe_get_price(Barcode.get_or_code(item)) for item in items]))
                items = request.session['items'] = []
                account = request.session['account'] = None
        else:
            # An account is already selected: treat input as a barcode;
            # otherwise the input selects the account.
            if account:
                items.append(inputfield)
                request.session.modified = True
            else:
                account = request.session['account'] = inputfield
                request.session.modified = True
    account = AccountCode.get_or_code(account)
    if not hasattr(account, "account"):
        # Unknown code: drop it and prompt for an account name (Finnish UI).
        request.session["account"] = None
        inputlabel = u"Nimi tilille"
    elif account:
        inputlabel = u"Syötä tuote"
    else:
        inputlabel = u"Syötä tili"
    items = [Barcode.get_or_code(x) for x in items]
    # Items are shown newest-first; total is the sum of known prices.
    data = {'account': account, 'inputlabel': inputlabel, 'items': items[::-1], 'total': sum([maybe_get_price(item) for item in items])}
    return render_to_response('osto/index.html',
                              data,
                              context_instance=RequestContext(request))
def new_account(request):
    """Create an Account plus its AccountCode from POSTed 'nimi'/'code',
    clear the purchase session, and redirect to the front page."""
    request.session['items'] = []
    request.session['account'] = None
    if request.method == "POST" and request.POST["nimi"] and request.POST["code"]:
        account = Account(name=request.POST["nimi"], balance="0.00")
        account.save()
        account_code = AccountCode(account=account, code=request.POST["code"])
        account_code.save()
    # Redirect unconditionally (a GET merely resets the session).
    return redirect("/")
| HelsinkiHacklab/limu | limuweb/osto/views.py | views.py | py | 2,412 | python | en | code | 4 | github-code | 13 |
13748428820 | #!/usr/bin/env python3
import socket, struct, time, os, netifaces, netaddr, nmap, pprint, re, subprocess, logging, argparse, resource
from netaddr import *
from portscan import scan_ports
from pwd import getpwnam
import getpass
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
global addr, netmask, cidr, allhosts, scannedhosts, strscan, xx, results
def OpenFile():
    """Open the report file and keep its handle in the module-global ``f``."""
    global f
    f = open('portscan_output.txt', 'w+')
def WriteFile(strscan):
    """Append ``strscan`` (coerced to str) to the report file opened by OpenFile()."""
    #print(strscan)
    f.write(str(strscan))
def CloseFile():
    """Close the report file opened by OpenFile()."""
    f.close()
def OpenFileLimit():
    """Raise the open-files (NOFILE) rlimit to 10000 when the current soft
    limit is lower, saving the original values in module globals
    (soft/hard/softlimit/hardlimit) so main() can restore them later."""
    global soft, hard, softlimit, hardlimit
    # NOTE(review): `ulimit -Sn` is queried twice (ulimitmax and softlimit
    # hold the same value) — presumably historical; confirm before changing.
    ulimitmax = subprocess.getoutput('ulimit -Sn')
    softlimit = subprocess.getoutput('ulimit -Sn')
    hardlimit = subprocess.getoutput('ulimit -Hn')
    nulimitmax = int(ulimitmax)
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if os.name.split()[0] == 'posix':
        if nulimitmax < 10000:
            print()
            # Never request a hard limit below 10000.
            if int(hardlimit) < 10000:
                newhardlimit = 10000
            else:
                newhardlimit = hardlimit
            print("Open File limit too small, setting Open Files limit to 10000")
            resource.setrlimit(resource.RLIMIT_NOFILE, (int(newhardlimit), int(newhardlimit)))
            s, h = resource.getrlimit(resource.RLIMIT_NOFILE)
            print("Soft: %s, Hard: %s\n" % (s, h))
    print()
def GetIPAndHostName():
    """Resolve this host's FQDN and IPv4 address; store the IP in the
    module-global ``curip`` and print both."""
    fqdn = socket.getfqdn()
    global curip
    curip = socket.gethostbyname(fqdn)
    print ("%s, %s" % (fqdn, curip))
def GetSubNet():
    """Wrap the current IP (``curip``) in a netaddr IPNetwork, stored in the
    module-global ``ip``."""
    global ip
    ip = IPNetwork(curip)
def CurDateAndTime():
    """Print the current date/time, forced to the US/Pacific timezone."""
    os.environ['TZ'] = 'US/Pacific'
    time.tzset()
    ztime = time.strftime("%a, %d %b %Y %H:%M:%S %Z", time.localtime())
    print ("%s" % ztime)
def get_address_in_network():
    """Determine the local IPv4 subnet, run an nmap ping/SYN sweep over it,
    and print (optionally also write to the report file) one
    "hostname :: ip :: mac :: vendor" line per host that is up.

    Reads the module-global ``ip`` set by GetSubNet(); stores the discovered
    addr/netmask/cidr/allhosts in module globals.
    """
    global addr, netmask, cidr, allhosts
    network = netaddr.IPNetwork(ip)
    # NOTE(review): the -p/-f flags are re-parsed here even though main()
    # also parses them — duplicated on purpose? confirm before refactoring.
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', action='store_true', help='scan ports')
    parser.add_argument('-f', action='store_true', help='write output to a file')
    results = parser.parse_args()
    # Pick the last non-loopback interface that has an IPv4 address.
    for iface in netifaces.interfaces():
        if iface == 'lo':
            continue
        addresses = netifaces.ifaddresses(iface)
        if network.version == 4 and netifaces.AF_INET in addresses:
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            print ("using Current interface: %s" % iface)
    allhosts = IPNetwork(cidr)
    # print("All hosts: %d" % (allhosts.size - 2))
    print ("IPADDR: %s" % addr)
    print ("NETMASK: %s" % netmask)
    print ("CIDR: %s " % cidr)
    print ("Nodes in Subnet: %d" % (allhosts.size - 2))
    print()
    starttime = time.time()
    nm = nmap.PortScanner()
    # Fast sweep: -F (top ports), -sS (SYN scan, needs root), -PE (ICMP echo).
    a = nm.scan(hosts=str(cidr), arguments=' --system-dns -F -T4 -R -sS -PE --min-rate 1000 --max-retries 1')
    endtime = time.time()
    totaltime = endtime - starttime
    n = 0
    print('-------------------------------------------------------------------------------')
    print('Hostname/FQDN :: IP Address :: Mac :: Vendor')
    print('-------------------------------------------------------------------------------')
    print()
    if results.f:
        WriteFile("-------------------------------------------------------------------------------" + "\n" +
                "Hostname/FQDN :: IP Address :: Mac :: Vendor" + "\n" +
                "-------------------------------------------------------------------------------" + "\n")
        print("\nwriting to file portscan_output.txt in current directory.\n")
        WriteFile("\n")
    for k,v in a['scan'].items():
        if str(v['status']['state']) == 'up':
            n += 1
            pp = pprint.PrettyPrinter(indent=0)
            # The host/vendor fields are scraped out of the stringified nmap
            # dicts by splitting on quotes, then stripped of brackets/braces.
            splithost = str(v['hostnames'])
            splitip = str(v['addresses']['ipv4'])
            splitvendor = str(v['vendor'])
            zhost = str(splithost.split("'")[7:8])
            newzhost = re.sub('[\[\]\']', '', zhost)
            if len(newzhost) <= 4:
                Znewzhost = 'NULL'
            else:
                Znewzhost = newzhost
            ZipAddr = splitip
            zvendor1 = str(splitvendor.split("'")[1:2])
            zvendor2 = str(splitvendor.split("'")[3:4])
            newzvendor1 = re.sub('[\[\]\'\{\}]', '', zvendor1)
            newzvendor2 = re.sub('[\[\]\'\{\}]', '', zvendor2)
            if len(newzvendor1) != 0:
                Znewzvendor1 = newzvendor1
            else:
                Znewzvendor1 = 'NULL'
            if len(newzvendor2) != 0:
                Znewzvendor2 = newzvendor2
            else:
                Znewzvendor2 = 'NULL'
            print("%s :: %s :: %s :: %s" % (Znewzhost, ZipAddr, Znewzvendor1, Znewzvendor2))
            if results.p:
                scan_ports(ZipAddr, 1)
            if results.f:
                WriteFile(Znewzhost + " :: ")
                WriteFile(ZipAddr + " :: ")
                WriteFile(Znewzvendor1 + " :: ")
                WriteFile(Znewzvendor2 + " :: ")
                WriteFile("\n")
    print ("Number of hosts found in Subnet: %d" % n)
    if results.f:
        WriteFile("Nodes in Subnet: %d\n" % n)
    print ("Arp scan in %f seconds...." % totaltime)
    if results.f:
        WriteFile("Arp scan in %f seconds....\n\n" % totaltime)
def main():
    """Entry point: require root, parse the -p/-f flags, run the subnet scan,
    then restore the original open-files limit if it was raised."""
    astarttime = time.time()
    currentuser = getpass.getuser()
    userUID = getpwnam(currentuser).pw_uid
    if userUID > 0:
        # The -sS (raw-socket SYN) scan in get_address_in_network needs root.
        print("Must be root user to run this!\n\n")
        exit()
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', action='store_true', help='scan ports')
    parser.add_argument('-f', action='store_true', help='write output to a file')
    results = parser.parse_args()
    if results.f:
        OpenFile()
    OpenFileLimit()
    CurDateAndTime()
    GetIPAndHostName()
    GetSubNet()
    get_address_in_network()
    aendtime = time.time()
    atotaltime = aendtime - astarttime
    print()
    print("Total time: %f seconds" % atotaltime)
    if results.f:
        WriteFile("Total time: %f seconds\n\n" % atotaltime)
    print()
    if int(softlimit) < 10000:
        print("reverting Open files to original setting Soft: %s Hard: %s" % (softlimit, hardlimit))
        # Use RLIMIT_NOFILE, as everywhere else in this file: RLIMIT_OFILE is
        # a BSD-only alias that is not defined on every platform.
        resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
        ss, hh = resource.getrlimit(resource.RLIMIT_NOFILE)
        print("Soft: %s, Hard: %s\n" % (ss, hh))
    if results.f:
        CloseFile()
main()
| nixgeekk-zz/netscan | netscan.py | netscan.py | py | 7,158 | python | en | code | 0 | github-code | 13 |
36031646192 | from selenium import webdriver
from selenium.webdriver.common.by import By
import time
# Scrape title / view-count / upload-age for every video on FIFA's videos tab.
url = 'https://www.youtube.com/@fifa/videos'
driver = webdriver.Chrome()
driver.get(url)
# By.CLASS_NAME does not accept a compound (space-separated) class list, so
# the original locator could not match; a CSS selector requiring both
# classes does the same job correctly.
videos = driver.find_elements(By.CSS_SELECTOR, '.style-scope.ytd-rich-grid-media')
for vid in videos:
    # Relative XPath must start with './/' to search inside `vid`; a leading
    # '//' searches the whole document on every iteration.
    title = vid.find_elements(By.XPATH, './/*[@id="details"]')
    views = vid.find_elements(By.XPATH, './/*[@id="metadata-line"]/span[1]')
    when = vid.find_elements(By.XPATH, './/*[@id="metadata-line"]/span[2]')
    # Print the element text, not the WebElement reprs.
    print([t.text for t in title], [v.text for v in views], [w.text for w in when])
time.sleep(30)
driver.quit()
12543623470 | #from collections import deque
import random
import numpy as np
#from utilities import transpose_list
from collections import namedtuple, deque
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
BUFFER_SIZE = int(1e6) # replay buffer size
BATCH_SIZE = 1024 # minibatch size
class ReplayBuffer:
    """Fixed-size buffer that stores multi-agent experience tuples."""

    def __init__(self, buffer_size = BUFFER_SIZE, batch_size = BATCH_SIZE, seed=0):
        """Initialize a ReplayBuffer object.
        Params
        ======
            buffer_size (int): maximum number of experiences kept; the
                oldest entries are evicted automatically by the deque
            batch_size (int): number of experiences returned by sample()
            seed (int): seed for the random module used when sampling
        """
        self.memory = deque(maxlen=buffer_size)  # internal memory (deque)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["obs", "state", "actions", "rewards", "next_obs", "next_state", "dones"])
        self.seed = random.seed(seed)

    def push(self, obs, state, actions, rewards, next_obs, next_state, dones):
        """Add a new experience to memory."""
        e = self.experience(obs, state, actions, rewards, next_obs, next_state, dones)
        self.memory.append(e)

    def sample(self):
        """Randomly sample a batch of experiences from memory.

        Returns a 7-tuple of lists (one item per sampled experience), each
        item converted to a numpy array:
        (obs, states, actions, rewards, next_obs, next_states, dones).
        """
        experiences = random.sample(self.memory, k=self.batch_size)
        obs_vector = [np.array(e.obs) for e in experiences if e is not None]
        states = [np.array(e.state) for e in experiences if e is not None]
        actions_vector = [np.array(e.actions) for e in experiences if e is not None]
        rewards_vector = [np.array(e.rewards) for e in experiences if e is not None]
        next_obs_vector = [np.array(e.next_obs) for e in experiences if e is not None]
        next_states = [np.array(e.next_state) for e in experiences if e is not None]
        dones_vector = [np.array(e.dones) for e in experiences if e is not None]
        return (obs_vector, states, actions_vector, rewards_vector, next_obs_vector, next_states, dones_vector)

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
| CristyanGil/Project3-DRL-Udacity | buffer.py | buffer.py | py | 4,309 | python | en | code | 0 | github-code | 13 |
33400538462 | import glob
import os.path
import torch
from torchvision import transforms
from PIL import Image
from torch.utils.data import Dataset
# Side length (in pixels) of the square model input.
PICTURE_SIZE = 96
# Class-name prefix -> integer training label.
LABEL_MAPPING = {
    'cat': 0,
    'dog': 1,
}
# Shared preprocessing: center-crop and resize to PICTURE_SIZE, then convert
# to a uint8 CxHxW tensor (scaling to [0, 1] happens in image_to_tensor).
transform = transforms.Compose([
    transforms.CenterCrop(PICTURE_SIZE),
    transforms.Resize(PICTURE_SIZE),
    transforms.PILToTensor(),
])
def image_to_tensor(picture_path: str) -> torch.Tensor:
    """Load an image file and return it as a float tensor scaled to [0, 1]."""
    with Image.open(picture_path) as image:
        # image.thumbnail((PICTURE_SIZE, PICTURE_SIZE))
        # `transform` yields uint8 values in [0, 255]; dividing normalises.
        return transform(image) / 255.0
class CatsDogsDataset(Dataset):
    """Cats-vs-dogs image dataset over a directory of *.jpg files.

    Training files are expected to be named ``<label>.<id>.jpg`` (e.g.
    "cat.123.jpg"); inference files ``<int_label>,<id>``.
    """

    def __init__(self, path: str, is_training: bool = True):
        """
        :param path: directory containing the *.jpg files
        :param is_training: if True, labels come from the "cat"/"dog" file
            name prefix; otherwise the prefix is assumed to already be the
            integer label
        """
        self.is_training = is_training
        self.path = path
        # These must be *instance* attributes. As class-level lists (the
        # previous version) they were shared by every instance, so building
        # e.g. a train and a test dataset mixed their file lists together.
        self.images = []
        self.labels = []
        self.data = glob.glob(self.path + "/*.jpg")

    def _load(self):
        """Eagerly load every image/label pair into self.images/self.labels."""
        for f in glob.glob(self.path + "/*.jpg"):
            filename = os.path.basename(f)
            label, _id, _ = filename.split('.')
            self.images.append(image_to_tensor(f))
            self.labels.append(LABEL_MAPPING[label])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        image_path = self.data[index]
        file_name = os.path.basename(image_path)
        image = image_to_tensor(image_path)
        if self.is_training:
            # Training files look like "cat.123.jpg".
            label, _id, _ = file_name.split('.')
            label = LABEL_MAPPING[label]
        else:
            # Inference files look like "<int_label>,<id>".
            label, _id = file_name.split(',')
            label = int(label)
        return image, label
| twolights/pytorch-practice | practices/dogs_and_cats/datasets/preprocess.py | preprocess.py | py | 1,669 | python | en | code | 0 | github-code | 13 |
7236810384 | import cv2
import numpy as np
import warnings
from skimage.feature import peak_local_max
from astropy.io import fits
from astropy.convolution import convolve, Gaussian2DKernel
from astropy.stats import sigma_clipped_stats, gaussian_fwhm_to_sigma
from photutils.background import MedianBackground, Background2D
# from photutils.detection import find_peaks
from photutils.datasets import apply_poisson_noise
from photutils.segmentation import deblend_sources, detect_sources, detect_threshold
from galmask.utils import find_farthest_label, find_closest_label, getLargestCC, getCenterLabelRegion
def galmask(
    image, npixels, nlevels, nsigma, contrast, min_distance, num_peaks, num_peaks_per_label,
    connectivity=4, kernel=None, seg_image=None, mode="1", remove_local_max=True, deblend=False
):
    """Removes background source detections from input galaxy image.

    :param image: Galaxy image; assumed to be roughly centered on the galaxy.
    :type image: numpy.ndarray
    :param npixels: The no. of connected pixels that an object must have to be detected.
    :type npixels: int
    :param nlevels: No. of multi-thresholding levels to be used for deblending.
    :type nlevels: int
    :param nsigma: No. of standard deviations per pixel above the background to be considered as a part of source.
    :type nsigma: float
    :param contrast: Controls the level of deblending.
    :type contrast: float
    :param min_distance: The minimum distance between distinct local peaks.
    :type min_distance: int
    :param num_peaks: Maximum no. of peaks in the image.
    :type num_peaks: int
    :param num_peaks_per_label: Maximum no. of peaks per label of the segmentation map.
    :type num_peaks_per_label: int
    :param connectivity: Either 4 or 8, defaults to 4.
    :type connectivity: int
    :param kernel: Kernel array to use for convolution; when omitted a
        normalized 3x3 Gaussian with FWHM = 3 is built.
    :type kernel: numpy.ndarray, optional
    :param seg_image: Precomputed segmentation map; when given, source
        detection is skipped.
    :type seg_image: numpy.ndarray, optional
    :param mode: "0" selects the center label on the raw detections,
        "1" performs connected-component analysis, "2" selects the center
        label on the deblended map; defaults to "1".
    :type mode: str, optional
    :param remove_local_max: Whether to remove the label whose local peak
        lies farthest from the image center. If unsure, keep the default.
    :type remove_local_max: bool, optional
    :param deblend: Whether to deblend sources in the image. Set to True if
        there are nearby/overlapping sources, defaults to `False`.
    :type deblend: bool, optional
    :return: tuple ``(galmasked, x)`` — the input image multiplied by the
        final mask, and the mask itself.
    :rtype: tuple(numpy.ndarray, numpy.ndarray)
    """
    _img_shape = image.shape
    # Convolve input image with a 2D kernel.
    if kernel is None: # Create a Gaussian kernel with FWHM = 3.
        sigma = 3.0 * gaussian_fwhm_to_sigma
        kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
        kernel.normalize()
        kernel = kernel.array
    if not np.allclose(np.sum(kernel), 1.0):
        warnings.warn("Kernel is not normalized.")
    if np.isclose(np.sum(kernel), 0.0):
        raise ValueError("Kernel sum is close to zero. Cannot use it for convolution.")
    # Estimate a scalar median background, subtract it, then smooth.
    bkg_level = MedianBackground().calc_background(image)
    image_bkg_subtracted = image - bkg_level
    convolved_data = convolve(image_bkg_subtracted, kernel, normalize_kernel=True)
    if seg_image is None:
        threshold = detect_threshold(image_bkg_subtracted, nsigma=nsigma, background=0.0)
        # Since threshold includes background level, we do not subtract background from data that is input to detect_sources.
        objects = detect_sources(convolved_data, threshold, npixels=npixels)
        if objects is None:
            raise ValueError("No source detection found in the image!")
        objects = objects.data
    else:
        objects = seg_image.copy()
        objects = objects.astype('uint8')
    # Mode "0": pick the label covering the center, skip deblending/CCA.
    if mode == "0":
        x = getCenterLabelRegion(objects)
        galmasked = np.multiply(x, image)
        return galmasked, x
    if deblend:
        segm_deblend = deblend_sources(convolved_data, objects, npixels=npixels, nlevels=nlevels, contrast=contrast).data
    else:
        segm_deblend = objects.copy()
    if remove_local_max:
        # Zero out the label whose peak lies farthest from the image center.
        local_max = peak_local_max(
            convolved_data, min_distance=min_distance, num_peaks=num_peaks, num_peaks_per_label=num_peaks_per_label, labels=segm_deblend
        )
        index = find_farthest_label(local_max, _img_shape[0]/2, _img_shape[1]/2)
        val = segm_deblend[local_max[index][0], local_max[index][1]]
        segm_deblend[segm_deblend==val] = 0
    segm_deblend_copy = segm_deblend.copy()
    if mode == "1":
        segm_deblend_copy = segm_deblend_copy.astype('uint8')
        # Below line has issues with opencv-python-4.5.5.64, so to fix, downgrade the version.
        nb_components, objects_connected, stats, centroids = cv2.connectedComponentsWithStats(segm_deblend_copy, connectivity=connectivity) # We want to remove all detections far apart from the central galaxy.
        max_label, max_size = max([(i, stats[i, cv2.CC_STAT_AREA]) for i in range(1, nb_components)], key=lambda x: x[1])
        # The below lines of code are to ensure that if the central source (which is of concern) is not of maximum area, then we should
        # not remove it, and instead select the center source by giving it more priority than area-wise selection. If indeed the central
        # source is of the greatest area, then we can just select it.
        closest_to_center_label = find_closest_label(centroids[1:, :], _img_shape[0]/2, _img_shape[1]/2) # The first row in `centroids` corresponds to the whole image which we do not want. So consider all rows except the first.
        if closest_to_center_label == max_label:
            x = (objects_connected == max_label).astype(float)
        else:
            x = (objects_connected == closest_to_center_label).astype(float)
    elif mode == "2":
        x = getCenterLabelRegion(segm_deblend_copy)
    # NOTE(review): a `mode` other than "0"/"1"/"2" leaves `x` unbound and
    # raises NameError on the next line — confirm whether that is intended.
    galmasked = np.multiply(x, image)
    return galmasked, x
| Yash-10/galmask | galmask/galmask.py | galmask.py | py | 6,178 | python | en | code | 6 | github-code | 13 |
12323877451 | import random
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
driver = webdriver.Chrome()
# Hide the `navigator.webdriver` automation flag before any page script runs,
# so the survey site's bot detection does not see Selenium.
driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
    "source": """
    Object.defineProperty(navigator, 'webdriver', {
        get: () => undefined
    })
    """
})
driver.get('https://www.wjx.cn/jq/87910206.aspx')
driver.implicitly_wait(10)
divs = driver.find_elements(By.CSS_SELECTOR, '.div_question')
print(divs)
print(len(divs))
"""
单选题: 1 - 10 列表中的索引范围是 0 - 9
多选题: 11 - 12 列表中的索引范围是 10 - 11
"""
# (The note above: questions 1-10 are single-choice -> list indices 0-9;
# questions 11-12 are multi-choice -> list indices 10-11.)
one_choice = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
any_choice = [10, 11]
# Single-choice questions
for i in one_choice:
    lis = divs[i].find_elements(By.CSS_SELECTOR, 'ul li')
    # Pick one option at random
    random.choice(lis).click()
# Multi-choice questions
for j in any_choice:
    lis = divs[j].find_elements(By.CSS_SELECTOR, 'ul li')
    # Pick three distinct options at random.
    # random.choices() can pick several but may repeat the same option:
    # random.choices(lis).click()
    obj = random.sample(lis, k=3)
    for k in obj:
        k.click()
# Click submit
driver.find_element(By.CSS_SELECTOR, '.submitbutton').click()
time.sleep(2)
# Click the "smart verification" captcha widget
driver.find_element(By.CSS_SELECTOR, '.sm-ico .sm-ico-wave').click()
input()
driver.quit()
| lll13508510371/Scrapping | 11 多进程与多线程/01 上课代码/第10次作业讲解/0315-10-00000002-山禾-selenium问卷星.py | 0315-10-00000002-山禾-selenium问卷星.py | py | 1,350 | python | en | code | 0 | github-code | 13 |
72485782738 | from random import randint
def main():
    """Game loop: set up the pencil count and the first player, then
    alternate turns between the human ("John") and the bot until no
    pencils remain."""
    # get and validate initial pencil input
    pencils = pencil_valid()
    # get and validate player choice
    player = player_check()
    while pencils > 0:
        printer(pencils, player)
        if player == "John":
            # Human turn: read the move from stdin.
            pencils = update_pencils(pencils)
        else:
            # Bot turn: compute and announce the move.
            bot_pencils = player_bot(pencils)
            print(bot_pencils)
            pencils -= bot_pencils
        player = switch_player(player)
    # After the loop `player` is the one who did NOT take the last pencil;
    # whoever takes the last pencil loses, so this player wins.
    print(f"{player} won!")
def pencil_valid():
    """ Gets and validates input for the number of pencils
    :return (int) pencils: the number of pencils to start the game
    """
    while True:
        try:
            pencils = int(input("How many pencils would you like to use: "))
        except ValueError:
            # int() raises ValueError on non-numeric text. (The previous
            # `except TypeError and ValueError` clause evaluated the boolean
            # expression `TypeError and ValueError`, i.e. caught only
            # ValueError anyway.)
            print("The number of pencils should be numeric")
            continue
        if pencils <= 0:
            # Reject zero *and* negatives; the old `== 0` check let
            # negative numbers through.
            print("The number of pencils should be positive")
        else:
            return pencils
def player_check():
    """ Gets and validates player name input
    :return (str) player: chooses which player will begin the game
    """
    player_names = ["John", "Jack"]
    while True:
        player = input("Who will be the first (John, Jack):")
        if player in player_names:
            return player
        # The old message printed the literal placeholders
        # "*Name1*"/"*Name2*"; show the real names instead. (The dead
        # `try/except TypeError` wrapper is gone: input() returns a str
        # and cannot raise TypeError here.)
        print("Choose between John and Jack")
def printer(pencil_count, player_name):
    """Show the remaining pencils as '|' characters, then announce whose
    turn it is.

    :parameter (int) pencil_count: the number of pencils to draw
    :parameter (str) player_name: the player about to move
    :return: None
    """
    print("|" * pencil_count)
    print(f"{player_name}'s turn:")
def switch_player(player_name):
    """Return the name of the other player.

    :parameter (str) player_name: the player who just moved
    :return (str): "Jack" if player_name is "John", otherwise "John"
    """
    return "Jack" if player_name == "John" else "John"
def update_pencils(pencil_count):
    """ Gets and validates the number of pencils chosen each turn
    :parameter (int) pencil_count: the number of pencils left in the game
    :return (int): the number of pencils left after current user choice
    """
    num_allowed = (1, 2, 3)
    while True:
        try:
            request = int(input())
        except ValueError:
            # int() raises ValueError on bad text. (The previous
            # `except TypeError and ValueError` clause caught only
            # ValueError anyway, since the expression evaluates to it.)
            print("Possible values: '1', '2', or '3'")
            continue
        if request not in num_allowed:
            print("Possible values: '1', '2', or '3'")
        elif pencil_count - request < 0:
            print("Too many pencils were taken")
        else:
            return pencil_count - request
def player_bot(pencil_count):
    """ Automates a second player with optimal rules for playing
    :parameter (int) pencil_count: the current number of pencils left in the game
    :return (int): the optimal number of pencils to take in a turn
    """
    if pencil_count == 1:  # forced move
        return 1
    remainder = pencil_count % 4
    if remainder == 1:
        # Losing position: every reply hands the opponent a winning
        # position, so any of 1-3 is as good as another.
        return randint(1, 3)
    # Otherwise take enough to leave the opponent a count of the form
    # 4k + 1: remainder 2 -> take 1, remainder 3 -> take 2, remainder 0 -> take 3.
    return 3 if remainder == 0 else remainder - 1
""" TESTING CODE FOR PLAYER_BOT
one = [1]
two = [2, 6, 10, 14]
three = [3, 7, 11, 15]
four = [4, 8, 12, 16]
five = [5, 9, 13, 17]
for i in range(17):
if i in one:
print(i, player_bot(i) == 1)
elif i in two:
print(i, player_bot(i) == 1)
elif i in three:
print(i, player_bot(i) == 2)
elif i in four:
print(i, player_bot(i) == 3)
elif i in five:
print(i, player_bot(i) == (0 or 2))
"""
main()
| jdstrongpdx/JetBrains-last_pencil | Last Pencil/task/game.py | game.py | py | 4,286 | python | en | code | 0 | github-code | 13 |
71110521299 | from pathlib import Path
import pytest
from pycromanager import start_headless
from pycromanager.acq_util import cleanup
from pymmcore_plus import find_micromanager
from pycro_plus_bridge import pycroCorePlus
@pytest.fixture(scope="session")
def core():
    """Session-scoped fixture: boot a headless Micro-Manager with the demo
    config, yield the core bridge, and tear the headless instance down."""
    mm_app_path = Path(find_micromanager())
    start_headless(
        str(mm_app_path),
        str(mm_app_path / "MMConfig_demo.cfg"),
        timeout=5000,
        convert_camel_case=False,
    )
    core = pycroCorePlus()
    yield core
    # Teardown: stop the headless Micro-Manager. (A leftover debug
    # `raise ValueError("we got here?")` used to follow and made every
    # session teardown fail.)
    cleanup()
def test_for_smoke(core: pycroCorePlus):
    """Smoke test: both the pycromanager-style and pymmcore-plus-style snap
    APIs return a non-empty image."""
    # Use the session fixture's core; re-instantiating pycroCorePlus here
    # shadowed the fixture and bypassed the shared headless setup.
    core.snapImage()
    img1 = core.getImage()
    assert img1.sum() > 0
    # pymmc+ method that hopefully calls back through java
    img2 = core.snap()
    assert img2.sum() > 0
| ianhi/pycro-plus-bridge | tests/test_pycro_plus_bridge.py | test_pycro_plus_bridge.py | py | 804 | python | en | code | 0 | github-code | 13 |
17043364094 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOfflineProviderEquipmentAuthRemoveModel(object):
    """Request model for removing an offline-provider equipment authorization.

    A plain data holder: each field is exposed through a property pair, and
    to_alipay_dict()/from_alipay_dict() convert between the model and the
    wire-format dict used by the OpenAPI gateway.
    """

    # Wire-format field names, in serialization order.
    _FIELD_NAMES = ("device_id", "device_type", "ext_info",
                    "merchant_pid", "operator", "operator_id")

    def __init__(self):
        self._device_id = None
        self._device_type = None
        self._ext_info = None
        self._merchant_pid = None
        self._operator = None
        self._operator_id = None

    @property
    def device_id(self):
        return self._device_id

    @device_id.setter
    def device_id(self, value):
        self._device_id = value

    @property
    def device_type(self):
        return self._device_type

    @device_type.setter
    def device_type(self, value):
        self._device_type = value

    @property
    def ext_info(self):
        return self._ext_info

    @ext_info.setter
    def ext_info(self, value):
        self._ext_info = value

    @property
    def merchant_pid(self):
        return self._merchant_pid

    @merchant_pid.setter
    def merchant_pid(self, value):
        self._merchant_pid = value

    @property
    def operator(self):
        return self._operator

    @operator.setter
    def operator(self, value):
        self._operator = value

    @property
    def operator_id(self):
        return self._operator_id

    @operator_id.setter
    def operator_id(self, value):
        self._operator_id = value

    def to_alipay_dict(self):
        """Serialize every set (truthy) field into the gateway dict format."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            # Nested API models know how to serialize themselves.
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a gateway dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayOfflineProviderEquipmentAuthRemoveModel()
        for name in AlipayOfflineProviderEquipmentAuthRemoveModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayOfflineProviderEquipmentAuthRemoveModel.py | AlipayOfflineProviderEquipmentAuthRemoveModel.py | py | 3,427 | python | en | code | 241 | github-code | 13 |
36178162426 | from flask import Flask,Response,request,json
from flask_pymongo import MongoClient
import logging as log
from bson.json_util import dumps
app = Flask(__name__)
class MongoAPI:
    """Thin wrapper around one MongoDB collection.

    ``data`` must contain the keys 'database' and 'collection'; the
    connection string is hard-coded to the "mymongo" container host.
    """
    def __init__(self,data):
        log.basicConfig(level=log.DEBUG, format='%(asctime)s %(levelname)s:\n%(message)s\n')
        #self.client = MongoClient("mongodb://localhost:27017/")
        self.client = MongoClient("mongodb://mymongo:27017/")
        database = data['database']
        collection = data['collection']
        cursor = self.client[database]
        self.collection = cursor[collection]
        self.data = data
    def read(self):
        """Return every document in the collection, minus the '_id' field."""
        log.info("Showing all Data")
        documents = self.collection.find()
        output = [{item: data[item] for item in data if item != '_id'} for data in documents]
        return output
    def write(self,data):
        """Insert data['Document'] and return a status dict with the new id."""
        log.info("Writing Data")
        new_doc = data['Document']
        response = self.collection.insert_one(new_doc)
        output = {'Status':'Inserted Successfully','ID':str(response.inserted_id)}
        return output
@app.route('/')
def base():
    """Health-check endpoint: always reports the service is up."""
    return Response(response=json.dumps({'Status':'UP'}),
                    status=200,
                    mimetype='application/json')
@app.route('/get_records',methods=['GET'])
def retrieve_all():
    """Return all documents of the collection named in the JSON body.

    NOTE(review): this GET handler reads a JSON *body* (database/collection
    connection info) — unusual for GET; confirm clients actually send one.
    """
    data =request.json
    if data is None or data =={}:
        return Response(response=json.dumps({"Error":"Please provide connection info"}),
                        status=400,
                        mimetype='application/json')
    obj1 = MongoAPI(data)
    response = obj1.read()
    return Response(response=json.dumps(response),
                    status=200,
                    mimetype='application/json')
@app.route('/get_records',methods=['POST'])
def add_employee():
    """Insert the body's 'Document' into the collection named in the body."""
    data = request.json
    if data is None or data =={} or 'Document' not in data:
        return Response(response=json.dumps({"Error":"Please provide connection info"}),
                        status=400,
                        mimetype='application/json')
    obj1 = MongoAPI(data)
    response = obj1.write(data)
    return Response(response=json.dumps(response),
                    status=200,
                    mimetype='application/json')
'''
def hello():
return 'Connected Successfully! '
'''
if __name__ == '__main__':
app.run(debug=True,port=5002,host='0.0.0.0') | prao02/flask_app | test.py | test.py | py | 2,457 | python | en | code | 0 | github-code | 13 |
10967114087 | import networkx as nx
import matplotlib.pyplot as plt
from homework2.task_defaults import DATA_ROOT, RESULTS_ROOT
class Task3:
    """Max-flow visualisation: for three sample graphs, run Dinitz's
    algorithm from "s" to "t" and save an annotated circular-layout plot."""
    prefix = 'task3'
    def run(self):
        """Process sample3.1.txt .. sample3.3.txt and write one PNG each."""
        for i in range(1, 4):
            graph = self.read_txt(DATA_ROOT / f'sample3.{i}.txt')
            fig, ax = plt.subplots(1, 1, figsize=(16, 10))
            pos = nx.circular_layout(graph)
            # Highlight source/sink; grey out everything else.
            node_colors = ["skyblue" if n in {"s", "t"} else "lightgray" for n in graph.nodes]
            # NOTE(review): the cutoff is hard-coded to 16 here and in the
            # title below — consider making it a parameter.
            res_graph = nx.flow.dinitz(graph, s="s", t="t", capacity="capacity", cutoff=16)
            # Edges carrying no flow are greyed out and left unlabeled.
            edge_colors = ["lightgray" if res_graph[u][v]["flow"] == 0 else "black" for u, v in graph.edges]
            edge_labels = {(u, v): f"{res_graph[u][v]['flow']}/{graph[u][v]['capacity']}"
                           for u, v in graph.edges
                           if res_graph[u][v]["flow"] != 0}
            nx.draw_networkx_nodes(graph,
                                   pos=pos,
                                   ax=ax,
                                   node_size=500,
                                   node_color=node_colors)
            nx.draw_networkx_labels(graph,
                                    pos=pos,
                                    ax=ax,
                                    font_size=14)
            nx.draw_networkx_edges(graph,
                                   pos=pos,
                                   ax=ax,
                                   edge_color=edge_colors)
            nx.draw_networkx_edge_labels(graph,
                                         pos=pos,
                                         ax=ax,
                                         edge_labels=edge_labels,
                                         font_size=14)
            ax.set_title(f"Cutoff value = {16}; Max Flow = {res_graph.graph['flow_value']}", size=22)
            fig.tight_layout()
            plt.savefig(RESULTS_ROOT / f'sample3.{i}.png')
    @staticmethod
    def read_txt(file):
        """Parse "n1 n2 capacity weight" lines (skipping '#' comments) into a
        directed graph with capacity/weight edge attributes."""
        graph = nx.DiGraph()
        with open(file) as f:
            for line in f.readlines():
                if line[0] != f'#':
                    n1, n2, capacity, weight = line.split()
                    graph.add_edge(n1, n2, weight=int(weight), capacity=int(capacity))
        return graph
| Sumrak1337/modern_computer_technologies | homework2/tasks/task3.py | task3.py | py | 2,279 | python | en | code | 0 | github-code | 13 |
9570546696 | from tkinter import *
def find_gcd():
    """Read the two entry widgets, clear them, and show the GCD of the two
    numbers in the result label.

    Reads the module-level widgets ``e1``/``e2`` and writes the result to
    the module-level StringVar ``st``.
    """
    from math import gcd  # local import: the file only does `from tkinter import *`
    num1 = e1.get()
    num2 = e2.get()
    e1.delete(0, END)
    e2.delete(0, END)
    if num1.isdigit() and num2.isdigit():
        # math.gcd replaces the hand-rolled Euclid loop and, unlike the old
        # loop, handles a 0 input without a ZeroDivisionError.
        st.set("GCD: " + str(gcd(int(num1), int(num2))))
    else:
        print("Error, Please enter an integer number")
# Build the main window.
root = Tk()
root.geometry("400x450+1000+100")
root.title('Find GCD')
root.resizable(False, False)
root.config(bg="#7BDBF8")
# First operand: label + entry.
l1 = Label(root, text="Enter The First Number:", font=("Arial", 18))
l1.pack(pady=20)
e1 = Entry(root, font=("Arial", 14, "bold"), justify="center")
e1.pack()
# Second operand: label + entry.
l2 = Label(root, text="Enter The Second Number:", font=("Arial", 18))
l2.pack(pady=20)
e2 = Entry(root, font=("Arial", 14, "bold"), justify="center")
e2.pack()
# Button triggers find_gcd(), which reads e1/e2 and writes to st.
b1 = Button(root, text="Click to find GCD", relief="raised", overrelief="flat", bd=6,
            font=("Arial", 16, "bold"), bg="blue", fg="white", command=find_gcd)
b1.pack(pady=40)
# Result label bound to the StringVar updated by find_gcd().
st = StringVar()
st.set("GCD: ")
l3 = Label(root, textvariable=st, font=("Arial", 18, "bold"))
l3.pack(pady=20)
root.mainloop()
| hozan66/College-Practical-Code | Python (Cryptography)/pythonProject(GUI)/GUI3(find GCD using function).py | GUI3(find GCD using function).py | py | 1,294 | python | en | code | 1 | github-code | 13 |
28364344675 | from aws_cdk import (
Aws,
aws_iam as iam,
aws_secretsmanager as secretsmanager,
CfnOutput
)
from constructs import Construct
class IAMSetup(Construct):
    """CDK construct that provisions a time-boxed assumable role (guarded by
    a generated external ID stored in Secrets Manager), a programmatic IAM
    user whose secret key is stored in Secrets Manager, and a group that is
    allowed to assume the role."""
    def __init__(self, scope: Construct, construct_id: str, props: dict, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        # Getting props
        project_name = props.get("project_name", "demo")
        roles = props.get("iam_properties", {}).get("assume_roles", [])
        user_name = props.get("iam_properties", {}).get("user_name", f"{project_name}_user")
        # Validity window for assuming the role (DateGreaterThan/DateLessThan below).
        context = props.get("iam_properties", {}).get("context", {"from": "2023-01-01T00:00:00Z",
                                                                  "up": "2023-06-30T23:59:59Z"})
        rs = []
        if roles:
            for r in roles:
                rs.append(r["arn"])
        else:
            # Fall back to the CDK bootstrap CloudFormation execution role.
            rs = [f"arn:aws:iam::{Aws.ACCOUNT_ID}:role/cdk-hnb659fds-cfn-exec-role-{Aws.ACCOUNT_ID}-{Aws.REGION}"]
        # Creating statement
        st = iam.PolicyStatement(actions=["sts:AssumeRole"], effect=iam.Effect.ALLOW, resources=rs)
        # Random external ID generated and stored in Secrets Manager.
        ext_id = secretsmanager.Secret(self, "IDSecret", secret_name=f"/{project_name}/SecretExternalID",
                                       generate_secret_string=secretsmanager.SecretStringGenerator(
                                           secret_string_template='{"type": "role_ext_id"}',
                                           exclude_punctuation=True,
                                           exclude_characters="-`",
                                           generate_string_key='external_id'
                                       )
                                       )
        self.role = iam.Role(self, "CustomRole", role_name=f"Role{project_name.capitalize()}",
                             assumed_by=iam.PrincipalWithConditions(iam.AccountPrincipal(account_id=Aws.ACCOUNT_ID),
                                                                    conditions={
                                                                        "StringLike": {
                                                                            "sts:RoleSessionName": "${aws:username}"
                                                                        },
                                                                        "DateGreaterThan": {
                                                                            "aws:CurrentTime": context["from"]},
                                                                        "DateLessThan": {
                                                                            "aws:CurrentTime": context["up"]}
                                                                    }
                                                                    ),
                             # Using a SecretValue here risks exposing your secret.
                             # Call AWS Secrets Manager directly in your runtime code. Call 'secretValue.unsafeUnwrap()' if you understand and accept the risks.
                             external_ids=[ext_id.secret_value_from_json('external_id').unsafe_unwrap()]
                             )
        self.role.add_to_policy(st)
        self.role.node.add_dependency(ext_id)
        # Creating iam programmatic user
        self.user = iam.User(self, user_name, user_name=user_name, path=f"/{project_name}/")
        # Create Access key and secret key for user
        access_key = iam.AccessKey(self, "AccessKey", user=self.user)
        secretsmanager.Secret(self, "Secret",
                              secret_name=f"/{project_name}/UserAccessKey",
                              secret_string_value=access_key.secret_access_key
                              )
        # Create Group
        self.group = iam.Group(self, f"Group_{project_name}", group_name=f"{project_name}Group")
        self.user.add_to_group(self.group)
        # Grant User assume role
        self.role.grant_assume_role(self.group)
        # Audit user Activity
        CfnOutput(self, "RoleARN", value=self.role.role_arn, description=f"Role ARN ")
| velez94/cdkv2_prog_user_deploy | src/constructs/iam_role.py | iam_role.py | py | 4,239 | python | en | code | 1 | github-code | 13 |
71270823697 | import sys
from math import ceil
from collections import defaultdict
from collections import OrderedDict
class Reaction:
    """One reaction rule: a set of input chemicals that produces a fixed
    quantity of a single output chemical."""

    def __init__(self, string):
        (self.out_quantity, self.out_chemical), inputs = self.parse_reaction(string)
        # Map input chemical name -> quantity, preserving the parse order.
        self.inp_chemicals = {name: qty for qty, name in inputs}

    def parse_reaction(self, reaction):
        """Parse a rule like '7 A, 1 B => 1 C' into
        ((1, 'C'), ((7, 'A'), (1, 'B')))."""
        lhs, rhs = reaction.split('=>')
        inputs = []
        for part in lhs.strip().split(','):
            tokens = part.strip().split(' ')
            inputs.append((int(tokens[0]), tokens[1]))
        tokens = rhs.strip().split(' ')
        return (int(tokens[0]), tokens[1]), tuple(inputs)

    def reverse(self, quantity):
        """
        Given the requirement of getting `quantity` of the
        output chemical, return the list of all the input
        chemicals and their corresponding quantities to
        create the output

        If the reaction produces more than required, return
        also the leftover quantity
        """
        batches = ceil(quantity / self.out_quantity)
        leftover = self.out_quantity * batches - quantity
        needed = [(name, batches * qty) for name, qty in self.inp_chemicals.items()]
        return needed, leftover
class Reactions:
    """The full reaction book: output chemical name -> Reaction producing it."""

    def __init__(self, data):
        # data: iterable of reaction strings, one rule per line.
        self.r_dict = {}
        for r_str in data:
            r = Reaction(r_str)
            self.r_dict[r.out_chemical] = r

    def calculate_ore(self, fuel=1):
        """Return the amount of ORE needed to produce `fuel` units of FUEL.

        Works backwards from FUEL: `required` is a FIFO work-queue of
        (chemical, quantity) demands; `leftovers` tracks surplus produced by
        over-sized batches so it can be consumed by later demands for the
        same chemical. NOTE: the leftover bookkeeping depends on the FIFO
        processing order of the OrderedDict, so statement order matters here.
        """
        ore = 0
        required = OrderedDict()
        required['FUEL'] = fuel
        leftovers = defaultdict(int)
        while required:
            chemical, quantity = required.popitem(last=False)
            reaction = self.r_dict[chemical]
            # Consume any surplus from earlier batches before brewing more.
            inputs, leftover = reaction.reverse(quantity - leftovers[chemical])
            leftovers[chemical] = leftover
            for inp_c, inp_q in inputs:
                if inp_c == 'ORE':
                    # ORE is the raw material: count it, never expand it.
                    ore += inp_q
                    continue
                required[inp_c] = required.get(inp_c, 0) + inp_q
        return ore

    def use_all_ore(self, ore=1000000000000):
        """Return the max FUEL producible from `ore` ORE.

        Coarse-to-fine search: jump upward in `jump_size` steps while the ORE
        cost fits, back off one jump, halve the step, and repeat until `fuel`
        is the exact boundary value.
        NOTE(review): if the boundary is never hit, jump_size eventually
        reaches 0 and the outer loop would spin forever -- confirm inputs.
        """
        fuel = 0
        jump_size = 1000000
        while not (self.calculate_ore(fuel) <= ore and self.calculate_ore(fuel + 1) > ore):
            while self.calculate_ore(fuel) <= ore:
                fuel += jump_size
            fuel -= jump_size
            jump_size //= 2
        return fuel
def sol1(data):
    """Part 1: ORE required to make exactly one unit of FUEL."""
    return Reactions(data).calculate_ore()
def sol2(data):
    """Part 2: FUEL producible from one trillion units of ORE."""
    return Reactions(data).use_all_ore()
def main():
    """Read the reaction list from the file named on the command line and
    print both puzzle answers."""
    with open(sys.argv[1]) as f:
        lines = f.readlines()
    print(sol1(lines))
    print(sol2(lines))


if __name__ == '__main__':
    main()
48472110814 | # -*- coding: utf-8 -*-
"""
Created on Sat May 28 12:35:50 2022
@author: Vijaya
"""
import math
import matplotlib.pyplot as plt
import pandas as pd
import pickle
def compute_scene_distances(scene_positions, mic_locs):
    """For every 3-D position in one scene, compute its (rounded) distance to
    the two microphones and record which microphone is closer.

    BUG FIX: the original code reused `j` as both the scene index and the
    microphone index, so positions were looked up as locus_pos[mic_index][i]
    instead of the current scene's positions.

    Returns a dict with keys "mic" ('01'/'02'), "mic1-distance",
    "mic2-distance", each a list parallel to scene_positions.
    """
    scene = {"mic": [], "mic1-distance": [], "mic2-distance": []}
    for pos in scene_positions:
        mic1_distance = round(math.dist(pos, mic_locs[0]), 2)
        mic2_distance = round(math.dist(pos, mic_locs[1]), 2)
        # Keep the original per-distance progress prints.
        print(mic1_distance)
        print(mic2_distance)
        scene["mic1-distance"].append(mic1_distance)
        scene["mic2-distance"].append(mic2_distance)
        scene["mic"].append('01' if mic1_distance < mic2_distance else '02')
    return scene


def main():
    """Load the locus positions, compute per-scene microphone distances and
    pickle the result."""
    with open('locus_pos_list.pickle', 'rb') as f:
        locus_pos = pickle.load(f)

    standardRoom_mic_locs2 = [
        [1.5, 3.5, 0.9], [5.5, 3.5, 0.9],  # mic 1, mic 2
    ]

    datasetDistance = [compute_scene_distances(scene, standardRoom_mic_locs2)
                       for scene in locus_pos]
    print(datasetDistance)

    with open('C:/Users/Vijaya/PhD/locus_move_distance.pickle', 'wb') as f:
        pickle.dump(datasetDistance, f)


if __name__ == '__main__':
    main()
| vnraomitnala/Device_Handover | plotMicrophoneTransition_basedOn_Distance_locus1_multiple.py | plotMicrophoneTransition_basedOn_Distance_locus1_multiple.py | py | 1,495 | python | en | code | 0 | github-code | 13 |
24257212328 | """
App's entrypoint
"""
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from src.schema.prediction import Prediction
from src.model.model import predict
# App object
app = FastAPI(
    title="DASS model deployment",
    version=1.0
)

# CORS
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# maximally permissive -- fine for a demo, but confirm before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Views
@app.get("/")
async def index():
return {"msg": "Hello world"}
@app.post("/")
async def prediction(pred: Prediction):
return predict(pred.to_dataframe())
| ggonzr/mind-safeguard-api | src/api.py | api.py | py | 594 | python | en | code | 0 | github-code | 13 |
40061024094 | import cv2
import numpy as np
# Dilation inspects the white pixels in each pixel's neighbourhood: if at
# least one neighbour is white (1) the pixel becomes white, otherwise it
# stays black (0).
src = cv2.imread('j.png', 0)  # 0 -> load as grayscale
struct_elem = np.ones((5, 5), np.uint8)
dilated = cv2.dilate(src, struct_elem, iterations=1)
cv2.imshow('window', src)
cv2.imshow('window2', dilated)
cv2.waitKey(0)
cv2.destroyAllWindows()
| wshusheng/Python-OpenCV | dilation.py | dilation.py | py | 449 | python | pt | code | 0 | github-code | 13 |
27809793104 | l=[]
num=int(input("ENTER TOTAL NUMBER OF ELEMENT NEEDED : "))
for i in range(0,num):
value=int(input("ENTER NUMBER : "))
l.append(value)
t=tuple(l)
max=t[0]
for i in range(0,len(t)):
if(t[i]>max):
max=t[i]
print("MAXIMUN ELEMENT IS : ",max) | chetanbhatt10/WEEK--DAY5 | a22.py | a22.py | py | 271 | python | en | code | 0 | github-code | 13 |
14523043830 | from typing import List
import pandas as pd
from Warehouse.Attribute import Attribute, ForeignKey, SCDAttribute
class Dimension:
    """A star-schema dimension table driven by column metadata.

    Collects the (schema, table, column) attributes listed for `name` in the
    `dimensions` frame and can emit the CREATE TABLE (ddl) and the
    INSERT..SELECT load statement (dml) in PostgreSQL or MSSQL dialect.
    """

    def __init__(self, name, metadata, dimensions, language="POSTGRES"):
        # name: dimension table name.
        # metadata: information-schema-like frame describing source columns,
        #   including foreign_table_schema/name and foreign_column_name.
        # dimensions: frame with one row per dimension attribute.
        # language: target SQL dialect, "POSTGRES" or "MSSQL".
        self.name = name
        self.attributes = []
        self.dimensions = dimensions
        self.metadata = metadata
        self.attr_id = 0
        self.language = language
        self.global_counter = 0  # keeps aliases from get_table_alias unique
        # Rows are consumed as (dim_name, schema, table, column) --
        # presumably that column order; TODO confirm against the frame.
        for row in dimensions[dimensions["dim_name"]==self.name].to_numpy():
            self.add_attribute(row[1],row[2],row[3])
            # Remember the index of the (last) attribute ending in "id";
            # dml() treats it as the dimension's natural key.
            if row[3].lower().endswith('id'):
                self.attr_id = len(self.attributes)-1

    def add_attribute(self, schema, table, attribute):
        """Adds an attribute to the dimension. It passes the same metadata used for the dimension.
        The method checks if the attribute is a foreign key, and if it is it adds a ForeignKey object instead of a regular Attribute object.

        Args:
            schema (string): schema name
            table (string ): table name
            attribute (string): attribute/column name
        """
        metadata = self.metadata
        # check if attribute is foreign key
        if (pd.isna(metadata.loc[(metadata['table_schema'].str.lower() == schema.lower()) & (metadata['table_name'].str.lower() == table.lower()) & (metadata['column_name'].str.lower() == attribute.lower()), 'foreign_table_schema'].to_numpy()[0])):
            attr = Attribute(schema, table, attribute, metadata)
        else:
            # get foreign schema, table and attribute names and create a ForeignKey attribute.
            f_schema = metadata.loc[(metadata['table_schema'].str.lower() == schema.lower()) & (metadata['table_name'].str.lower() == table.lower()) & (metadata['column_name'].str.lower() == attribute.lower()), 'foreign_table_schema'].to_numpy()[0]
            f_table = metadata.loc[(metadata['table_schema'].str.lower() == schema.lower()) & (metadata['table_name'].str.lower() == table.lower()) & (metadata['column_name'].str.lower() == attribute.lower()), 'foreign_table_name'].to_numpy()[0]
            f_attribute = metadata.loc[(metadata['table_schema'].str.lower() == schema.lower()) & (metadata['table_name'].str.lower() == table.lower()) & (metadata['column_name'].str.lower() == attribute.lower()), 'foreign_column_name'].to_numpy()[0]
            # A self-referencing "foreign key" is treated as a plain attribute.
            if f_schema == schema and f_table == table and f_attribute == attribute:
                attr = Attribute(schema,table,attribute,metadata)
            else:
                attr = ForeignKey(schema, table, attribute, f_schema, f_table, f_attribute, metadata)
        self.attributes.append(attr)
        return

    def _get_attributes(self, instanceName):
        # NOTE(review): self.attributes holds Attribute objects, so
        # instanceName+'.'+x would raise TypeError if this were ever called --
        # confirm whether attribute *names* were intended here.
        return map(lambda x: instanceName+'.'+x, self.attributes)

    def ddl(self):
        """Like a toString() method. When called it prints out the DDL for creating the Dimension in PostgreSQL

        Returns:
            string: PostgreSQL for creating the dimension
        """
        # Surrogate key: serial (Postgres) vs identity column (MSSQL).
        # NOTE(review): any other language value leaves `sql` unbound.
        if self.language == 'POSTGRES':
            sql = [f"CREATE TABLE {self.name}(\n skey serial primary key"]
        elif self.language == 'MSSQL':
            sql = [f"CREATE TABLE {self.name}(\n skey int identity(1,1) primary key"]
        for attribute in self.attributes:
            sql.append(attribute.ddl())
        return ",\n".join(sql)+"\n);\n"

    def dml(self):
        """Build the INSERT..SELECT that loads *new* rows into the dimension:
        join source tables along foreign keys, LEFT JOIN the dimension on its
        natural key (self.attr_id), and keep only rows with no surrogate key
        yet (dim.skey IS NULL)."""
        select_string = []
        for attr in self.attributes:
            select_string.append(f'{attr.schema}.{attr.table}.{attr.attribute}')
        select_string = f'SELECT {", ".join(select_string)}'
        from_string = [f'FROM']
        tables = set()
        # First join along every foreign-key relationship...
        for fkey in self.get_foreign_keys():
            from_string.append(f'{fkey.schema}.{fkey.table} INNER JOIN {fkey.f_schema}.{fkey.f_table} on ({fkey.schema}.{fkey.table}.{fkey.attribute} = {fkey.f_schema}.{fkey.f_table}.{fkey.f_attribute})')
            tables.add(f'{fkey.schema}.{fkey.table}')
            tables.add(f'{fkey.f_schema}.{fkey.f_table}')
        # ...then add any source table not yet referenced.
        for attr in self.attributes:
            if f'{attr.schema}.{attr.table}' not in tables:
                tables.add(f'{attr.schema}.{attr.table}')
                from_string.append(f'{attr.schema}.{attr.table}')
        from_string.append(f'LEFT OUTER JOIN {self.name} on '
                           f'({self.attributes[self.attr_id].schema}.{self.attributes[self.attr_id].table}.{self.attributes[self.attr_id].attribute} = {self.name}.{self.attributes[self.attr_id].attribute})')
        from_string = ' '.join(from_string)
        where_string = f'WHERE {self.name}.skey is NULL;'
        return f'INSERT INTO {self.name}({",".join([attr.attribute for attr in self.attributes if not isinstance(attr, SCDAttribute)])})\n' + select_string + '\n' + from_string + '\n' + where_string + '\n'

    def get_table_names(self):
        """Return the source table name of every attribute (may repeat)."""
        out = []
        for attr in self.attributes:
            out.append(attr.table)
        return out

    def get_foreign_keys(self) -> List[Attribute]:
        """returns a list of all attributes that are foreign keys

        Returns:
            list: list of ForeignKey objects
        """
        fkeys = []
        for attribute in self.attributes:
            if isinstance(attribute, ForeignKey):
                fkeys.append(attribute)
        return fkeys

    def get_etl_name(self):
        """Name of the ETL stored procedure for this dimension, derived from
        the part of the name after the first underscore (e.g. dim_customer ->
        sp_performETL_Customer)."""
        return f"sp_performETL_{self.name.split('_')[1].capitalize()}"

    def get_table_alias(self, table_name):
        """Build a short, unique alias: 'd' prefix for dim_ tables, then the
        first letter of each dot-separated name part, then a running counter."""
        if table_name.startswith('dim_'):
            table_alias = 'd'
            table_name = table_name[4:]
        else:
            table_alias = ''
        names = table_name.split('.')
        for n in names:
            table_alias += n[0]
        table_alias += str(self.global_counter)
        self.global_counter += 1
        return table_alias
class DimensionSCD1(Dimension):
    """Type-1 slowly changing dimension: changed source rows simply overwrite
    the matching dimension row in place (no history is kept)."""

    def __init__(self, name, metadata, dimensions, language):
        super().__init__(name, metadata, dimensions, language)

    def sp_performETL(self):
        """Generate the ETL stored procedure: INSERT new rows (via dml()),
        then UPDATE existing rows whose non-key attributes differ from the
        joined source tables."""
        if self.language == 'POSTGRES':
            out = [f'CREATE OR REPLACE PROCEDURE sp_performETL_{self.name.split("_")[1].capitalize()}()\nLANGUAGE PLPGSQL\nAS $$\nBEGIN\n']
        elif self.language == 'MSSQL':
            out = [f'CREATE PROCEDURE sp_performETL_{self.name.split("_")[1].capitalize()}\nAS\nBEGIN\n']
        # INSERT statement
        out.append(self.dml())
        # UPDATE: overwrite every attribute from the source
        set_string = []
        for attr in self.attributes:
            set_string.append(f'{self.name}.{attr.attribute}={attr.schema}.{attr.table}.{attr.attribute}')
        set_string = f'SET {", ".join(set_string)}'
        from_string = [f'FROM']
        tables = set()
        # Join the source tables along their foreign keys...
        for fkey in self.get_foreign_keys():
            from_string.append(f'{fkey.schema}.{fkey.table} INNER JOIN {fkey.f_schema}.{fkey.f_table} on ({fkey.schema}.{fkey.table}.{fkey.attribute} = {fkey.f_schema}.{fkey.f_table}.{fkey.f_attribute})')
            tables.add(f'{fkey.schema}.{fkey.table}')
            tables.add(f'{fkey.f_schema}.{fkey.f_table}')
        # ...plus any table not already referenced.
        for attr in self.attributes:
            if f'{attr.schema}.{attr.table}' not in tables:
                tables.add(f'{attr.schema}.{attr.table}')
                from_string.append(f'{attr.schema}.{attr.table}')
        from_string = ' '.join(from_string)
        # Only touch rows where at least one non-key attribute changed.
        where_string = []
        for idx, attr in enumerate(self.attributes):
            if idx != self.attr_id:
                where_string.append(f'{attr.schema}.{attr.table}.{attr.attribute} != {self.name}.{attr.attribute}')
        where_string = f'WHERE {self.attributes[self.attr_id].schema}.{self.attributes[self.attr_id].table}.' \
                       f'{self.attributes[self.attr_id].attribute} = {self.name}.{self.attributes[self.attr_id].attribute}' \
                       f' and' + ' (' + ' or '.join(where_string) + ');'
        update_string = f'UPDATE {self.name}\n' + set_string + '\n' + from_string + '\n' + where_string + '\n'
        out.append(update_string)
        if self.language == 'POSTGRES':
            out.append('END $$;')
        elif self.language == "MSSQL":
            out.append('END;\n\n')
        return "\n".join(out)
class DimensionSCD2(Dimension):
    """Type-2 slowly changing dimension: changed rows are end-dated and a new
    row version is inserted, preserving history via start_date/end_date."""

    def __init__(self, name, metadata, dimensions, language):
        super().__init__(name, metadata, dimensions, language)
        self._add_scd_columns()

    def _add_scd_columns(self):
        """Append the SCD2 bookkeeping columns start_date and end_date
        (open-ended rows carry end_date = '9999-12-31')."""
        if self.language == 'POSTGRES':
            start = SCDAttribute(None, self.name, 'start_date')
        else:
            start = SCDAttribute(None, self.name, 'start_date', default='getdate()')
        end = SCDAttribute(None, self.name, 'end_date', default="'9999-12-31'")
        start.set_data_type('timestamp', False)
        end.set_data_type('timestamp', True)
        self.attributes.append(start)
        self.attributes.append(end)

    def dml(self):
        """Same INSERT..SELECT as Dimension.dml(), but the SCD bookkeeping
        columns are excluded everywhere (they take their column defaults)."""
        select_string = []
        for attr in self.attributes:
            if not isinstance(attr, SCDAttribute):
                select_string.append(f'{attr.schema}.{attr.table}.{attr.attribute}')
        select_string = f'SELECT {", ".join(select_string)}'
        from_string = [f'FROM']
        tables = set()
        for fkey in self.get_foreign_keys():
            from_string.append(f'{fkey.schema}.{fkey.table} INNER JOIN {fkey.f_schema}.{fkey.f_table} on ({fkey.schema}.{fkey.table}.{fkey.attribute} = {fkey.f_schema}.{fkey.f_table}.{fkey.f_attribute})')
            tables.add(f'{fkey.schema}.{fkey.table}')
            tables.add(f'{fkey.f_schema}.{fkey.f_table}')
        for attr in self.attributes:
            if f'{attr.schema}.{attr.table}' not in tables and not isinstance(attr, SCDAttribute):
                tables.add(f'{attr.schema}.{attr.table}')
                from_string.append(f'{attr.schema}.{attr.table}')
        from_string.append(f'LEFT OUTER JOIN {self.name} on '
                           f'({self.attributes[self.attr_id].schema}.{self.attributes[self.attr_id].table}.{self.attributes[self.attr_id].attribute} = {self.name}.{self.attributes[self.attr_id].attribute})')
        from_string = ' '.join(from_string)
        where_string = f'WHERE {self.name}.skey is NULL;'
        return f'INSERT INTO {self.name}({",".join([attr.attribute for attr in self.attributes if not isinstance(attr, SCDAttribute)])})\n' + select_string + '\n' + from_string + '\n' + where_string + '\n'

    def sp_performETL(self):
        """Generate the SCD2 ETL procedure: INSERT brand-new rows, snapshot
        modified open rows into a temp table `scd2`, close them by setting
        end_date=NOW(), then re-insert the changed rows as new versions."""
        if self.language == 'POSTGRES':
            out = [f'CREATE OR REPLACE PROCEDURE sp_performETL_{self.name.split("_")[1].capitalize()}()\nLANGUAGE PLPGSQL\nAS $$\nBEGIN\n']
        elif self.language == 'MSSQL':
            out = [f'CREATE PROCEDURE sp_performETL_{self.name.split("_")[1].capitalize()}\nAS\nBEGIN\n']
        # INSERT statement
        out.append(self.dml())
        out.append('-- find modified')
        # find modified: collect the skeys + current source values of rows
        # whose attributes diverged from the source.
        if self.language == 'POSTGRES':
            select_modified = [f'CREATE TEMP TABLE scd2 AS\nSELECT {self.name}.skey']
        else:
            select_modified = [f'SELECT {self.name}.skey']
        for attr in self.attributes:
            if not isinstance(attr, SCDAttribute):
                select_modified.append(f'{attr.schema}.{attr.table}.{attr.attribute}')
        select_modified = ", ".join(select_modified) + '\n'
        if self.language == 'MSSQL':
            select_modified += 'INTO scd2\n'
        # FROM clause
        from_modified = [f'FROM']
        tables = set()
        for fkey in self.get_foreign_keys():
            from_modified.append(f'{fkey.schema}.{fkey.table} INNER JOIN {fkey.f_schema}.{fkey.f_table} on ({fkey.schema}.{fkey.table}.{fkey.attribute} = {fkey.f_schema}.{fkey.f_table}.{fkey.f_attribute})')
            tables.add(f'{fkey.schema}.{fkey.table}')
            tables.add(f'{fkey.f_schema}.{fkey.f_table}')
        for attr in self.attributes:
            if f'{attr.schema}.{attr.table}' not in tables and not isinstance(attr, SCDAttribute):
                tables.add(f'{attr.schema}.{attr.table}')
                from_modified.append(f'{attr.schema}.{attr.table}')
        from_modified.append(f'INNER JOIN {self.name} on '
                             f'({self.attributes[self.attr_id].schema}.{self.attributes[self.attr_id].table}.{self.attributes[self.attr_id].attribute} = {self.name}.{self.attributes[self.attr_id].attribute})')
        from_modified = ' '.join(from_modified)
        # Extend the join condition: any non-key attribute differs.
        from_and = []
        for idx, attr in enumerate(self.attributes):
            if idx != self.attr_id and not isinstance(attr, SCDAttribute):
                from_and.append(f'{attr.schema}.{attr.table}.{attr.attribute} != {self.name}.{attr.attribute}')
        from_modified += ' and (' + ' or '.join(from_and) + ')\n'
        # where: only currently-open row versions can be superseded.
        where_modified = f"WHERE {self.name}.end_date = '9999-12-31';\n"
        out.append(select_modified + from_modified + where_modified)
        out.append('-- update table')
        # update table (scd2): close out the superseded versions.
        update_modified = f'UPDATE {self.name}\n'\
                          f'SET end_date=NOW()\n'\
                          f'FROM scd2\n'\
                          f'WHERE scd2.skey = {self.name}.skey;\n'
        out.append(update_modified)
        out.append('-- add updated rows')
        # re-insert the changed rows as fresh open versions.
        add_updated_rows = []
        update_rows_attrs = []
        for attr in self.attributes:
            if not isinstance(attr, SCDAttribute):
                update_rows_attrs.append(f'{attr.attribute}')
        add_updated_rows.append(f'INSERT INTO {self.name}({", ".join(update_rows_attrs)})')
        add_updated_rows.append(f'SELECT {", ".join(update_rows_attrs)}\n'
                                f'FROM scd2;\n')
        out.append("\n".join(add_updated_rows))
        if self.language == 'POSTGRES':
            out.append('DROP TABLE scd2;\nEND $$;\n\n')
        elif self.language == "MSSQL":
            out.append('END;\n\n')
        return "\n".join(out)
| becutandavid/Generating-SQL-code-for-ETL | Warehouse/Dimension.py | Dimension.py | py | 14,010 | python | en | code | 0 | github-code | 13 |
7179962530 | """
Script used to group each selected object in it's own seperate group
"""
# Standard library imports
# Third party imports
from maya import cmds
# Local application imports
def GroupEachSeperately():
    """Wrap every currently selected object in its own group.

    Each group is named 'GRP_<object>' with any 'JNT_' prefix stripped
    from the object name.
    """
    sel = cmds.ls(selection=1)
    # `node` instead of `object`: the original loop variable shadowed the
    # builtin `object`.
    for node in sel:
        cmds.select(node)
        cmds.group(name='GRP_{}'.format(node).replace('JNT_',''))


GroupEachSeperately()
| CatAndDogSoup/Maya_Tools | scripts/macros_utils/GroupEachSeperately.py | GroupEachSeperately.py | py | 384 | python | en | code | 9 | github-code | 13 |
23795242571 | import os
import os.path as osp
import sys
import torch
import torch.utils.data
import cv2
import numpy as np
import json
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from torchvision import transforms
# Walk every scene folder under RGBD_HOME and collect, per scene k:
# RGB images, depth images, bounding boxes, class ids and the labels filename.
RGBD_HOME = 'C:\\Users\\Admin\\Desktop\\ssdRGBD\\data\\RGBD\\'
dict_scenes = dict()
k = 0
for direc in os.listdir(RGBD_HOME):
    dict_scenes['RGB_'+str(k)] = list()
    dict_scenes['D_'+str(k)] = list()
    dict_scenes['BNDBOX_'+str(k)] = list()
    dict_scenes['CLASSES_'+str(k)] = list()
    # BUG FIX: initialized as 'FILENAME_' but read everywhere (incl. below)
    # as 'FILENAMES_', so scenes without a labels file raised KeyError later.
    dict_scenes['FILENAMES_'+str(k)] = str()
    for subdirec in os.listdir(osp.join(RGBD_HOME+direc)):
        if subdirec == 'rgb':
            for rgb in os.listdir(osp.join(RGBD_HOME+direc,subdirec)):
                if not rgb.startswith('crops'):
                    dict_scenes['RGB_'+str(k)].append(mpimg.imread(
                        osp.join(RGBD_HOME,osp.join(direc,osp.join(subdirec,rgb)))))
        else:
            if subdirec == 'depth':
                for dpt in os.listdir(osp.join(RGBD_HOME+direc,subdirec)):
                    if not dpt.startswith('crops'):
                        dict_scenes['D_'+str(k)].append(mpimg.imread(
                            osp.join(RGBD_HOME,osp.join(direc,osp.join(subdirec,dpt)))))
            else:
                if subdirec == 'pcd' :
                    continue
                else: #json
                    dict_scenes['FILENAMES_'+str(k)] = subdirec
                    with open(osp.join(RGBD_HOME+direc,direc+'_labels.json')) as f:
                        data = json.load(f)
                        for i in range(len(data)):
                            # NOTE(review): assumes each entry holds
                            # len(entry) - 2 annotations -- TODO confirm schema.
                            for j in range(len(data[i])-2):
                                x_min = data[i]['annotations'][j]['x']
                                y_max = data[i]['annotations'][j]['y']
                                x_max = x_min + data[i]['annotations'][j]['width']
                                y_min = y_max - data[i]['annotations'][j]['height']
                                dict_scenes['BNDBOX_'+str(k)].append([x_min,y_min,x_max,y_max])
                                if data[i]['annotations'][j]['id'] not in dict_scenes['CLASSES_'+str(k)]:
                                    dict_scenes['CLASSES_'+str(k)].append(data[i]['annotations'][j]['id'])
    k += 1
#drgb = dict_scenes['RGB_10']
#ddpt = dict_scenes['D_10']
#dbbx = dict_scenes['BNDBOX_10']
#dcls = dict_scenes['CLASSES_10']
dfnm = dict_scenes['FILENAMES_10']
# Build the sorted, de-duplicated class list and its name -> index mapping.
classes = list()
for i in range(20):
    for cursor in dict_scenes['CLASSES_'+str(i)]:
        if cursor not in classes:
            classes.append(cursor)
class_to_ind = dict()
classes.sort()
i = 0
for cursor in classes:
    class_to_ind[cursor] = i
    i += 1
class VOCAnnotationTransform(object):
    """Transforms a VOC annotation into a Tensor of bbox coords and label index
    Initilized with a dictionary lookup of classnames to indexes

    Arguments:
        class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
            (default: alphabetic indexing of VOC's 20 classes)
        keep_difficult (bool, optional): keep difficult instances or not
            (default: False)
        height (int): height
        width (int): width
    """

    def __init__(self, class_to_ind=class_to_ind):
        self.class_to_ind = class_to_ind

    def __call__(self, target, width, height):
        """
        Arguments:
            target (annotation) : the target annotation to be made usable
                will be an JSON file
        Returns:
            a list containing lists of bounding boxes [bbox coords, class name]
        """
        for i in range(20):
            # NOTE(review): this iterates the *characters* of the filename
            # string and then indexes BNDBOX/CLASSES with a character, which
            # cannot work -- the intended loop variable is unclear; confirm
            # the data layout. Also, if no scene matches `target`, `result`
            # is never bound and the return raises NameError.
            for j in dict_scenes['FILENAMES_'+str(i)]:
                # BUG FIX: the original compared against
                # dict_scenes['FILENAMES_'+'str(i)'] -- a literal 'str(i)'
                # instead of the formatted index.
                if dict_scenes['FILENAMES_'+str(i)] == target:
                    bbox = dict_scenes['BNDBOX_'+str(i)][j]
                    name = dict_scenes['CLASSES_'+str(i)][j]
                    # BUG FIX: use the mapping stored on the instance instead
                    # of the module-level global.
                    label_idx = self.class_to_ind[name]
                    # BUG FIX: list.append() returns None (and would have
                    # mutated the shared dict_scenes list); build a new list.
                    result = bbox + [label_idx]
                    break
            else:
                continue
        return torch.Tensor(result)
class VOCDetection(torch.utils.data.Dataset):
    """VOC Detection Dataset Object

    input is image, target is annotation

    Arguments:
        root (string): filepath to VOCdevkit folder.
        image_set (string): imageset to use (eg. 'train', 'val', 'test')
        transform (callable, optional): transformation to perform on the
            input image
        target_transform (callable, optional): transformation to perform on the
            target `annotation`
            (eg: take in caption string, return tensor of word indices)
        dataset_name (string, optional): which dataset to load
            (default: 'VOC2007')
    """

    def __init__(self, root, dict_scenes, classes, class_to_ind,
                 transform=None, target_transform=VOCAnnotationTransform(),
                 dataset_name='ARID'):
        # NOTE(review): `root`, `dict_scenes`, `classes` and `class_to_ind`
        # parameters are mostly ignored; the module-level globals RGBD_HOME
        # and dict_scenes are used instead -- confirm intent.
        self.root = RGBD_HOME
        self.transform = transform
        self.target_transform = target_transform
        self._annopath = osp.join('%s', 'Annotations', '%s.xml')
        self._imgpath = osp.join('%s', 'JPEGImages', '%s.jpg')
        self.name = dataset_name
        self.folders = os.listdir(RGBD_HOME)
        self.annotations = list()
        self.rgb = list()
        self.depth = list()
        # Hard-coded to 20 scene folders.
        for i in range(20):
            folder = self.folders[i]
            for j in range(len(dict_scenes['FILENAMES_'+str(i)])):
                # NOTE(review): the membership check uses 'labels.json' but
                # the appended path uses '_labels.json', so the dedup check
                # never matches and duplicates are appended -- confirm.
                if(self.root+folder+'/'+folder+'labels.json') not in self.annotations:
                    self.annotations.append(
                        self.root+folder+'/'+folder+'_labels.json')
        # Flatten all scenes' RGB and depth images into parallel lists.
        for i in range(20):
            for j in range(len(dict_scenes['RGB_'+str(i)])):
                self.rgb.append(dict_scenes['RGB_'+str(i)][j])
        for i in range(20):
            for j in range(len(dict_scenes['D_'+str(i)])):
                self.depth.append(dict_scenes['D_'+str(i)][j])

    def __getitem__(self, index):
        # NOTE(review): pull_item returns None when self.transform is None,
        # so this 4-way unpack would fail in that case.
        im, gt, h, w = self.pull_item(index)

        return im, gt

    def __len__(self):
        # NOTE(review): self.ids is never defined anywhere in this class, so
        # len() raises AttributeError -- probably meant len(self.rgb).
        return len(self.ids)

    def pull_item(self, index):
        target = self.annotations[index]
        img = self.pull_image(index)
        # NOTE(review): `width` and `height` are undefined names here; this
        # branch raises NameError whenever target_transform is set.
        if self.target_transform is not None:
            target = self.target_transform(target, width, height)

        if self.transform is not None:
            target = np.array(target)
            img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
            # to rgb
            img = img[:, :, (2, 1, 0)]
            # img = img.transpose(2, 0, 1)
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
            return torch.from_numpy(img).permute(2, 0, 1), target, height, width
        # return torch.from_numpy(img), target, height, width

    def pull_image(self, index):
        '''Returns the original image object at index in PIL form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            PIL img
        '''
        rgb_arr = self.rgb[index]
        rgb_t = torch.from_numpy(rgb_arr)
        r = rgb_t[:,:,0]
        g = rgb_t[:,:,1]
        b = rgb_t[:,:,2]
        d_arr = self.depth[index]
        d = torch.from_numpy(d_arr)
        # NOTE(review): this *sums* the R, G, B and depth planes into a single
        # channel rather than stacking a 4-channel RGB-D image -- confirm.
        rgbd = r+g+b+d
        return transforms.ToPILImage()(rgbd)

    def pull_anno(self, index):
        '''Returns the original annotation of image at index

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to get annotation of
        Return:
            list: [img_id, [(label, bbox coords),...]]
                eg: ('001718', [('dog', (96, 13, 438, 332))])
        '''
        anno = self.annotations[index]
        folder = self.folders[index]
        return_list = list()
        return_list.append(folder)
        with open(anno) as f:
            js = json.load(f)
            for i in range(len(js)):
                for j in range(len(js[i])):
                    idx = js[i]['annotations'][j]['id']
                    h = js[i]['annotations'][j]['height']
                    w = js[i]['annotations'][j]['width']
                    x_min = js[i]['annotations'][j]['x']
                    y_max = js[i]['annotations'][j]['y']
                    # y grows downward: y is the bottom edge of the box.
                    x_max = x_min + w
                    y_min = y_max - h
                    return_list.append((idx,(x_min,y_min,x_max,y_max)))
        return return_list

    def pull_tensor(self, index):
        '''Returns the original image at an index in tensor form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            tensorized version of img, squeezed
        '''
        return transforms.ToTensor()(self.pull_image(index)).unsqueeze_(0)
# Quick smoke test of the dataset class on index 1.
vvv = VOCDetection(root = RGBD_HOME, classes = classes,
                   class_to_ind = class_to_ind,
                   dict_scenes = dict_scenes)
ids = vvv.annotations[1]
rgbs = vvv.rgb[1]
dpts = vvv.depth[1]
d_t = torch.from_numpy(dpts)
dtt = d_t.unsqueeze(2)  # add a trailing channel dim to the depth map
dtt.shape  # NOTE(review): bare expression, has no effect outside a REPL
print(ids)
pulled_4d = vvv.pull_image(1)
plt.imshow(pulled_4d)
pulled_4d.save('rgbd.png')
anno = vvv.pull_anno(1)
pulltt = vvv.pull_tensor(1)
print(pulltt.shape)
| FabioBer/RGBDexperiments | rgbd.py | rgbd.py | py | 9,777 | python | en | code | 0 | github-code | 13 |
24204878460 | import asyncio
from playwright.async_api import async_playwright
# Pages that echo client details (user agent, IP address, location) --
# handy for checking what each browser engine reports.
urls = [
    "http://whatsmyuseragent.org/",
    "https://whatismyipaddress.com/",
    "https://mylocation.org/"
]
async def scrape(url):
    """Visit *url* with Chromium, Firefox and WebKit in turn, printing the
    page title and saving a screenshot per engine."""
    async with async_playwright() as p:
        for browser_type in (p.chromium, p.firefox, p.webkit):
            await _scrape_with(browser_type, url)


async def _scrape_with(browser_type, url):
    """Open *url* in one browser engine, report its title and screenshot it."""
    browser = await browser_type.launch()
    page = await browser.new_page()
    await page.goto(url)
    page_title = await page.title()
    print(f"With browser {browser_type} on page {page_title}")
    await page.screenshot(path=f"example{str(browser_type)}.png")
    await browser.close()
async def main():
    """Scrape every URL concurrently.

    Uses asyncio.gather() instead of asyncio.wait(): passing bare coroutines
    to asyncio.wait() was deprecated in 3.8 and removed in Python 3.11, where
    it raises TypeError.
    """
    await asyncio.gather(*(scrape(url) for url in urls))


asyncio.run(main())
27364579272 | import os
import pytest
import torch
import torch.distributed as dist
from torch import nn
from torch.nn import functional as F
import slapo
from slapo import set_random_seed
def test_dropout(init_dist):
    """Check that slapo's fork_rng() gives each rank an independent dropout
    mask, while without it all ranks share the same mask."""

    def verify(model, data, rank, local_rank, world_size, all_close):
        # Run the model, all-gather every rank's output on every rank, and on
        # rank 0 assert the outputs are all equal (all_close=True) or all
        # differ from rank 0's (all_close=False).
        out = model(data)
        outs = [
            torch.zeros(data.shape, dtype=data.dtype).cuda(local_rank)
            for _ in range(world_size)
        ]
        dist.all_gather(outs, out.contiguous())
        if rank == 0:
            if all_close:
                for out in outs[1:]:
                    torch.testing.assert_close(out, outs[0])
            else:
                for out in outs[1:]:
                    with pytest.raises(AssertionError):
                        torch.testing.assert_close(out, outs[0])

    class Model(nn.Module):
        def forward(self, x):
            return F.dropout(x, p=0.5)

    world_size = dist.get_world_size()
    rank = dist.get_rank()
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    # Same base seed on every rank; only fork_rng() below diverges the streams.
    set_random_seed(123, None, None, local_rank)

    model = Model()
    # Broadcast so every rank feeds the identical input tensor to the model.
    data = torch.randn((10, 20), requires_grad=True).cuda(local_rank)
    dist.broadcast(data, src=0)

    # Without fork_rng, the dropout mask is the same across all ranks.
    sch = slapo.create_schedule(model)
    sch_model, _ = slapo.build(sch)
    sch_model.cuda(local_rank)
    verify(sch_model, data, rank, local_rank, world_size, all_close=True)

    # With fork_rng, the dropout mask is different across all ranks.
    sch = slapo.create_schedule(model)
    sch.fork_rng()
    sch_model, _ = slapo.build(sch)
    sch_model.cuda(local_rank)
    verify(sch_model, data, rank, local_rank, world_size, all_close=False)


if __name__ == "__main__":
    pytest.main([__file__])
| awslabs/slapo | tests/test_fork_rng.py | test_fork_rng.py | py | 1,814 | python | en | code | 120 | github-code | 13 |
1775545396 | # 提示用户输入一个整数
try:
    # Read an integer and divide 8 by it.
    num = int(input("输入一个整数"))
    result = 8/num
    print(result)
except ZeroDivisionError:
    # User entered 0: division by zero.
    print("除0错误")
except ValueError:
    # Input was not a valid integer literal.
    print("数值类型不匹配")
19997328329 | import torch
from torch import nn
import numpy as np
def flatten_trajectories(data):
    """Merge the batch and trajectory dims of every tensor in *data*.

    Tensors with more than two dims are reshaped from (B, T, ...) to
    (B*T, ...); everything else is left untouched. Mutates and returns *data*.
    """
    for key, value in list(data.items()):
        if torch.is_tensor(value) and value.ndim > 2:
            batch, traj, *rest = value.shape
            data[key] = value.reshape(batch * traj, *rest)
    return data
def unflatten_trajectories(data, trajectory_length):
    """Split the leading dim of every tensor in *data* back into
    (batch, trajectory_length, ...). Mutates and returns *data*."""
    for key, value in list(data.items()):
        if torch.is_tensor(value) and value.ndim > 1:
            data[key] = value.reshape(-1, trajectory_length, *value.shape[1:])
    return data
def collapse_trajectory_dim(x):
    """Fold the leading (batch, trajectory) dims of *x* into one dim."""
    batch, traj = x.shape[0], x.shape[1]
    return x.view(batch * traj, *x.shape[2:])
def expand_trajectory_dim(x, T):
    """Unfold the leading dim of *x* into (batch, T, ...)."""
    return x.view(-1, T, *x.shape[1:])
def resize_trajectory(x, size):
    """Bilinear image resize for tensors that carry a trajectory dimension:
    (B, T, C, H, W) -> (B, T, C, size, size)."""
    traj_len = x.shape[1]
    # Fold the trajectory dim into the batch dim so interpolate sees NCHW.
    frames = x.view(x.shape[0] * traj_len, *x.shape[2:])
    frames = nn.functional.interpolate(frames, size=size, mode='bilinear', align_corners=False)
    # Restore the (batch, trajectory) split.
    return frames.view(-1, traj_len, *frames.shape[1:])
def ema_accumulate(model1, model2, decay=0.999):
    """In-place EMA update: model1 <- decay * model1 + (1 - decay) * model2.

    Assumes model2 exposes (at least) the same parameter names as model1.
    """
    source = dict(model2.named_parameters())
    for name, param in model1.named_parameters():
        param.data.mul_(decay).add_(source[name].data, alpha=1 - decay)
class RenderParams:
    """Simple container for the variables required for rendering.

    Note: ``nerf_out_res`` is only set as an attribute when it is truthy;
    callers must guard access with hasattr() otherwise.
    """

    def __init__(self, Rt, K, samples_per_ray, near, far, alpha_noise_std=0, nerf_out_res=None, mask=None):
        self.Rt = Rt
        self.K = K
        self.samples_per_ray = samples_per_ray
        self.near = near
        self.far = far
        self.alpha_noise_std = alpha_noise_std
        self.mask = mask
        if nerf_out_res:
            self.nerf_out_res = nerf_out_res
| SAITPublic/SinGRAF | models/model_utils.py | model_utils.py | py | 2,028 | python | en | code | 7 | github-code | 13 |
72564202258 | import cv2
import skimage.exposure
import numpy as np
from numpy.random import default_rng
# define random seed to change the pattern
# (a fixed seed makes the generated blob pattern reproducible)
seedval = 55
rng = default_rng(seed=seedval)
def create_segmentation_map(image):
    """Generate a random binary blob mask, 10 px smaller than *image* in each
    dimension: random noise -> heavy blur -> contrast stretch -> threshold ->
    morphological open/close to smooth the blobs."""
    rows, cols = image.shape
    rows, cols = rows - 10, cols - 10
    raw_noise = rng.integers(0, 255, (rows, cols), np.uint8, True)
    smoothed = cv2.GaussianBlur(raw_noise, (0, 0), sigmaX=15,
                                sigmaY=15, borderType=cv2.BORDER_DEFAULT)
    stretched = skimage.exposure.rescale_intensity(
        smoothed, in_range='image', out_range=(0, 255)).astype(np.uint8)
    binary = cv2.threshold(stretched, 175, 255, cv2.THRESH_BINARY)[1]
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
    mask = cv2.morphologyEx(binary, cv2.MORPH_OPEN, ellipse)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, ellipse)
    return mask
# cv2.imshow('result', result)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
def create_add_blur(image, mask):
    """Blur *image* wherever *mask* is zero (in place) and return it.

    BUG FIX: the original called cv2.GaussianBlur on one pixel at a time,
    which is both extremely slow and meaningless (a single pixel has no
    neighbourhood to average over). Blur the whole image once and copy the
    blurred values into the masked region instead.

    NOTE(review): assumes mask has the same shape as image --
    create_segmentation_map above returns a mask 10 px smaller in each
    dimension, so confirm the intended sizes before use.
    """
    kernel_size = (5, 5)  # You can change these values
    blurred = cv2.GaussianBlur(image, kernel_size, 0)
    image[mask == 0] = blurred[mask == 0]
    return image
| AayushAgrawal2003/Deep-Blur | data/utilts.py | utilts.py | py | 1,300 | python | en | code | 0 | github-code | 13 |
74436065618 | import argparse
import builtins
import re
from github import Github
def run(github_file):
    """Ask for confirmation, exec the fetched file, and ask whether its
    output was the right answer. Returns True only on a confirmed success.

    SECURITY: this deliberately exec()s untrusted code from GitHub -- that is
    the whole (tongue-in-cheek) point of the tool; the prompt warns the user.
    """
    print("About to blindly run {url}.\nType yes if you think that's a good idea.\nHint: it's not.".format(
        url=github_file.html_url
    ))
    if input().strip().lower() != "yes":
        print("Ok, not running it.")
        return False
    print("Running it, stand back.")
    try:
        exec(github_file.decoded_content)
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit raised while the foreign code ran; catch Exception so
    # the user can still abort the tool.
    except Exception:
        print("It raised an exception.")
        return False
    print("Was that the right answer?")
    return input().strip().lower() == "yes"
def main():
    """Search GitHub for Advent of Code solutions for the requested
    year/day and run them against the local input file until one is
    confirmed correct by the user."""
    parser = argparse.ArgumentParser(description="Advent of Other People's Code")
    parser.add_argument('--token', required=True)
    parser.add_argument('--year', required=True)
    parser.add_argument('--day', required=True)
    parser.add_argument('--input', required=True)
    args = parser.parse_args()
    # Matches file names ending in the day number (not preceded by another
    # significant digit), e.g. "day7.py" or "07.py" for --day 7.
    file_pattern = re.compile(r'[^\d]*[^1-9]{day}\.py'.format(day=args.day))
    # Monkey-patch open() globally so any solution that opens *any* path
    # reads our puzzle input instead. Affects this process only.
    print("Patching open() to always return your input file")
    original_open = builtins.open
    builtins.open = lambda *_args, **_kwargs: original_open(args.input)
    print("Searching for repositories")
    github = Github(args.token)
    repositories = github.search_repositories('advent of code {year}'.format(year=args.year),
                                              sort='updated',
                                              language='python')
    for repository in repositories:
        print("Searching for a solution in {repository}".format(repository=repository.full_name))
        # Recursive tree of the default 'master' branch; repos whose default
        # branch is named differently will raise — TODO confirm handling.
        tree = repository.get_git_tree('master', recursive=True).tree
        matching_files = [element for element in tree if element.type == 'blob' and file_pattern.match(element.path)]
        success = False
        for matching_file in matching_files:
            if run(repository.get_contents(matching_file.path)):
                success = True
                break
        if success:
            break
| jaksi/advent-of-other-peoples-code | advent.py | advent.py | py | 2,040 | python | en | code | 77 | github-code | 13 |
26640162275 | import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import mlflow
import seaborn as sns
def save_results(results, fit_times, output_dir, options):
    """Persist experiment results: CSV summaries, plots, fit times, and
    register everything in *output_dir* as MLflow artifacts.

    Args:
        results: DataFrame with one column per method plus 'target_domain'.
        fit_times: DataFrame of per-iteration training times.
        output_dir: directory prefix (assumed to end with a path separator).
        options: experiment options dict.
    """
    save_csv(results, output_dir, options)
    plot_result(results, output_dir, options)
    save_time_csv(fit_times, output_dir, options)
    mlflow.log_artifacts(output_dir)
def plot_result(results, output_dir, options):
    """Create all result plots (overall bars, per-domain bars, violin plot)."""
    # Build the base graph title from the metric and dataset names.
    metrics = options["experiments"]["metrics"]
    title_base = f'{metrics} of {options["datasets"]["dataset_name"]}({options["datasets"]["source"]})'
    # Methods that were executed in this experiment.
    methods = options["experiments"]["methods"]
    # Plot of overall results.
    plot_total_result(results, output_dir, title_base, methods)
    # Plot per target domain.
    n_domains = len(results["target_domain"].unique())
    plot_separated_result(results, output_dir, title_base, n_domains, methods)
    # Violin plot of the distributions.
    plot_violinplot(results, output_dir, title_base)
def plot_total_result(results, output_dir, title_base, methods):
    """Plot the overall per-method mean as a bar chart with std error bars.

    Args:
        results: DataFrame with one column per method plus 'target_domain'.
        output_dir: directory prefix the PNG is written into.
        title_base: base string for the figure title.
        methods: list of method column names to plot.
    """
    fig = plt.figure(figsize=(14, 5))
    ax1 = fig.add_subplot(111)
    # Select the method columns BEFORE aggregating: DataFrame.mean() over a
    # frame containing the non-numeric 'target_domain' column raises
    # TypeError on pandas >= 2.0 (numeric_only no longer defaults to True).
    df_all_mean = results[methods].mean()
    df_all_std = results[methods].std()
    ax1.bar(np.arange(len(df_all_mean)), df_all_mean, yerr=df_all_std,
            tick_label=df_all_mean.index, ecolor="black", width=0.5)
    ax1.set_ylim(bottom=0)
    fig.suptitle(f'{title_base} by total')
    fig.savefig(f'{output_dir}plot_total.png')
    plt.clf()
    plt.close()
def plot_separated_result(results, output_dir, title_base, n_domains, methods):
    """Plot per-target-domain mean/std bar charts on a 2-column axes grid.

    Args:
        results: DataFrame with one column per method plus 'target_domain'.
        output_dir: directory prefix the PNG is written into.
        title_base: base string for the figure title.
        n_domains: number of distinct target domains in *results*.
        methods: list of method column names to plot.
    """
    nrows = math.ceil(n_domains/2)
    fig, ax = plt.subplots(nrows, 2, figsize=(20, 5*nrows), sharey=True, squeeze=False)
    for idx, (domain, df) in enumerate(results.groupby('target_domain')):
        # Select method columns before aggregating — DataFrame.mean() over
        # the string 'target_domain' column raises on pandas >= 2.0.
        df_mean = df[methods].mean()
        df_std = df[methods].std()
        ax[idx//2, idx%2].bar(np.arange(len(df_mean)), df_mean, yerr=df_std,
                              tick_label=df_mean.index, ecolor='black', width=0.5)
        ax[idx//2, idx%2].set_ylim(bottom=0)
        ax[idx//2, idx%2].set_title(f'Target domain = {domain}')
    # With an odd number of domains the final grid cell stays empty: hide it
    # AFTER the loop. (The original ran ax[-1, -1].axis('off') inside the
    # loop on every even idx, which also blanked the axis that the last
    # domain is drawn into when n_domains is even.)
    if n_domains % 2 == 1:
        ax[-1, -1].axis('off')
    fig.suptitle(f'{title_base} by target domain')
    fig.savefig(f'{output_dir}plot_domains.png')
    plt.clf()
    plt.close()
def plot_violinplot(results, output_dir, title_base):
    """Plot metric distributions per method/target-domain as violin plots."""
    # Reshape the results into long format (one row per observation).
    df_long = results.set_index("target_domain").stack().reset_index()
    df_long.rename(columns={"level_1":"method", 0:"metric"}, inplace=True)
    # Recover the metric name (first token of the title base).
    metrics = title_base.split(' ')[0]
    # plot
    fig = plt.figure(figsize=(15,5))
    ax = fig.add_subplot(1, 1, 1)
    sns.violinplot(x="method", y='metric', hue="target_domain", data=df_long, ax=ax)
    ax.set_ylabel(metrics)
    ax.set_ylim(bottom=0)
    ax.set_title(f'{title_base} violinplot')
    fig.savefig(f'{output_dir}plot_violin.png')
    plt.clf()
    plt.close()
def save_csv(results, output_dir, options):
    """Write aggregated metric results as CSV files into *output_dir*.

    Produces three files:
      - result_all.csv: raw per-iteration results
      - rmse_mean.csv:  per-method mean, overall ('total') and per domain
      - rmse_std.csv:   per-method std, overall ('total') and per domain

    Args:
        results: DataFrame with one column per method plus 'target_domain'.
        output_dir: directory prefix (assumed to end with a path separator).
        options: options dict; options["experiments"]["methods"] lists the
            method columns to aggregate.
    """
    # Raw results of every iteration.
    results.to_csv(f'{output_dir}result_all.csv', index=False)
    # Methods that were executed.
    methods = options["experiments"]["methods"]
    df_mean = pd.DataFrame(index=methods)
    df_std = pd.DataFrame(index=methods)
    # Overall aggregates. Select the method columns BEFORE aggregating:
    # DataFrame.mean() over the non-numeric 'target_domain' column raises
    # TypeError on pandas >= 2.0 (numeric_only no longer defaults to True).
    df_mean['total'] = results[methods].mean()
    df_std['total'] = results[methods].std()
    # Aggregates per target domain.
    for domain, df in results.groupby('target_domain'):
        df_mean[domain] = df[methods].mean()
        df_std[domain] = df[methods].std()
    df_mean.to_csv(f'{output_dir}rmse_mean.csv')
    df_std.to_csv(f'{output_dir}rmse_std.csv')
def save_time_csv(fit_times, output_dir, options):
    """Write the per-iteration training times to ``fit_time.csv``.

    Args:
        fit_times: DataFrame of per-iteration fit times.
        output_dir: directory prefix (assumed to end with a path separator).
        options: experiment options (unused, kept for a uniform signature).
    """
    destination = '{}fit_time.csv'.format(output_dir)
    fit_times.to_csv(destination, index=False)
| raijin0704/TrDART | process/postprocess/save_results.py | save_results.py | py | 3,914 | python | en | code | 0 | github-code | 13 |
17427442098 | """
Serialize Binary Tree
Problem Description
Given the root node of a Binary Tree denoted by A. You have to Serialize the given Binary Tree in the described format.
Serialize means encode it into a integer array denoting the Level Order Traversal of the given Binary Tree.
NOTE:
In the array, the NULL/None child is denoted by -1.
For more clarification check the Example Input.
Problem Constraints
1 <= number of nodes <= 105
Input Format
Only argument is a A denoting the root node of a Binary Tree.
Output Format
Return an integer array denoting the Level Order Traversal of the given Binary Tree.
Example Input
Input 1:
1
/ \
2 3
/ \
4 5
Input 2:
1
/ \
2 3
/ \ \
4 5 6
Example Output
Output 1:
[1, 2, 3, 4, 5, -1, -1, -1, -1, -1, -1]
Output 2:
[1, 2, 3, 4, 5, -1, 6, -1, -1, -1, -1, -1, -1]
Example Explanation
Explanation 1:
The Level Order Traversal of the given tree will be [1, 2, 3, 4, 5 , -1, -1, -1, -1, -1, -1].
 Since 3, 4 and 5 each have both children NULL, we have represented that using -1.
Explanation 2:
The Level Order Traversal of the given tree will be [1, 2, 3, 4, 5, -1, 6, -1, -1, -1, -1, -1, -1].
Since 3 has left child as NULL while 4 and 5 each has both NULL child.
"""
class queue:
    """A fixed-capacity FIFO queue used for level-order tree traversal.

    Backed by a plain list that only ever grows; ``f``/``r`` are the
    front/rear cursors, advanced modulo ``capacity``. ``enqueue`` and
    ``dequeue`` return -1 on overflow/underflow and None on success.
    """
    def __init__(self, capacity):
        self.capacity = capacity
        self.thisQueue = []
        self.size = 0
        self.f = -1
        self.r = -1
    def enqueue(self, val):
        # Reject the value when the queue is already at capacity.
        if self.size >= self.capacity:
            return -1
        self.r = (self.r + 1) % self.capacity
        self.thisQueue.append(val)
        self.size = self.size + 1
    def dequeue(self):
        # Underflow: nothing to remove.
        if not self.size:
            return -1
        # Only the front cursor moves; the stored element is left in place.
        self.f = (self.f + 1) % self.capacity
        self.size = self.size - 1
    def front(self):
        # The element just past the front cursor is the current head.
        return self.thisQueue[(self.f + 1) % self.capacity]
class TreeNode:
    """Node of a binary tree: a value plus left/right child links."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution:
    def solve(self, A):
        """Serialize a binary tree into its level-order traversal.

        Missing children of non-sentinel nodes are encoded as -1; a
        sentinel TreeNode(-1) is enqueued in their place so the output
        includes one -1 per absent child, matching the problem format.

        Args:
            A: root TreeNode of the tree.

        Returns:
            list of node values in level order, with -1 for NULL children.
        """
        current = A
        # BFS queue; None acts as an end-of-level marker.
        q = queue(10**5)
        q.enqueue(current)
        q.enqueue(None)
        result=[]
        thisLevel=[]
        # size > 1 because the trailing level marker alone means we're done.
        while q.size > 1:
            current = q.front()
            q.dequeue()
            if current == None:
                # Level finished: flush it and re-arm the marker.
                result.extend(thisLevel)
                thisLevel=[]
                q.enqueue(None)
            else:
                thisLevel.append(current.val)
                # Enqueue the real child, or a -1 sentinel for a missing
                # child — but never expand children of a sentinel itself.
                if current.left != None:
                    q.enqueue(current.left)
                elif current.val != -1:
                    q.enqueue(TreeNode(-1))
                if current.right != None:
                    q.enqueue(current.right)
                elif current.val != -1:
                    q.enqueue(TreeNode(-1))
        # Flush the final (possibly partial) level.
        result.extend(thisLevel)
        return result
if __name__ == '__main__':
    # Build the sample tree:
    #         1
    #        / \
    #       2   3
    #      / \
    #     4   5
    #          \
    #           6
    rootNode = TreeNode(1)
    newNode = TreeNode(2)
    rootNode.left = newNode
    newNode = TreeNode(3)
    rootNode.right = newNode
    TempNode = rootNode.left # node 2
    newNode = TreeNode(4)
    TempNode.left = newNode
    newNode = TreeNode(5)
    TempNode.right = newNode
    TempNode = TempNode.right # node 5
    TempNode.right = TreeNode(6)
    print(Solution().solve(rootNode))
41136459194 | from platform_app.models import *
from auth_app.models import *
from django.shortcuts import get_object_or_404
from django.utils import timezone
import copy
__all__ = ("TaskService",)
class TaskService:
    """Service-layer helpers for reading, validating and updating
    Task/SubTask rows on behalf of a user/team."""
    def info_match_check(self, user_pk: int, user_team: str) -> tuple:
        # Verify that the given user id and team actually belong together.
        query = User.objects.filter(id=user_pk, team=user_team)
        if query.exists():
            result = True
            message = None
        else:
            result = False
            message = "ID와 TEAM 정보가 일치하지 않습니다."
        return result, message
    def get_user_data(self, user_pk: int = None) -> list:
        ## user_pk is the user's unique primary key
        if user_pk is not None:
            collect_list = []
            ## also include tasks whose sub-tasks belong to the user's own team
            query = User.objects.filter(id=user_pk)
            include_query = Task.objects.filter(
                create_user=user_pk, is_delete=False
            )
            for index in include_query:
                collect_list.append(index)
            data = query[0].team
            # query2 = User.objects.all().exclude(id=user_pk)
            exclude_query = Task.objects.prefetch_related("sub_set").exclude(
                create_user=user_pk
            )
            # print(exclude_query.sub_set.filter(team=data))
            for index in exclude_query:
                if index.sub_set.filter(
                    team=data, is_delete=False
                ).values_list():
                    collect_list.append(index)
            return collect_list
        else:
            # No user given: return every non-deleted task.
            query = Task.objects.filter(is_delete=False).prefetch_related(
                "sub_set"
            ) ## prefetch the whole queryset up front for caching
            return query
    def condition_check(self, user_pk: int, data: dict) -> tuple:
        # NOTE(review): the 400 branch below is dead code — res_code/message
        # are unconditionally overwritten right after the `if`, so this
        # always returns (200, "None"). A `return` inside the `if` (or an
        # `else`) is almost certainly missing — confirm intended behavior.
        if data.get("pk", None) is None:
            res_code = 400
            message = "작업 수정을 위해서는 상위 업무의 고유값(PK)이 필요합니다."
        res_code = 200
        message = "None"
        return res_code, message
    def user_pair_check(self, user_pk: int, data: dict) -> tuple:
        query = User.objects.filter(id=user_pk)
        ## Three kinds of return values:
        ## 1 : parent task editable, 2 : sub-task only editable,
        ## 0 : invalid request (NOTE(review): sometimes returned bare,
        ##     sometimes as (0, None) — callers must handle both)
        if query.exists(): ## only proceed when the user actually exists
            user_team = query[0].team ## team of the user making the edit
            if int(user_pk) != int(data["create_user"]):
                ## The requester is not the parent task's creator, so only
                ## the sub-tasks may be modified.
                ## The logic below drops the parent task and processes only
                ## the sub-task list.
                fix_data = copy.deepcopy(data["sub_set"])
                print
                for num, index in enumerate(fix_data):
                    if index["team"] == user_team:
                        # target_num = num
                        # target_list.append(fix_data[target_num])
                        if index.get("pk", None) is None:
                            return 0
                        else:
                            index.pop("team")
                            index.pop("is_complete")
                            index.pop("is_delete")
                        if index.get("pk", None) is None:
                            return 0
                return 2, fix_data
            else: ## requester is the creator (may touch parent and sub-tasks)
                query = SubTask.objects
                fix_data = copy.deepcopy(data)
                fix_data["modified_date"] = timezone.now()
                ## When is_delete is set True, all sub-tasks become True too.
                # if fix_data["is_delete"]:
                #     for index in fix_data["sub_set"]:
                #         index["is_delete"] = True
                # if fix_data["is_complete"] and fix_data["is_delete"] == False:
                #     for index in fix_data["sub_set"]:
                #         index["is_complete_date"] = True
                #         index["complete_date"] = timezone.now()
                #         index["modified_date"] = timezone.now()
                ## is_complete flips to True once every sub-task is True.
                # fix_data = copy.deepcopy(data["sub_set"])
                for num, index in enumerate(fix_data["sub_set"]):
                    if index["team"] == user_team:
                        if index.get("pk", None) is None:
                            return 0
                        else:
                            # NOTE(review): `query` here is a SubTask model
                            # instance; subscripting it (query["team"]) will
                            # raise TypeError — attribute access (query.team)
                            # is presumably intended. Confirm before relying
                            # on this branch.
                            query = SubTask.objects.get(id=index["pk"])
                            index["team"] = query["team"]
                            index["modified_date"] = timezone.now()
                            if (
                                query["is_delete"] == False
                                and fix_data["is_delete"]
                            ):
                                index["is_delete"] = True
                            else:
                                index["is_delete"] = query["is_delete"]
                            if (
                                query["is_complete"] == False
                                and fix_data["is_complete"]
                            ):
                                fix_data["complete_date"] = timezone.now()
                                index["is_complete"] = True
                                index["complete_date"] = timezone.now()
                            else:
                                index["is_complete"] = query["is_complete"]
                        if index.get("pk", None) is None:
                            return 0
                # data["sub_set"] = target_list
                return 1, fix_data
                ## both parent and sub-task updates are permitted here
        else: ## no such user, treated as a 400 error by the caller
            return 0, None
    def task_update(self, fix_data: dict) -> tuple:
        # Fetch the Task row matching the payload's pk; caller performs the
        # actual save.
        query = Task.objects.filter(id=fix_data["pk"])
        return fix_data, query[0]
    def subtask_update(self, fix_data: dict) -> tuple:
        # Stamp the modification time and manage complete_date transitions.
        fix_data["modified_date"] = timezone.now()
        query = SubTask.objects.filter(id=fix_data["pk"])
        if fix_data["is_complete"] and query[0].is_complete == False:
            fix_data["complete_date"] = timezone.now()
        elif fix_data["is_complete"] and query[0].is_complete:
            ## both already True: nothing extra to do
            pass
        else:
            fix_data["complete_date"] = None
        return fix_data, query[0]
    def team_count(self, data: dict) -> bool:
        """Return False when any team appears more than once among the
        (non-deleted) sub-tasks; True otherwise."""
        team_list = [
            "danbi",
            "darae",
            "blah",
            "rail",
            "sloth",
            "ddang",
            "supi",
        ]
        count_list = [0] * len(team_list)
        for index in data["sub_set"]:
            if index.get("is_delete", None) is None:
                # No is_delete key: count the sub-task unconditionally.
                count_list[team_list.index(index["team"])] += 1
                if count_list[team_list.index(index["team"])] > 1:
                    return False
            else:
                # Only count sub-tasks that are not marked deleted.
                if index["team"] and index["is_delete"] == False:
                    count_list[team_list.index(index["team"])] += 1
                    if count_list[team_list.index(index["team"])] > 1:
                        return False
        return True
| basicgrammer/simple-project4 | backend/platform_app/Services/TaskService.py | TaskService.py | py | 7,780 | python | ko | code | 0 | github-code | 13 |
10620997074 |
import numpy as np
import tensorflow as tf
from keras import backend as K
__author__ = 'ignacio'
class WMDDistance(object):
    #Implementation of Word Mover's Distance
    #Reference
    # From Word Embeddings To Document Distances
    # http://www.jmlr.org/proceedings/papers/v37/kusnerb15.pdf
    def __init__(self, dictionary, embeddings_model = None, distance_matrix = None):
        """Build (or accept precomputed) pairwise word-distance matrix.

        Args:
            dictionary: mapping of word index -> token (index order defines
                matrix rows/columns).
            embeddings_model: word -> vector lookup; required unless
                distance_matrix is supplied.
            distance_matrix: optional precomputed (V, V) distance matrix.
        """
        self._vocab_len = len(dictionary)
        self._embeddings = embeddings_model
        self._dictionary = dictionary
        self._distance_matrix = None
        vocab_len = len(dictionary)
        if distance_matrix is None:
            # Compute distance matrix (O(V^2) pairs).
            self._distance_matrix = np.zeros((vocab_len, vocab_len), dtype=np.double)
            for i, t1 in list(dictionary.items()):
                for j, t2 in list(dictionary.items()):
                    # Compute Euclidean distance between word vectors.
                    self._distance_matrix[i, j] = np.sqrt(np.sum((embeddings_model[t1] - embeddings_model[t2])**2))
        else:
            self._distance_matrix = distance_matrix
    def save(self, path):
        """Persist the distance matrix to *path* in .npy format."""
        np.save(path, self._distance_matrix)
    @staticmethod
    def load(path, dictionary):
        """Load a saved distance matrix and wrap it in a WMDDistance."""
        distance_matrix = np.load(path)
        return WMDDistance(dictionary, distance_matrix = distance_matrix)
    def get_distances(self):
        """Return the (V, V) pairwise distance matrix."""
        return self._distance_matrix
    @staticmethod
    def distance(params):
        """Symbolic (TF1 graph-mode) per-example distance over a batch.

        Args:
            params: triple (batch_x, batch_y, distances) of tensors; 0 is
                treated as the padding value in batch_x/batch_y.

        Returns:
            (1, batch_size) tensor where entry i is the mean pairwise
            distance over the vocabulary indices "shared" between example i
            of batch_x and batch_y (per the similarity matrix below).
        """
        batch_x = params[0]
        batch_y = params[1]
        distances = params[2]
        i0 = tf.constant(0)
        batch_size = K.shape(batch_x)[0]
        result = tf.zeros(shape=(1, batch_size))
        # Loop condition: iterate over the batch dimension.
        c = lambda i, similarity_mat, distances, result: i < tf.shape(similarity_mat)[0]
        # True where the non-padding masks of x and y agree elementwise.
        similarity_mat = tf.equal(tf.not_equal(batch_x, 0), tf.not_equal(batch_y, 0))
        def body(i, similarity_mat, distances, result):
            #
            # Iteration over batch examples, for each example calculates the
            # mean distance between the shared elements in similarity mat
            similarity_row = similarity_mat[i, :]
            non_zero_ind = tf.reshape(tf.where(similarity_row), [-1])
            '''Performs cartesian product to get distances of shared words'''
            tile_a = tf.tile(tf.expand_dims(non_zero_ind, 1), [1, tf.shape(non_zero_ind)[0]])
            tile_a = tf.expand_dims(tile_a, 2)
            tile_b = tf.tile(tf.expand_dims(non_zero_ind, 0), [tf.shape(non_zero_ind)[0], 1])
            tile_b = tf.expand_dims(tile_b, 2)
            cartesian_product = tf.concat([tile_a, tile_b], axis=2)
            '''Creates a mask to add to the original tensor since is not posible to add new elements'''
            mean = tf.reduce_mean(tf.gather_nd(distances, cartesian_product))
            mask = tf.reshape(tf.one_hot(i, tf.shape(result)[1], on_value=mean), (1, -1))
            return i + 1, similarity_mat, distances, tf.add(result, mask)
        _, _, _, res = tf.while_loop(
            c, body, loop_vars=[i0, similarity_mat, distances, result],
            shape_invariants=[i0.shape, similarity_mat.shape, distances.shape, tf.TensorShape((None))])
        return res
| lizarraldeignacio/smartweb | isistan/smartweb/algorithm/WMDDistance.py | WMDDistance.py | py | 3,258 | python | en | code | 0 | github-code | 13 |
2580455355 | from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
app_name = "users"
urlpatterns = [
path('register/', views.RegisterView.as_view(), name='RegisterView'),
path('profile/', views.ProfilePageView.as_view(), name='ProfilePageView'),
path('profile-edit/', views.ProfileEditView.as_view(), name='ProfileEditView'),
path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
] | AbdurRahman111/basic_ecommerce | users/urls.py | urls.py | py | 576 | python | en | code | 0 | github-code | 13 |
11407937832 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import json
import multiprocessing as mp
import os
import shutil
import socket
import time

from terminable_thread import Thread, threading

from api.server import start_api_server
from config import (docker_configuration, network_configuration,
                    user_configuration)
from core.alert import error, info
from core.color import reset_cmd_color
from core.compatible import (check_for_requirements, copy_dir_tree,
                             get_module_dir_path, is_verbose_mode,
                             logo, make_tmp_thread_dir, mkdir)
from core.exit_helper import exit_failure, exit_success, terminate_thread
from core.get_modules import (load_all_modules,
                              virtual_machine_name_to_container_name,
                              virtual_machine_names_to_container_names)
from core.messages import load_messages
from core.network import network_traffic_capture
from database.connector import (push_events_queues_to_database,
                                push_events_to_database_from_thread,
                                create_indices)
# tmp dirs
tmp_directories = []
processor_threads = []
messages = load_messages().message_contents
def all_existing_networks():
    """
    Return the names of every docker network on this host.

    Parses the second column of ``docker network ls`` output, skipping
    the header row and the trailing empty line.

    Returns:
        list of docker network names
    """
    output_rows = os.popen("docker network ls").read().rsplit("\n")[1:-1]
    names = []
    for row in output_rows:
        names.append(row.rsplit()[1])
    return names
def create_ohp_networks():
    """
    create docker internet and internal network for OWASP Honeypot

    Creates two bridge networks (idempotently — only when missing):
    'ohp_internet' for containers that may reach the internet, and
    'ohp_no_internet' (--internal) for isolated containers. Logs the
    subnet/gateway of each.

    Returns:
        True
    """
    if "ohp_internet" not in all_existing_networks():
        info(messages["creating_ohp_internet"])
        # icc=true allows inter-container traffic; masquerade gives NAT out.
        os.popen("docker network create ohp_internet --opt com.docker.network.bridge.enable_icc=true "
                 "--opt com.docker.network.bridge.enable_ip_masquerade=true "
                 "--opt com.docker.network.bridge.host_binding_ipv4=0.0.0.0 --opt "
                 "com.docker.network.driver.mtu=1500").read()
        # Read back the subnet/gateway docker assigned to the new network.
        network_json = json.loads(os.popen("docker network inspect ohp_internet").read())[
            0]["IPAM"]["Config"][0]
        info(messages["ohp_internet_network"].format(network_json["Subnet"],
                                                     network_json["Gateway"]))
    if "ohp_no_internet" not in all_existing_networks():
        info(messages["creating_ohp_no_internet"])
        # --internal: no external routing; containers can still talk to
        # each other on this network.
        os.popen("docker network create --attachable --internal ohp_no_internet "
                 "--opt com.docker.network.bridge.enable_icc=true "
                 "--opt com.docker.network.bridge.enable_ip_masquerade=true "
                 "--opt com.docker.network.bridge.host_binding_ipv4=0.0.0.0 "
                 "--opt com.docker.network.driver.mtu=1500").read()
        network_json = json.loads(os.popen("docker network inspect ohp_no_internet").read())[
            0]["IPAM"]["Config"][0]
        info(messages["ohp_no_internet_network"].format(network_json["Subnet"],
                                                        network_json["Gateway"]))
    return True
def remove_tmp_directories():
    """
    remove tmp directories submitted in tmp_directories

    The entries in tmp_directories are directories (created by
    make_tmp_thread_dir() in create_new_images), and os.remove() raises
    OSError on a directory — shutil.rmtree is required to delete them.
    ignore_errors keeps shutdown cleanup best-effort.

    Returns:
        True
    """
    for tmp_dir in tmp_directories:
        shutil.rmtree(tmp_dir, ignore_errors=True)
    return True
def running_containers():
    """
    Return the names of the docker containers currently running.

    Reads the last column of ``docker ps`` output, skipping the header
    row and the trailing empty line.

    Returns:
        list of running container names
    """
    rows = os.popen("docker ps").read().rsplit("\n")[1:-1]
    names = [row.rsplit()[-1] for row in rows]
    return names
def all_existing_containers():
    """
    Return the names of all docker containers, running or stopped.

    Reads the last column of ``docker ps -a`` output, skipping the
    header row and the trailing empty line.

    Returns:
        list of container names
    """
    rows = os.popen("docker ps -a").read().rsplit("\n")[1:-1]
    names = [row.rsplit()[-1] for row in rows]
    return names
def all_existing_images():
    """
    Return the repository names of all docker images on this host.

    Reads the first column of ``docker images`` output, skipping the
    header row and the trailing empty line.

    Returns:
        list of image names
    """
    rows = os.popen("docker images").read().rsplit("\n")[1:-1]
    names = []
    for row in rows:
        names.append(row.rsplit()[0])
    return names
def stop_containers(configuration):
    """
    stop old containers based on images

    Kills (hard stop, not graceful) every running container whose name
    matches one of the selected modules' container names.

    Args:
        configuration: user final configuration

    Returns:
        True
    """
    containers_list = running_containers()
    container_names = virtual_machine_names_to_container_names(configuration)
    if containers_list:
        for container in container_names:
            if container in containers_list:
                # `docker kill` prints the killed container's name; log it.
                info(
                    "killing container {0}".format(
                        os.popen(
                            "docker kill {0}".format(
                                container
                            )
                        ).read().rsplit()[0]
                    )
                )
    return True
def remove_old_containers(configuration):
    """
    remove old containers based on images

    Deletes (docker rm) every existing container whose name matches one
    of the selected modules; containers are expected to be stopped first
    (see stop_containers).

    Args:
        configuration: user final configuration

    Returns:
        True
    """
    containers_list = all_existing_containers()
    for container in virtual_machine_names_to_container_names(configuration):
        if container in containers_list:
            # `docker rm` prints the removed container's name; log it.
            info(
                "removing container {0}".format(
                    os.popen(
                        "docker rm {0}".format(container)
                    ).read().rsplit()[0]
                )
            )
    return True
def get_image_name_of_selected_modules(configuration):
    """
    Resolve the docker image names for the user's selected modules.

    Thin wrapper over virtual_machine_names_to_container_names (image
    and container names follow the same pattern in OHP).

    Args:
        configuration: user final configuration

    Returns:
        list of virtual machine image names
    """
    image_names = virtual_machine_names_to_container_names(configuration)
    return image_names
def remove_old_images(configuration):
    """
    remove old images based on user configuration

    Deletes (docker rmi) every existing image that belongs to one of the
    selected modules, so a fresh image is built on the next run.

    Args:
        configuration: user final configuration

    Returns:
        True
    """
    for image in all_existing_images():
        if image in get_image_name_of_selected_modules(configuration):
            info(messages["removing_image"].format(image))
            os.popen("docker rmi {0}".format(image)).read()
    return True
def create_new_images(configuration):
    """
    start new images based on configuration and dockerfile

    For each selected module: create a temp build directory, write the
    rendered Dockerfile and copy the module's files into it, then run
    `docker build`. Temp directories are recorded in the module-level
    tmp_directories list for later cleanup.

    Args:
        configuration: user final configuration

    Returns:
        True
    """
    for selected_module in configuration:
        # go to tmp folder to create Dockerfile and files dir
        tmp_dir_name = make_tmp_thread_dir()
        os.chdir(tmp_dir_name)
        # create files dir
        mkdir("files")
        # create Dockerfile (already rendered with module config values)
        dockerfile = open("Dockerfile", "w")
        dockerfile.write(configuration[selected_module]["dockerfile"])
        dockerfile.close()
        # copy the module's static files into the build context
        copy_dir_tree(configuration[selected_module]["files"], "files")
        # create docker image
        image_name = virtual_machine_name_to_container_name(
            configuration[selected_module]["virtual_machine_name"],
            selected_module
        )
        info(messages["creating_image"].format(image_name))
        # in case if verbose mode is enabled, we will be use os.system
        # instead of os.popen to show the outputs in case
        # of anyone want to be aware what's happening or what's the error,
        # it's a good feature for developers as well
        # to create new modules
        if is_verbose_mode():
            os.system("docker build . -t {0}".format(image_name))
        else:
            os.popen("docker build . -t {0}".format(image_name)).read()
        # created
        info(messages["image_created"].format(image_name))
        # go back to home directory (assumes make_tmp_thread_dir created a
        # two-level-deep path — TODO confirm)
        os.chdir("../..")
        # submit tmp dir name so it can be removed at shutdown
        tmp_directories.append(tmp_dir_name)
    return True
def start_containers(configuration):
    """
    start containers based on configuration and dockerfile

    Runs one container per selected module, attaches it to the
    appropriate OHP network, records its container name and IP address
    back into the configuration, and logs the port forwarding.

    Args:
        configuration: JSON container configuration

    Returns:
        configuration containing IP Addresses
    """
    for selected_module in configuration:
        # get the container name to start (organizing)
        # using pattern name will help us to remove/modify the images and
        # modules
        container_name = virtual_machine_name_to_container_name(
            configuration[selected_module]["virtual_machine_name"],
            selected_module
        )
        configuration[selected_module]['container_name'] = container_name
        real_machine_port = configuration[selected_module]["real_machine_port_number"]
        virtual_machine_port = configuration[selected_module]["virtual_machine_port_number"]
        # connect to owasp honeypot networks!
        # run the container with internet access only when the module's
        # config asks for it; otherwise attach to the isolated network
        os.popen(
            "docker run {0} --net {4} --name={1} -d -t -p {2}:{3} {1}".format(
                " ".join(
                    configuration[selected_module]["extra_docker_options"]
                ),
                container_name,
                real_machine_port,
                virtual_machine_port,
                'ohp_internet' if configuration[selected_module]["virtual_machine_internet_access"]
                else 'ohp_no_internet'
            )
        ).read()
        try:
            # ask docker for the container's IP on its attached network(s)
            virtual_machine_ip_address = os.popen(
                "docker inspect -f '{{{{range.NetworkSettings.Networks}}}}"
                "{{{{.IPAddress}}}}{{{{end}}}}' {0}".format(
                    container_name
                )
            ).read().rsplit()[0].replace("\'", "")  # single quotes need to be removed on windows
        except Exception:
            virtual_machine_ip_address = "CANNOT_FIND_IP_ADDRESS"
        # add virtual machine IP Address to configuration
        configuration[selected_module]["ip_address"] = virtual_machine_ip_address
        # print started container information
        info(
            "container {0} started, forwarding 0.0.0.0:{1} to {2}:{3}".format(
                container_name,
                real_machine_port,
                virtual_machine_ip_address,
                virtual_machine_port
            )
        )
    return configuration
def containers_are_unhealthy(configuration):
    """
    Return the selected-module containers that are not currently running.

    :param configuration: JSON container configuration
    :return: list of expected-but-not-running container names (empty list
             when every container is alive)
    """
    expected_names = [
        configuration[selected_module]['container_name']
        for selected_module in configuration
    ]
    alive = running_containers()
    not_running = []
    for name in expected_names:
        if name not in alive:
            not_running.append(name)
    return not_running
def wait_until_interrupt(virtual_machine_container_reset_factory_time_seconds, configuration,
                         new_network_events_thread, run_as_test):
    """
    wait for opened threads/honeypots modules

    Main supervision loop: once per second, optionally factory-resets the
    containers, and aborts if the network-capture thread dies or any
    expected container stops running. Exits on Ctrl+C or (immediately
    after one iteration) when run_as_test is set.

    Returns:
        True on normal interruption; otherwise propagates error()'s
        return value when aborting.
    """
    # running_time is used to check whether the containers need a
    # factory reset after a while; if
    # virtual_machine_container_reset_factory_time_seconds < 0 the
    # equality check below never fires and containers are kept until
    # user interruption
    running_time = 0
    while True:
        # while True sleep until user send ctrl + c
        try:
            time.sleep(1)
            running_time += 1
            # check if running_time is equal to reset factory time
            if running_time == virtual_machine_container_reset_factory_time_seconds:
                # reset the run time
                running_time = 0
                # stop old containers (in case they are not stopped)
                stop_containers(configuration)
                # remove old containers (in case they are not updated)
                remove_old_containers(configuration)
                # start containers based on selected modules
                start_containers(configuration)
            if not new_network_events_thread.is_alive():
                return error(messages["interrupt_application"])
            if containers_are_unhealthy(configuration):
                # in verbose mode, dump the dead containers' logs first
                if is_verbose_mode():
                    for container in containers_are_unhealthy(configuration):
                        os.system(
                            "docker logs {0}".format(
                                container
                            )
                        )
                return error(
                    "Interrupting the application because \"{0}\" container(s) is(are) not alive!".format(
                        ", ".join(containers_are_unhealthy(configuration))
                    )
                )
            if run_as_test:
                break
        except KeyboardInterrupt:
            # break and return for stopping and removing containers/images
            info(messages["interrupted_by_user"])
            break
    return True
def honeypot_configuration_builder(selected_modules):
    """
    honeypot configuration builder

    Merges, for each selected module, the category-level configuration,
    the module-level configuration, the rendered Dockerfile, and the
    path to the module's static files into a single dict keyed by
    module name.

    Args:
        selected_modules: list of selected modules ("category/module")

    Returns:
        JSON/Dict OHP configuration
    """
    # the modules are available in lib/modules/category_name/module_name
    # (e.g. lib/modules/ftp/weak_password
    # they will be listed based on the folder names and if "Dockerfile" exist!
    # the Dockerfile will be read and add into JSON configuration (dockerfile)
    honeypot_configuration = {}
    for module in selected_modules:
        # read category configuration (e.g. ftp, ssh, http, etc..), they are
        # located in lib/modules/category/__init__.py
        # in the __init__.py every category has same function as below!
        # def category_configuration():
        #     return {
        #         "virtual_machine_name": "ohp_sshserver",
        #         "virtual_machine_port_number": 22,
        #         "virtual_machine_internet_access": False,
        #         "real_machine_port_number": 22
        #     }
        category_configuration = getattr(
            __import__(
                "modules.{0}".format(
                    module.rsplit("/")[0]),
                fromlist=["category_configuration"]
            ),
            "category_configuration"
        )
        # reading each module configuration (e.g. ftp/weak_password, etc..)
        # they are located in lib/modules/category/module_name/__init__.py
        # each module must have such a function (in case you can return {} if you don't have any configuration)
        # def module_configuration():
        #     return {
        #         "username": "admin",
        #         "password": "123456"
        #     }
        # to replace the category default port for individual modules, you have to add "real_machine_port_number"
        # key to module configuration to replace it.
        #
        # for instance:
        # def module_configuration():
        #     return {
        #         "username": "admin",
        #         "password": "123456"
        #         "real_machine_port_number": 2121
        #     }
        module_configuration = getattr(
            __import__(
                "modules.{0}".format(
                    module.replace("/", ".")
                ), fromlist=["module_configuration"]
            ),
            "module_configuration"
        )
        # combine category + module configuration into one Dict/JSON
        # (module keys override category defaults)
        combined_module_configuration = category_configuration()
        combined_module_configuration.update(module_configuration())
        # dockerfile
        dockerfile = open(
            os.path.join(
                get_module_dir_path(module_configuration),
                "Dockerfile"
            )
        ).read()
        # based on your configuration, the variables/values will be set into your Dockerfile
        # e.g. username will be replaced by {username} in Dockerfile
        combined_module_configuration["dockerfile"] = dockerfile.format(
            **combined_module_configuration
        )
        # add module files
        combined_module_configuration["files"] = os.path.join(
            get_module_dir_path(
                module_configuration
            ),
            "files"
        )
        # combine Dockerfile configuration with module and category
        # configuration
        honeypot_configuration[module] = combined_module_configuration
    return honeypot_configuration
def port_is_free(real_machine_port):
    """
    check if port is free

    Tries to bind a TCP socket on 0.0.0.0 at the given port. The socket
    is always closed (try/finally), fixing the original's leak of the
    socket object when bind() raised; the exception is narrowed from
    the bare Exception to OSError, which is what bind raises for an
    occupied or privileged port.

    Args:
        real_machine_port: port number

    Returns:
        True if the port could be bound, otherwise False
    """
    tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        tcp.bind(
            (
                "0.0.0.0",
                real_machine_port
            )
        )
        return True
    except OSError:
        return False
    finally:
        tcp.close()
def reserve_tcp_port(real_machine_port, module_name, configuration):
    """
    pick a free port

    Scans upward from real_machine_port until a port is found that is
    both bindable on this host and not already assigned to another
    module in *configuration*; writes the chosen port into
    configuration[module_name] as a side effect. NOTE: loops forever if
    no such port exists.

    Args:
        real_machine_port: port number
        module_name: selected module
        configuration: fixed configuration

    Returns:
        port number
    """
    while True:
        try:
            if port_is_free(real_machine_port):
                # unique_port = True
                # tentatively assign, then verify no other module holds
                # the same port
                configuration[module_name]["real_machine_port_number"] = real_machine_port
                duplicated_ports = []
                for selected_module in configuration:
                    duplicated_ports.append(
                        configuration[selected_module]["real_machine_port_number"])
                if duplicated_ports.count(real_machine_port) == 1:
                    info(messages["port_selected"].format(real_machine_port, module_name))
                    return real_machine_port
        except Exception:
            # best-effort: on any error just try the next port
            pass
        real_machine_port += 1
def conflict_ports(configuration):
    """
    check conflict ports in configuration

    Resolves host-port collisions between modules by re-reserving each
    module's port via reserve_tcp_port.

    NOTE(review): configuration.copy() is a SHALLOW copy — the per-module
    inner dicts are shared with the original, so reserve_tcp_port's
    writes mutate the input configuration too. Confirm this is intended.

    Args:
        configuration: user final configuration

    Returns:
        new fixed configuration
    """
    fixed_configuration = configuration.copy()
    for selected_module in configuration:
        port = reserve_tcp_port(
            configuration[selected_module]["real_machine_port_number"],
            selected_module,
            fixed_configuration
        )
        fixed_configuration[selected_module]["real_machine_port_number"] = port
    return fixed_configuration
def run_modules_processors(configuration):
    """
    run ModuleProcessor for each module

    Starts one thread per selected module running that module's
    processor; threads are tracked in the module-level processor_threads
    list so stop_modules_processors can join on them.

    :param configuration: user final configuration
    :return: None
    """
    for module in configuration:
        module_processor_thread = Thread(
            target=configuration[module]["module_processor"].processor,
            # name the thread after its container for easier debugging
            name=virtual_machine_name_to_container_name(
                configuration[module]["virtual_machine_name"],
                module
            )
        )
        module_processor_thread.start()
        processor_threads.append(module_processor_thread)
    return
def stop_modules_processors(configuration):
    """
    stop the ModuleProcessor of every module

    Sets each processor's kill_flag, then busy-waits (0.1 s polls) until
    every processor thread has finished.

    :param configuration: user final configuration
    :return: None
    """
    for module in configuration:
        configuration[module]["module_processor"].kill_flag = True
    # wait until no processor thread is alive any more
    while True:
        if True not in [
            module_processor_thread.is_alive() for module_processor_thread in processor_threads
        ]:
            break
        time.sleep(0.1)
    return
def set_network_configuration(argv_options):
    """
    Build the network configuration, overriding defaults with CLI choices.

    Args:
        argv_options: parsed argparse namespace (uses .store_pcap and
            .timeout_value)

    Returns:
        network configuration dict
    """
    config = network_configuration()
    # CLI flags take precedence over the defaults loaded from config.
    cli_overrides = {
        "store_network_captured_files": argv_options.store_pcap,
        "split_pcap_file_timeout": argv_options.timeout_value,
    }
    config.update(cli_overrides)
    return config
def update_language(argv_options):
    """
    Validate the requested messages language.

    Despite the name, this only VALIDATES argv_options.language against
    the available language codes and exits the process on an invalid
    code; the actual language switch presumably happens elsewhere —
    TODO confirm.

    Args:
        argv_options: parsed argparse namespace (uses .language)
    """
    if argv_options.language not in load_messages().languages_list:
        exit_failure("Invalid language code. Available options are " + ", ".join(load_messages().languages_list))
def argv_parser():
    """
    parse ARGVs using argparse

    Builds the OWASP Honeypot CLI: module selection/exclusion, container
    storage and reset-factory limits, API server flag, pcap capture options,
    verbosity/color, language selection and a test flag.

    Returns:
        parser, parsed ARGVs
    """
    # create parser (add_help=False because -h/--help is registered manually below)
    parser = argparse.ArgumentParser(prog="OWASP Honeypot", add_help=False)
    # create menu
    docker_config = docker_configuration()
    user_config = user_configuration()
    engineOpt = parser.add_argument_group(
        "OHP Engine",
        "OHP Engine input options"
    )
    # add select module options + list of available modules
    engineOpt.add_argument(
        "-m", "--select-module",
        action="store",
        dest="selected_modules",
        default=user_config["default_selected_modules"],
        help="select module(s) {0}".format(
            load_all_modules() + ["all"]
        )
    )
    # by default all modules are selected, in case users can exclude one or
    # some (separated with comma)
    engineOpt.add_argument(
        "-x",
        "--exclude-module",
        action="store",
        dest="excluded_modules",
        default=user_config["default_excluded_modules"],
        help="select modules(s) to exclude {0}".format(
            load_all_modules()
        )
    )
    # limit the virtual machine storage to avoid related abuse
    engineOpt.add_argument(
        "-s",
        "--vm-storage-limit",
        action="store",
        dest="virtual_machine_storage_limit",
        type=float,
        default=docker_config["virtual_machine_storage_limit"],
        help="virtual machine storage limit"
    )
    # reset the containers once in a time to prevent being continues botnet
    # zombie
    engineOpt.add_argument(
        "-r",
        "--vm-reset-factory-time",
        action="store",
        dest="virtual_machine_container_reset_factory_time_seconds",
        type=int,
        default=docker_config["virtual_machine_container_reset_factory_time_seconds"],
        help="virtual machine reset factory time"
    )
    # start API
    engineOpt.add_argument(
        "--start-api-server",
        action="store_true",
        dest="start_api_server",
        default=False,
        help="start API server"
    )
    # Store Network captured files
    engineOpt.add_argument(
        "--store-pcap",
        action="store_true",
        dest="store_pcap",
        default=False,
        help="store network traffic as pcap files"
    )
    # Set Timeout value for splitting network captured files
    engineOpt.add_argument(
        "-t",
        "--split-pcap-file-timeout",
        type=int,
        dest="timeout_value",
        default=3600,
        help="timeout value used to split network captured files"
    )
    # enable verbose mode (debug mode)
    engineOpt.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        dest="verbose_mode",
        default=False,
        help="enable verbose mode"
    )
    # disable color CLI
    engineOpt.add_argument(
        "--disable-colors",
        action="store_true",
        dest="disable_colors",
        default=False,
        help="disable colors in CLI"
    )
    # set language
    engineOpt.add_argument(
        "--language",
        type=str,
        dest="language",
        default="en_US",
        help="Set the default language. {languages}".format(languages=load_messages().languages_list)
    )
    # test CI/ETC
    engineOpt.add_argument(
        "--test",
        action="store_true",
        dest="run_as_test",
        default=False,
        help="run a test and exit"
    )
    # help menu
    engineOpt.add_argument(
        "-h",
        "--help",
        action="store_true",
        default=False,
        dest="show_help_menu",
        help="print this help menu"
    )
    return parser, parser.parse_args()
def load_honeypot_engine():
    """
    load OHP Engine

    End-to-end engine lifecycle: parse/validate CLI options, optionally
    start the API server, rebuild docker images/containers for the selected
    modules, start the packet-capture process and event-push thread, wait
    for an interrupt, then tear everything down in reverse order.

    Returns:
        True
    """
    # print logo
    logo()
    # parse argv
    parser, argv_options = argv_parser()
    # check the language
    if argv_options.language:
        update_language(argv_options)
    #########################################
    # argv rules apply
    #########################################
    # check help menu
    if argv_options.show_help_menu:
        parser.print_help()
        exit_success()
    # check for requirements before start
    check_for_requirements(argv_options.start_api_server)
    # create indices before server start
    create_indices()
    # check api server flag
    if argv_options.start_api_server:
        start_api_server()
        exit_success()
    # Check if the script is running with sudo
    if not os.geteuid() == 0:
        exit_failure(messages['script_must_run_as_root'])
    # Check timeout value if provided
    if argv_options.timeout_value < 1:
        exit_failure(messages["timeout_error"])
    # check selected modules
    # NOTE(review): if selected_modules is empty/falsy this whole block is
    # skipped and the later use of `selected_modules` raises NameError.
    if argv_options.selected_modules:
        selected_modules = list(set(argv_options.selected_modules.rsplit(",")))
        if "all" in selected_modules:
            selected_modules = load_all_modules()
        if "" in selected_modules:
            selected_modules.remove("")
        # if selected modules are zero
        if not len(selected_modules):
            exit_failure(messages["no_module_selected_error"])
        # if module not found
        for module in selected_modules:
            if module not in load_all_modules():
                exit_failure("module {0} not found!".format(module))
    # check excluded modules
    if argv_options.excluded_modules:
        excluded_modules = list(set(argv_options.excluded_modules.rsplit(",")))
        if "all" in excluded_modules:
            exit_failure(messages["all_modules_excluded_error"])
        if "" in excluded_modules:
            excluded_modules.remove("")
        # remove excluded modules
        for module in excluded_modules:
            if module not in load_all_modules():
                exit_failure("module {0} not found!".format(module))
            # ignore if module not selected, it will remove anyway
            try:
                selected_modules.remove(module)
            except Exception:
                pass
        # if selected modules are zero
        if not len(selected_modules):
            exit_failure(messages["no_module_selected_error"])
    virtual_machine_container_reset_factory_time_seconds = argv_options. \
        virtual_machine_container_reset_factory_time_seconds
    run_as_test = argv_options.run_as_test
    #########################################
    # argv rules apply
    #########################################
    # build configuration based on selected modules
    configuration = honeypot_configuration_builder(selected_modules)
    # Set network configuration
    network_config = set_network_configuration(argv_options)
    info(messages["start_message"])
    info(messages["loading_modules"].format(", ".join(selected_modules)))
    # check for conflict in real machine ports and pick new ports
    info(messages["check_for_port_conflicts"])
    configuration = conflict_ports(configuration)
    # stop old containers (in case they are not stopped)
    stop_containers(configuration)
    # remove old containers (in case they are not updated)
    remove_old_containers(configuration)
    # remove old images (in case they are not updated)
    remove_old_images(configuration)
    # create new images based on selected modules
    create_new_images(configuration)
    # create OWASP Honeypot networks in case not exist
    create_ohp_networks()
    # start containers based on selected modules
    configuration = start_containers(configuration)
    # network capture process ('spawn' start method set before creating queues/process)
    mp.set_start_method('spawn')
    # Event queues
    honeypot_events_queue = mp.Queue()
    network_events_queue = mp.Queue()
    # start a new process for network capture
    network_traffic_capture_process = mp.Process(
        target=network_traffic_capture,
        args=(
            configuration,
            honeypot_events_queue,
            network_events_queue,
            network_config,
        ),
        name="network_traffic_capture_process"
    )
    network_traffic_capture_process.start()
    info(
        messages["selected_modules_started"].format(
            ", ".join(
                selected_modules
            )
        )
    )
    # start a thread to push events to database regularly
    bulk_events_thread = Thread(
        target=push_events_to_database_from_thread,
        args=(honeypot_events_queue, network_events_queue,),
        name="insert_events_in_bulk_thread"
    )
    bulk_events_thread.start()
    # run module processors
    run_modules_processors(configuration)
    # wait forever! in case user can send ctrl + c to interrupt
    exit_flag = wait_until_interrupt(
        virtual_machine_container_reset_factory_time_seconds,
        configuration,
        network_traffic_capture_process,
        run_as_test
    )
    # killed the network traffic capture process by ctrl + c... waiting to end.
    info(messages["killing_capture_process"])
    if run_as_test:
        network_traffic_capture_process.terminate()
    # without ci it will be terminate after a few seconds, it needs to kill the tshark and update pcap file collection
    network_traffic_capture_process.join()
    # if in case any events that were not inserted from thread
    push_events_queues_to_database(honeypot_events_queue, network_events_queue)
    # Kill bulk events thread
    terminate_thread(bulk_events_thread)
    # stop created containers
    stop_containers(configuration)
    # stop module processor
    stop_modules_processors(configuration)
    # remove created containers
    remove_old_containers(configuration)
    # remove created images
    remove_old_images(configuration)
    # remove_tmp_directories() error: access denied!
    # kill all missed threads
    for thread in threading.enumerate()[1:]:
        terminate_thread(thread, False)
    info(messages["finished"])
    # reset cmd/terminal color
    reset_cmd_color()
    return exit_flag
| OWASP/Python-Honeypot | core/load.py | load.py | py | 30,410 | python | en | code | 383 | github-code | 13 |
25539376617 | # Definition for a binary tree node.
class TreeNode(object):
    """Binary-tree node: a payload value plus left/right child links."""
    def __init__(self, x):
        # Children start unset; callers attach subtrees after construction.
        self.val, self.left, self.right = x, None, None
class Solution(object):
    def buildTree(self, preorder, inorder):
        """
        Rebuild a binary tree from its preorder and inorder traversals
        (assumes all values are unique, as in LeetCode 105).

        :type preorder: List[int]
        :type inorder: List[int]
        :rtype: TreeNode
        """
        def recur(pre_root, in_left, in_right):
            '''
            :param pre_root: index of the current subtree's root in the preorder list
            :param in_left: left boundary of the current subtree in the inorder list
            :param in_right: right boundary of the current subtree in the inorder list
            :return: root node of each subtree
            in_root: index of the current subtree's root in the inorder list
            '''
            # empty range -> no subtree (implicitly returns None)
            if in_left > in_right:
                return
            node = TreeNode(preorder[pre_root])
            in_root = dic[preorder[pre_root]]
            # left subtree root is the next preorder element
            node.left = recur(pre_root + 1, in_left, in_root - 1)
            # right subtree root is offset by the left subtree size (in_root - in_left)
            node.right = recur(in_root - in_left + pre_root + 1, in_root + 1, in_right)
            return node
        # map each value to its inorder index for O(1) root lookups
        dic = {}
        for i in range(len(inorder)):
            dic[inorder[i]] = i
        return recur(0, 0, len(inorder) - 1)
if __name__ == '__main__':
    # Smoke test: rebuild the classic LeetCode-105 example tree and print
    # its root value (3, the first preorder element).
    obj = Solution()
    preorder = [3,9,20,15,7]
    inorder = [9,3,15,20,7]
    result = obj.buildTree(preorder, inorder)
    print(result.val)
| luppx/leetcode | jianzhioffer/firstround/jianzhi_07.py | jianzhi_07.py | py | 1,378 | python | en | code | 0 | github-code | 13 |
36945460709 | from collections import Counter
def poly(s):
    """Build the lexicographically smallest palindrome from the letters of s.

    Half of every repeated letter forms the (sorted) left half, the smallest
    letter with an odd count sits in the middle, and the left half mirrored
    forms the right half.
    """
    counts = Counter(s)
    middle = ''
    half_parts = []
    for ch in sorted(counts):
        n = counts[ch]
        if n > 1:
            half_parts.append(ch * (n // 2))
        # any odd count (including singles) is a candidate for the centre;
        # keep the lexicographically smallest one
        if n % 2 == 1 and (middle == '' or ch < middle):
            middle = ch
    left = ''.join(half_parts)
    return left + middle + left[::-1]
# if __name__ == '__main__':
#     import sys
#     sys.stdin = open('input001.txt', 'r')
#     assert poly(input()) == 'k'
#
#     sys.stdin = open('input002.txt', 'r')
#     assert poly(input()) == 'iai'
#
#     assert poly('jijijijijijijijijijij') == 'iiiiijjjjjjjjjjjiiiii'
#     print('GJ!')
# Read one line from stdin and print its palindrome rearrangement.
print(poly(input()))
| iaramer/algorithms | python/mipt/mipt_contest/contest/C/problem_c.py | problem_c.py | py | 751 | python | en | code | 0 | github-code | 13 |
73006149138 |
import os
import numpy as np
import glob
from PIL import Image
from jittor.dataset.dataset import Dataset
import jittor.transform as transform
import matplotlib.pyplot as plt
import mxnet as mx
def get_dataset(path, resolution, batch_size):
    """Build a shuffled FolderDataset for images stored under <path>/<resolution>/.

    Returns the dataset configured with the given batch size and 2 workers.
    """
    root_path = os.path.join(path, str(resolution))
    return FolderDataset(root_path).set_attrs(batch_size=batch_size, shuffle=True, num_workers=2)
class FolderDataset(Dataset):
    """Jittor dataset that eagerly loads every image in a directory.

    All files matching ``suffix`` (falling back to ``*.jpg`` when there is
    no match) are decoded into memory up front; ``__getitem__`` then only
    applies the flip/normalise transform.
    """

    def __init__(self, root_path, suffix="*.png"):
        super(FolderDataset, self).__init__()
        self.root_path = root_path
        # Optional MXNet RecordIO fast path; currently always disabled.
        self.use_rec = False
        self.file_lst = glob.glob(root_path + "/{}".format(suffix))
        if not self.file_lst:
            self.file_lst = glob.glob(root_path + "/{}".format("*.jpg"))
        # Random horizontal flip + normalisation to [-1, 1].
        self.transform = transform.Compose([
            transform.ToPILImage(),
            transform.RandomHorizontalFlip(),
            transform.ToTensor(),
            transform.ImageNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ])
        # Eagerly decode all images. plt.imread returns PNGs as floats in
        # [0, 1], so rescale them to uint8 to match the JPEG path.
        self.image_array = []
        for file_path in self.file_lst:
            image = plt.imread(file_path)
            if file_path[-4:] == '.png':
                image = (image * 255).astype('uint8')
            self.image_array.append(image)

    def __getitem__(self, index):
        # BUGFIX: the original fell through to unreachable/duplicated code
        # after the if/else, discarding the RecordIO-decoded image when
        # use_rec was enabled. Return the selected image exactly once.
        if self.use_rec:
            s = self.imgrec.read_idx(index)
            header, img = mx.recordio.unpack(s)
            image = mx.image.imdecode(img).asnumpy()
        else:
            image = self.image_array[index]
        return self.transform(image)

    def __len__(self):
        return len(self.file_lst)
24111091423 | import math
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from django.http import JsonResponse
from binance.client import Client, AsyncClient
from binance.exceptions import BinanceAPIException, BinanceOrderException
from binance import ThreadedWebsocketManager
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import unicorn_binance_rest_api
from unicorn_fy.unicorn_fy import UnicornFy
from operator import itemgetter
import requests
import json
import os
from multiprocessing import Pool, Process
from .models import MultiProcess
import asyncio, time
import numpy as np
import pandas as pd
from numba import jit, cuda
from datetime import datetime
import time
# LOCAL_TIMEZONE = datetime.now(datetime.timezone.utc).astimezone().tzinfo
from calendar import monthrange
from queue import Queue
import requests
import aiohttp
import websockets
# Create your views here.
binance_api_key = "Redacted ;)"
binance_api_secret = ""
client = Client(binance_api_key, binance_api_secret)
# Flags shared between the long-running view functions below.
websocket_running = False
all_streams_running = False
http_fetched = False
# NOTE(review): `global` at module scope is a no-op; these names only come
# into existence when the view functions below assign them.
global subsequent_finished_klines_queue
global all_streams
global all_futures_historical_klines
global markets
global percent_change_dict
@api_view(["GET"])
@csrf_exempt
def getWebsocketKlines(request):
global websocket_running
global all_streams_running
if not websocket_running:
websocket_running = True
global channels
global ubwa
global stream_id_dict
global all_streams
global subsequent_finished_klines_queue
global markets
prices = client.futures_mark_price()
markets = []
for val in prices:
markets.append(val["symbol"])
all_streams = {}
for symbol in markets:
all_streams[symbol] = {}
ubwa = BinanceWebSocketApiManager(exchange="binance.com-futures")
channels = ['kline_1m', 'kline_5m', 'kline_15m', 'kline_1h', 'kline_4h', 'kline_1d', 'kline_1w', 'kline_1M']
stream_id_dict = {}
subsequent_finished_klines_queue = Queue(maxsize=0) # infinite-size queue
def list_fragmenter(symbol_list):
list_of_lists = []
for symbol in symbol_list:
for k in range(len(channels)):
list_of_lists.append([symbol, channels[k]])
return list_of_lists
async def start_threads(symbol_and_channel_list):
stream_id = ubwa.create_stream(markets=symbol_and_channel_list[0], channels=symbol_and_channel_list[1],
stream_buffer_name=True)
stream_id_dict[stream_id] = symbol_and_channel_list[0]
async def run_start_threads_in_parallel(markets_list_fragments):
for market_fragment in markets_list_fragments:
task = asyncio.create_task(start_threads(market_fragment))
await task
markets_list_fragments = list_fragmenter(markets)
start = time.time()
asyncio.run(run_start_threads_in_parallel(markets_list_fragments))
end = time.time()
print("Took {} seconds to start {} websockets.".format(end - start, 1184))
all_streams_running = True
# begin parallelization of updating all_streams dictionary with most recent kline
def dictionary_fragmenter(dictionary):
j = 0
list_of_dicts = [{} for _ in range(len(markets) * len(channels))]
for stream_id in dictionary:
list_of_dicts[j][stream_id] = dictionary[stream_id]
j += 1
return list_of_dicts
async def handle_current_dictionary_fragment(dict_fragment):
for stream_id in dict_fragment: # run this in #ofcores different processes
oldest_data_from_stream_buffer = ubwa.pop_stream_data_from_stream_buffer(stream_buffer_name=stream_id)
if oldest_data_from_stream_buffer:
data = json.loads(str(oldest_data_from_stream_buffer))
if "result" in data:
continue
all_streams[dict_fragment[stream_id]][data["stream"]] = data["data"]["k"]
if data["data"]["k"]["x"]:
subsequent_finished_klines_queue.put(data["data"]["k"])
async def handle_batch_of_dicts_in_parallel(inputted_dict_fragments):
for dict_fragment in inputted_dict_fragments:
task = asyncio.create_task(handle_current_dictionary_fragment(dict_fragment))
await task
dict_fragments = dictionary_fragmenter(stream_id_dict)
while True:
asyncio.run(handle_batch_of_dicts_in_parallel(dict_fragments))
time.sleep(0.01)
@api_view(["GET"])
@csrf_exempt
def getHttpKlines(request):
global http_fetched
global all_streams_running
if not http_fetched:
while True:
if all_streams_running:
global all_futures_historical_klines
global subsequent_finished_klines_queue
global markets
prices = client.futures_mark_price()
markets = []
for val in prices:
markets.append(val["symbol"])
all_futures_historical_klines = {}
for symbol in markets:
all_futures_historical_klines[symbol] = {}
eight_timeframes = ["1m", "5m", "15m", "1h", "4h", "1d", "1w", "1M"]
urls = {}
for symbol in markets:
for timeframe in eight_timeframes:
urls[symbol + "@" + timeframe] = 'https://api.binance.com/api/v3/klines?symbol=' + symbol.upper() \
+ '&interval=' + timeframe + '&limit=2'
current_unix_timestamp_seconds = datetime.timestamp(datetime.now())
async def get(url, key, session):
try:
async with session.get(url=url) as response:
historical_klines = await response.json()
#print(historical_klines)
symbol_and_timeframe = key.split("@")
symbol = symbol_and_timeframe[0]
timeframe = symbol_and_timeframe[1]
if len(historical_klines) > 1: #i.e., you have a finished candle that isn't websocket
if "code" in historical_klines:
print(symbol)
all_futures_historical_klines[symbol][timeframe] = historical_klines[0:-1] #cutting off websocket candle
else:
all_futures_historical_klines[symbol][timeframe] = []
#print("Successfully got url {} with resp of length {}.".format(url, len(historical_klines)))
except Exception as e:
print("Unable to get url {} due to {}.".format(url, e))
async def main(urls):
async with aiohttp.ClientSession() as session:
ret = await asyncio.gather(*[get(urls[key], key, session) for key in urls])
#print("Finalized all. Return is a list of len {} outputs.".format(len(ret)))
start = time.time()
asyncio.run(main(urls))
end = time.time()
print("Took {} seconds to pull {} websites.".format(end - start, len(urls)))
# update klines here
http_fetched = True
while True: # need this to keep it running when the queue is empty it should continue checking, not return, ever
try:
kline = subsequent_finished_klines_queue.get()
if kline["T"] > current_unix_timestamp_seconds:
kline_values = [kline["t"], kline["o"], kline["h"], kline["l"], kline["c"], kline["v"], kline["T"],
kline["q"], kline["n"], kline["V"], kline["Q"], kline["B"]]
all_futures_historical_klines[kline["s"]][kline["i"]].append(kline_values)
time.sleep(0.01)
except:
time.sleep(0.01)
continue
return JsonResponse(all_futures_historical_klines)
else:
time.sleep(0.01)
continue
@api_view(["GET"])
@csrf_exempt
def getPercentGainers(request):
global all_streams
global percent_change_dict
global markets
if all_streams_running:
percent_change_dict = {}
for symbol in markets:
percent_change_dict[symbol] = {}
def dictionary_fragmenter_percent_gainers(dictionary):
list_of_dicts = [{} for _ in range(len(markets) * 8)]
i = 0
for symbol in dictionary:
eight_klines = dictionary[symbol]
for timeframe in eight_klines:
list_of_dicts[i][timeframe] = eight_klines[timeframe]
i += 1
return list_of_dicts
async def handle_current_dictionary_fragment(dict_fragment):
for timeframe in dict_fragment:
symbol = timeframe.split("@")[0]
kline_values = dict_fragment[timeframe]
try:
percent_gain_for_timeframe = (float(kline_values["c"]) - float(
kline_values["o"])) / float(kline_values["o"]) * 100
percent_change_dict[symbol.upper()][timeframe] = percent_gain_for_timeframe
except:
continue
async def handle_batch_of_dicts_in_parallel(inputted_dict_fragments):
for dict_fragment in inputted_dict_fragments:
task = asyncio.create_task(handle_current_dictionary_fragment(dict_fragment))
await task
dict_fragments = dictionary_fragmenter_percent_gainers(all_streams)
start = time.perf_counter()
proc = Process(target=handle_batch_of_dicts_in_parallel, args=(dict_fragments,))
proc.start()
proc.join() # doesn't move on until function is finished
# asyncio.run(handle_batch_of_dicts_in_parallel(dict_fragments))
end = time.perf_counter()
# start = time.perf_counter()
# asyncio.run(handle_batch_of_dicts_in_parallel(dict_fragments))
# end = time.perf_counter()
print(f'Finished in {round(end - start, 2)} second(s)')
return JsonResponse(percent_change_dict)
else:
return JsonResponse({"code": "Key"})
# State for the 15-minute-high scanner: a start-once flag plus the list of
# symbols currently printing a new 15m high.
checking_for_15_minute_highs_started = False
fifteen_min_highs = []
def checkFor15MinHighs():
    """Watch every futures symbol for a break above its last closed 15m high,
    adding the symbol to fifteen_min_highs until the next 1m candle closes.

    NOTE(review): price fields from the websocket and HTTP klines are
    strings, so the <= and > below compare lexicographically, not
    numerically -- confirm this is intended (other code uses float()).
    NOTE(review): the inner `while True` loops never await, so the tasks
    created below cannot actually interleave.
    """
    global fifteen_min_highs
    global all_streams
    global all_futures_historical_klines
    global markets
    scan_candidates = []
    for symbol in markets:
        scan_candidates.append(symbol + "@15m")
    async def handle_current_symbol(symbol_and_timeframe):
        list_symbol_and_timeframe = symbol_and_timeframe.split("@")
        symbol = list_symbol_and_timeframe[0]
        timeframe = list_symbol_and_timeframe[1]
        try:
            while True:
                historical_kline_values = all_futures_historical_klines[symbol][timeframe]
                current_websocket_kline = all_streams[symbol][symbol.lower() + "@kline_" + timeframe]
                # wait until the live high is at/below the last closed high...
                if current_websocket_kline["h"] <= historical_kline_values[-1][2]:
                    while True:
                        historical_kline_values = all_futures_historical_klines[symbol][timeframe]
                        current_websocket_kline = all_streams[symbol][symbol.lower() + "@kline_" + timeframe]
                        # ...then flag the symbol once the close breaks above it
                        if current_websocket_kline["c"] > historical_kline_values[-1][2]:
                            fifteen_min_highs.append(symbol)
                            while True:
                                current_1m_websocket_kline = all_streams[symbol][symbol.lower() + "@kline_1m"]
                                # drop the flag when the current 1m candle closes
                                if current_1m_websocket_kline["x"]:
                                    fifteen_min_highs.remove(symbol)
                                    break
                            break
        except:
            pass
    async def handle_candidates_in_parallel(candidates):
        for symbol_and_timeframe in candidates:
            task = asyncio.create_task(handle_current_symbol(symbol_and_timeframe))
            await task
    asyncio.run(handle_candidates_in_parallel(scan_candidates))
@api_view(["GET"])
@csrf_exempt
def get15MinuteHighs(request):
global checking_for_15_minute_highs_started
global fifteen_min_highs
global percent_change_dict
scan_results = {}
if http_fetched:
if not checking_for_15_minute_highs_started:
checking_for_15_minute_highs_started = True
checkFor15MinHighs()
for symbol in fifteen_min_highs:
scan_results[symbol] = percent_change_dict[symbol]
return JsonResponse(scan_results)
@api_view(["GET"])
@csrf_exempt
def getFuturesOpenOrders(request):
try:
response = client.futures_get_open_orders()
return JsonResponse(response, safe=False)
except BinanceAPIException as e:
# error handling goes here
return JsonResponse({'error': e.message})
@api_view(['GET'])
@csrf_exempt
def getFuturesUserData(request):
    """Open a Binance futures userData websocket stream and return its id."""
    bwam = BinanceWebSocketApiManager(exchange="binance.com-futures")
    # SECURITY: credentials must come from the environment (matching the
    # praw setup at the top of this module) -- the previous hard-coded API
    # key/secret pair must be treated as leaked and rotated.
    binance_api_key = os.environ.get("binance_api_key")
    binance_api_secret = os.environ.get("binance_api_secret")
    userdata_stream_id = bwam.create_stream(["arr"], ["!userData"], api_key=binance_api_key, api_secret=binance_api_secret)
    return JsonResponse({'streamId' : userdata_stream_id})
@api_view(["POST"])
@csrf_exempt
def marketOrder(request):
data = json.loads(request.body.decode('utf-8'))
try:
order = client.futures_create_order(
symbol=data["symbol"],
side=data["side"],
type=data["type"],
quantity=data["quantity"])
return JsonResponse(order, safe=False)
except BinanceAPIException as e:
# error handling goes here
return JsonResponse({'error': e.message})
except BinanceOrderException as e:
# error handling goes here
return JsonResponse({'error': e.message})
@api_view(["POST"])
@csrf_exempt
def stopOrder(request):
# Stop Order:
data = json.loads(request.body.decode('utf-8'))
try:
order = client.futures_create_order(
symbol=data["symbol"],
type=data["type"],
side=data["side"],
stopPrice=data["stopPrice"],
quantity=data["quantity"])
return JsonResponse(order, safe=False)
except BinanceAPIException as e:
# error handling goes here
return JsonResponse({'error': e.message})
except BinanceOrderException as e:
# error handling goes here
return JsonResponse({'error': e.message})
@api_view(["POST"])
@csrf_exempt
def limitOrder(request):
data = json.loads(request.body.decode('utf-8'))
try:
order = client.futures_create_order(
symbol=data["symbol"],
type=data["type"],
side=data["side"],
price=data["price"],
timeInForce = "GTC",
quantity=data["quantity"])
return JsonResponse(order, safe=False)
except BinanceAPIException as e:
# error handling goes here
return JsonResponse({'error': e.message})
except BinanceOrderException as e:
# error handling goes here
return JsonResponse({'error': e.message})
| 707Plushka707/TradingClone | python_backend/api/views.py | views.py | py | 16,196 | python | en | code | 0 | github-code | 13 |
8429922224 | import bitcoin.base58
import bitcoin.core
import colorcore.addresses
import unittest
import unittest.mock
class Base58AddressTests(unittest.TestCase):
    """Tests for Base58Address parsing/serialization, with and without an
    Open Assets namespace byte. All fixtures are mainnet-encoded."""
    def setUp(self):
        bitcoin.SelectParams('mainnet')
    def test_from_string_no_namespace(self):
        # Plain P2PKH address: version 0, no namespace, 20-byte payload.
        address = colorcore.addresses.Base58Address.from_string('16UwLL9Risc3QfPqBUvKofHmBQ7wMtjvM')
        self.assertEqual(0, address.address.nVersion)
        self.assertEqual(None, address.namespace)
        self.assertEqual(bitcoin.core.x('010966776006953D5567439E5E39F86A0D273BEE'), address.to_bytes())
    def test_from_string_with_namespace(self):
        # Same payload encoded with namespace byte 19.
        address = colorcore.addresses.Base58Address.from_string('akB4NBW9UuCmHuepksob6yfZs6naHtRCPNy')
        self.assertEqual(0, address.address.nVersion)
        self.assertEqual(19, address.namespace)
        self.assertEqual(bitcoin.core.x('010966776006953D5567439E5E39F86A0D273BEE'), address.to_bytes())
    def test_from_string_invalid_length(self):
        # Oversized base58 input (a WIF-length string) must be rejected.
        self.assertRaises(
            ValueError,
            colorcore.addresses.Base58Address.from_string,
            '5Hwgr3u458GLafKBgxtssHSPqJnYoGrSzgQsPwLFhLNYskDPyyA')
    def test_from_string_invalid_checksum(self):
        # Last character altered -> checksum failure.
        self.assertRaises(
            bitcoin.base58.Base58ChecksumError,
            colorcore.addresses.Base58Address.from_string,
            'akB4NBW9UuCmHuepksob6yfZs6naHtRCPNz')
    def test_from_bytes_invalid_value(self):
        # Out-of-range version/namespace (>255) or oversized payload must raise.
        self.assertRaises(
            ValueError,
            colorcore.addresses.Base58Address,
            bitcoin.core.x('010966776006953D5567439E5E39F86A0D273BEE'),
            256, 1)
        self.assertRaises(
            ValueError,
            colorcore.addresses.Base58Address,
            bitcoin.core.x('010966776006953D5567439E5E39F86A0D273BEE'),
            1, 256)
        self.assertRaises(
            ValueError,
            colorcore.addresses.Base58Address,
            bitcoin.core.x('010966776006953D5567439E5E39F86A0D273BEEFF'),
            1, 1)
    def test_str_no_namespace(self):
        # Round-trip: parsing then str() reproduces the input address.
        address = colorcore.addresses.Base58Address.from_string('16UwLL9Risc3QfPqBUvKofHmBQ7wMtjvM')
        result = str(address)
        self.assertEqual('16UwLL9Risc3QfPqBUvKofHmBQ7wMtjvM', result)
    def test_str_with_namespace(self):
        address = colorcore.addresses.Base58Address.from_string('akB4NBW9UuCmHuepksob6yfZs6naHtRCPNy')
        result = str(address)
        self.assertEqual('akB4NBW9UuCmHuepksob6yfZs6naHtRCPNy', result)
| martexcoin/colorcore | tests/test_addresses.py | test_addresses.py | py | 2,496 | python | en | code | 2 | github-code | 13 |
16131561693 | from pprint import pp
import graphene
class Gender(graphene.Enum):
MALE = "MALE Person"
FEMALE = "FEMALE Person"
OTHER = "OTHER Type Person"
class Person(graphene.ObjectType):
name = graphene.String()
age = graphene.Int()
gender = graphene.Field(Gender)
class Query(graphene.ObjectType):
person = graphene.Field(Person)
def resolve_person(self, info):
return Person(name="Gudo Van", age=30, gender=Gender.MALE)
schema = graphene.Schema(query=Query)
query = """
query {
person {
gender
age
name
}
}
"""
result = schema.execute(query)
pp(result.data)
| udhayprakash/PythonMaterial | python3/16_Web_Services/e_GraphQL/creating/a_graphene/b5_Enum_types.py | b5_Enum_types.py | py | 661 | python | en | code | 7 | github-code | 13 |
8829825690 | import pandas as pd
users_info=pd.read_csv("./files/users-info.csv",index_col=0)
users_score=pd.read_csv("./files/users-score-uname.csv",index_col=0)
# Sum each user's scores, then inner-join the totals onto user info by username.
print(users_info.join(users_score.groupby(['username']).sum(),how='inner',on='username'))
# groupby + sum
| Johnson-xie/jtthink_python_math | courseware/pandas/06/课件/class6.py | class6.py | py | 259 | python | en | code | 0 | github-code | 13 |
class Rlist(object):
    """Recursive (linked) list: a first element plus the rest of the list.

    NOTE: the original class header was fused with an extraction artifact
    ("13614760030 | "), which made the line syntactically invalid; only
    that garbled prefix has been removed.
    """

    class EmptyList(object):
        """Sentinel type for the empty list; its length is 0."""
        def __len__(self):
            return 0

    # Shared singleton marking the end of every Rlist.
    empty = EmptyList()

    def __init__(self, first, rest=empty):
        self.first = first
        self.rest = rest
def rlist_to_list(rlist):
    """Take an RLIST and returns a Python list with the same elements.

    >>> rlist = Rlist(1, Rlist(2, Rlist(3, Rlist(4))))
    >>> rlist_to_list(rlist)
    [1, 2, 3, 4]
    >>> rlist_to_list(Rlist.empty)
    []
    """
    # Walk the links iteratively, collecting each first element in order.
    elements = []
    while rlist is not Rlist.empty:
        elements.append(rlist.first)
        rlist = rlist.rest
    return elements
if __name__ == "__main__":
import doctest
doctest.testmod()
| clovery410/mycode | python/chapter-2/lab8-rlist-1.py | lab8-rlist-1.py | py | 856 | python | en | code | 1 | github-code | 13 |
1531145341 | """A set of simple utility functions for array math."""
import numpy as np
import scipy.signal as sps
def reduce_by_midpoint(array):
    """Subtract off and divide by the middle array element.

    The midpoint is taken from a sorted copy of ``array`` (rounded-middle
    index); the returned array keeps the original element ordering.
    """
    ordered = sorted(array)
    midpoint = ordered[int(np.round((len(ordered) - 1) / 2.0))]
    return (array - midpoint) / midpoint
def filter_data(array, filter_win_length=0):
    """Smooth data with an order-1 Savitzky-Golay filter.

    ``filter_win_length`` must be odd and >= 3; invalid or missing values
    are coerced (default: ~1% of the data length, bumped to the nearest
    valid odd window of at least 3).
    """
    win = filter_win_length
    if win == 0:
        # Default window: roughly 1% of the data vector.
        win = int(np.round(len(array) / 100.0))
    if win % 2 == 0:
        win += 1
    win = max(win, 3)
    return sps.savgol_filter(array, win, 1)
def mask_array_ends(array, mask=None):
    """Return the ends of an array.

    If mask is an int, returns mask items from each end of array.
    If mask is a float, treats mask as a fraction of array length per end.
    If mask is an ndarray, list, or slice, return array[mask].
    If mask is None, return array unchanged.

    Raises:
        ValueError: if mask is not one of the supported types.
    """
    if mask is None:
        return array
    if isinstance(mask, (float, np.floating)):
        # Interpret the float as a fraction of the array length per end.
        pct_mask = int(len(array) * mask)
        return np.concatenate((array[:pct_mask], array[-pct_mask:]))
    if isinstance(mask, (int, np.integer)) and not isinstance(mask, bool):
        # NOTE(review): mask == 0 makes array[-0:] the whole array, so the
        # result is the full array (unchanged from the original behavior).
        return np.concatenate((array[:mask], array[-mask:]))
    # BUGFIX: the original compared type(mask) against np.array, which is a
    # factory *function* rather than a type, so ndarray masks always raised
    # ValueError despite the docstring. isinstance with np.ndarray fixes it.
    if isinstance(mask, (np.ndarray, list, slice)):
        return array[mask]
    raise ValueError("Mask type must be number, array, or slice")
| FaustinCarter/scraps | scraps/fitsS21/utils.py | utils.py | py | 1,806 | python | en | code | 13 | github-code | 13 |
33149687979 | import discord
import praw
from discord.ext import commands, tasks
import random
import requests
import os
from itertools import cycle
filehandle = open("commands.md")
filehandle = filehandle.read()
bot = commands.Bot(command_prefix='$', case_insensitive=True)
reddit = praw.Reddit(client_id=os.environ.get("praw_client_id"), client_secret=os.environ.get("praw_client_secret"),
user_agent="myredditbot1.0")
adult_subs = ['ass', 'boobs', 'nudes', 'milf', 'bdsm']
memes_subs = ['memes', 'dankmemes']
dad_jokes_subs = ['dadjokes']
@bot.event
async def on_ready():
    """Set the bot presence and build the shared welcome embed at startup."""
    # BUGFIX: myembed is read by on_member_join, but it was only ever
    # assigned here as a *local* variable, so the join handler raised
    # NameError. Declare it global so the handler can see it.
    global myembed
    await bot.change_presence(activity=discord.Game(name="with ur feelings"))
    myembed = discord.Embed(type="rich",
                            colour=discord.Color.dark_red(), description=filehandle)
    myembed.set_image(
        url="https://cdn.pixabay.com/photo/2017/05/04/15/12/welcome-sign-2284312__340.jpg")
    myembed.set_footer(text="Bot by: @Muhammad#2048",
                       icon_url="https://www.flaticon.com/premium-icon/icons/svg/895/895903.svg")
    myembed.set_author(
        name="FPath", icon_url="https://www.flaticon.com/premium-icon/icons/svg/1183/1183664.svg")
    myembed.add_field(name="$meme", value="Used to get a meme", inline=False)
    myembed.add_field(
        name="$nsfw", value="Used to get a sexy pic of who knows what", inline=False)
    myembed.add_field(name="$joke", value="For terrible dad jokes ", inline=False)
    myembed.add_field(
        name="$covid [country]", value="For the latest stats of the country passed to the command, in case of multiple country names like New Zealand plz wrap them in quotation marks", inline=False)
@bot.event
async def on_member_join(member):
myembed.title = f"Hello and welcome to {member.guild}"
await member.send(embed=myembed)
chan = discord.utils.get(member.guild.channels, name="general")
await chan.send(f"{member.mention} hello and welcome, plz enjoy your time here and be nice to the others and that's pretty much it :)")
@bot.command()
async def nsfw(ctx):
subreddit = reddit.subreddit(random.choice(adult_subs))
top = subreddit.random()
embed = discord.Embed(title=top.title)
embed.set_image(url=top.url)
await ctx.author.send(embed=embed)
@bot.command()
async def meme(ctx):
subreddit = reddit.subreddit(random.choice(memes_subs))
top = subreddit.random()
embed = discord.Embed(title=top.title)
embed.set_image(url=top.url)
await ctx.send(embed=embed)
@bot.command()
async def joke(ctx):
subreddit = reddit.subreddit(random.choice(dad_jokes_subs))
top = subreddit.random()
thejoke = f"{top.title} {top.selftext}"
await ctx.send(thejoke)
headers = {
'x-rapidapi-host': "covid-19-data.p.rapidapi.com",
'x-rapidapi-key': "YOUR_API_KEY"
}
url = "https://covid-19-data.p.rapidapi.com/country"
@bot.command()
async def covid(ctx, arg):
cases = discord.Embed(type='rich', color=discord.Color.dark_blue())
cases.set_thumbnail(
url="https://cdn.pixabay.com/photo/2020/03/19/21/35/covid-4948866__340.jpg")
cases.set_footer(text="Stay home please")
cases.set_author(name="COVID-19 data",
icon_url="https://rapidapi-prod-collections.s3.amazonaws.com/50ac3a55-4378-4965-a9a4-80bc9d05ac7a.jpg")
querystring = {"format": "json", "name": arg}
resp = requests.get(url=url, headers=headers, params=querystring)
resp = resp.json()[0]
for k, v in resp.items():
cases.add_field(name=k, value=v, inline=False)
await ctx.send(embed=cases)
bot.run("YOUR_DISCORD_TOKEN")
| MuhammadAlzamily/fpath | discord_bot.py | discord_bot.py | py | 3,549 | python | en | code | 0 | github-code | 13 |
543719080 | import subprocess
performance = ['sudo', 'sh', '-c', 'echo performance > /sys/devices/system/cpu/cpufreq/policy0/scaling_governor']
powersave = ['sudo', 'sh', '-c', 'echo powersave > /sys/devices/system/cpu/cpufreq/policy0/scaling_governor']
subprocess.check_call(performance)
import tensorflow as tf
import tensorflow.lite as tflite
import base64
import datetime
import requests
import numpy as np
from scipy import signal
import json
import zlib
import sys
import os
seed = 42
tf.random.set_seed(seed)
np.random.seed(seed)
threshold = 0.4
sampling_rate = 16000
resampling_rate = 8000
frame_length = 240
frame_step = 120
ROOT_DIR = "./"
tfModel = "./little.tflite"
url = "http://192.168.1.232:8080/"
zip_path = tf.keras.utils.get_file(
origin="http://storage.googleapis.com/download.tensorflow.org/data/mini_speech_commands.zip",
fname='mini_speech_commands.zip',
extract=True,
cache_dir='.', cache_subdir='data')
data_dir = os.path.join('.', 'data', 'mini_speech_commands')
filenames = tf.io.gfile.glob(str(data_dir) + '/*/*')
filenames = tf.random.shuffle(filenames)
test_files = tf.strings.split(tf.io.read_file(ROOT_DIR +'kws_test_split.txt'),sep='\n')[:-1]
LABELS = ['down', 'stop', 'right', 'left', 'up', 'yes', 'no', 'go']
class preprocess:
    """WAV bytes -> model-input features (STFT spectrogram or MFCCs).

    All shape-determining parameters are fixed at construction time so
    expensive pieces (e.g. the mel filterbank matrix) are computed once
    and reused for every clip. `self.preprocess` is bound to the STFT or
    MFCC variant depending on the `mfcc` flag.
    """
    def __init__(self, labels, sampling_rate, frame_length, frame_step,
                num_mel_bins=None, lower_frequency=None, upper_frequency=None,
                num_coefficients=None, mfcc=False, resampling_rate = None):
        self.labels = labels
        self.sampling_rate = sampling_rate
        self.resampling_rate = resampling_rate
        self.frame_length = frame_length
        self.frame_step = frame_step
        self.lower_frequency = lower_frequency
        self.upper_frequency = upper_frequency
        self.num_mel_bins = num_mel_bins
        self.num_coefficients = num_coefficients
        # One-sided FFT bin count for the chosen frame length.
        num_spectrogram_bins = (frame_length) // 2 + 1
        # Feature shapes are computed against the post-resampling rate.
        if self.resampling_rate is not None:
            rate = self.resampling_rate
        else:
            rate = self.sampling_rate
        # Number of STFT frames for a clip of exactly `rate` samples (1 s).
        self.num_frames = (rate - self.frame_length) // self.frame_step + 1
        if mfcc is True:
            # Precompute the mel filterbank once; reused for every clip.
            self.linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
                self.num_mel_bins, num_spectrogram_bins, rate,
                self.lower_frequency, self.upper_frequency)
            self.preprocess = self.preprocess_with_mfcc
        else:
            self.preprocess = self.preprocess_with_stft
    def custom_resampling(self, audio):
        # Polyphase downsampling. NOTE(review): assumes sampling_rate is an
        # integer multiple of resampling_rate (16 kHz -> 8 kHz here).
        audio = signal.resample_poly(audio, 1, self.sampling_rate // self.resampling_rate)
        audio = tf.convert_to_tensor(audio, dtype=tf.float32)
        return audio
    def read(self, audio_bytes):
        """Decode a mono WAV byte string, optionally resampling it."""
        audio, _ = tf.audio.decode_wav(audio_bytes)
        audio = tf.squeeze(audio, axis=1)
        if self.resampling_rate is not None:
            # numpy_function bridges the scipy resampler into the TF graph.
            audio = tf.numpy_function(self.custom_resampling, [audio], tf.float32)
        return audio
    def pad(self, audio):
        """Zero-pad the clip to exactly one second of samples."""
        if self.resampling_rate is not None:
            rate = self.resampling_rate
        else:
            rate = self.sampling_rate
        zero_padding = tf.zeros([rate] - tf.shape(audio), dtype=tf.float32)
        audio = tf.concat([audio, zero_padding], 0)
        audio.set_shape([rate])
        return audio
    def get_spectrogram(self, audio):
        """Magnitude STFT of the (padded) clip."""
        stft = tf.signal.stft(audio, frame_length=self.frame_length,
                frame_step=self.frame_step, fft_length=self.frame_length)
        spectrogram = tf.abs(stft)
        return spectrogram
    def get_mfccs(self, spectrogram):
        """Mel-project the spectrogram and keep the first num_coefficients MFCCs."""
        mel_spectrogram = tf.tensordot(spectrogram,
                self.linear_to_mel_weight_matrix, 1)
        # +1e-6 avoids log(0) on silent frames.
        log_mel_spectrogram = tf.math.log(mel_spectrogram + 1.e-6)
        mfccs = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrogram)
        mfccs = mfccs[..., :self.num_coefficients]
        return mfccs
    def preprocess_with_stft(self, audio_bytes):
        """Full pipeline producing a 32x32x1 spectrogram image."""
        audio = self.read(audio_bytes)
        audio = self.pad(audio)
        spectrogram = self.get_spectrogram(audio)
        spectrogram = tf.expand_dims(spectrogram, -1)
        spectrogram = tf.image.resize(spectrogram, [32, 32])
        return spectrogram
    def preprocess_with_mfcc(self, audio_bytes):
        """Full pipeline producing a [1, num_frames, num_coefficients, 1] MFCC tensor."""
        audio = self.read(audio_bytes)
        audio = self.pad(audio)
        spectrogram = self.get_spectrogram(audio)
        mfccs = self.get_mfccs(spectrogram)
        # Reshaping since only 1 audio at a time is given for inference
        #print(1, self.num_frames, self.num_coefficients)
        mfccs = tf.reshape(mfccs, [1, self.num_frames, self.num_coefficients, 1])
        #mfccs = tf.expand_dims(mfccs, -1)
        return mfccs
MFCC_OPTIONS = {'frame_length': frame_length, 'frame_step': frame_step, 'mfcc': True,
'lower_frequency': 20, 'upper_frequency': 4000, 'num_mel_bins': 40,
'num_coefficients': 10}
options = MFCC_OPTIONS
Preprocess = preprocess(LABELS, sampling_rate=sampling_rate, resampling_rate=resampling_rate, **options)
interpreter = tflite.Interpreter(model_path=tfModel)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
def success_checker(predictions, threshold):
    """
    Return True if we need to send the audio file to the big model.

    The decision uses the score margin -- the gap between the two largest
    softmax probabilities of `predictions`. A margin below `threshold`
    means the little model is not confident enough.
    """
    if threshold > 1 or threshold < 0:
        raise ValueError("The threshold must be a probability [0,1]")
    ordered = tf.sort(tf.nn.softmax(predictions), direction='DESCENDING')
    score_margin = ordered[0] - ordered[1]
    return bool(score_margin < threshold)
# Evaluate the little on-device model on the test split; samples the
# little model is unsure about are forwarded (base64-encoded, SenML-style
# JSON) to the big model served at `url`.
accuracy = 0
count = 0
com_size = 0  # cumulative bytes of JSON sent over the network
for file_path in test_files:
    # Ground-truth label is the parent directory name of the WAV file.
    parts = tf.strings.split(file_path, os.path.sep)
    label = parts[-2]
    label_id = tf.argmax(label == LABELS)
    audio_binary = tf.io.read_file(file_path)
    mfccs = Preprocess.preprocess_with_mfcc(audio_binary)
    input_tensor = mfccs
    y_true = label_id
    # Run the TFLite interpreter on the single-clip batch.
    interpreter.set_tensor(input_details[0]['index'], input_tensor)
    interpreter.invoke()
    y_pred = interpreter.get_tensor(output_details[0]['index'])
    y_pred = y_pred.squeeze()
    # score-margin policy: True -> forward the raw audio to the big model.
    BIG = success_checker(y_pred, threshold)
    if BIG is True:
        now = datetime.datetime.now()
        timestamp = int(now.timestamp())
        audio_bytes = audio_binary.numpy()
        audio_b64bytes = base64.b64encode(audio_bytes)
        audio_string = audio_b64bytes.decode()
        body = {
            # base name of this client (my url) for the SenML record
            "bn": "http://192.168.1.92/",
            "bt": timestamp,
            "e": [
                {
                    "n": "audio",
                    "u": "/",
                    "t": 0,
                    "vd": audio_string
                }
            ]
        }
        com_size += len(json.dumps(body))
        r = requests.put(url, json=body)
        if r.status_code == 200:
            # Big model answers with its predicted label index.
            rbody = r.json()
            y_pred = int(rbody['predicted_label'])
        else:
            # NOTE(review): on HTTP failure y_pred stays a raw logits array,
            # so the accuracy comparison below is meaningless for this sample.
            print("Error")
            print(r.text)
    else:
        y_pred = np.argmax(y_pred)
    y_true = y_true.numpy().squeeze()
    accuracy += y_pred == y_true
    count += 1
accuracy/=float(count)
print("Accuracy: {}".format(accuracy*100))
print("Comunication size: {} MB".format(com_size/(2**20)))
| RoboTuan/ML4IOT_HMW | HMW3/little_client.py | little_client.py | py | 7,562 | python | en | code | 0 | github-code | 13 |
17700665509 | from rest_framework import serializers
from .models import Article, TaggedArticle, ArticleTag
from django.contrib.auth.models import User
from taggit_serializer.serializers import TaggitSerializer, TagListSerializerField
from comments.fields import CommentArticleRelatedField
class UserSerializer(serializers.ModelSerializer):
    """Compact projection of Django's User exposing only username and email."""
    class Meta:
        model = User
        fields = [
            'username',
            'email'
        ]
class ArticleSerializer(serializers.ModelSerializer, TaggitSerializer):
    """Serializes Article instances with nested authors, comments and tags.

    NOTE(review): django-taggit-serializer's docs list TaggitSerializer
    *before* ModelSerializer in the bases; confirm tag writes still work
    with this MRO.
    """
    # Author fields render via the nested UserSerializer and are never written.
    created_by = UserSerializer(read_only=True)
    updated_by = UserSerializer(read_only=True)
    comment = CommentArticleRelatedField(required=False, allow_null=True)
    tags = TagListSerializerField(required=False, allow_null=True)
    class Meta:
        model = Article
        fields = [
            'header',
            'body',
            'tags',
            'created_by',
            'created_on',
            'updated_by',
            'updated_on',
            'comment'
        ]
class ArticleTagSerializer(serializers.ModelSerializer):
    """Serializes an ArticleTag with the articles it is attached to."""
    class Meta:
        model = ArticleTag
        fields = [
            'name',
            'articles'
        ]
| oshevelo/jul_py_barbershop | barbershop/blog/serializers.py | serializers.py | py | 1,184 | python | en | code | 0 | github-code | 13 |
71004553297 | import os
import shutil
import torch
import torch.nn as nn
import torch.utils.data as data_utils
from matplotlib import pyplot as plt
from tqdm import tqdm
from transformers import HerbertTokenizer, RobertaModel
from config import Config
from datasets.massive import IntentDataset
from models.intent_classifier import IntentClassifier
import utils
config = Config()
# Each run gets its own results directory holding a frozen copy of the
# config and model sources, plus a checkpoints subfolder.
experiment_dir = os.path.abspath(f'./results/{config.experiment_name}')
if not os.path.isdir(experiment_dir):
    os.mkdir(experiment_dir)
shutil.copyfile('./config.py', f'{experiment_dir}/config.py')
target_dir = os.path.join(experiment_dir, 'models')
if not os.path.isdir(target_dir):
    os.mkdir(target_dir)
utils.copy_models('./models', target_dir)
checkpoints_dir = os.path.join(experiment_dir, 'checkpoints')
if not os.path.isdir(checkpoints_dir):
    os.mkdir(checkpoints_dir)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Polish HerBERT tokenizer; the collate function batches its outputs.
tokenizer = HerbertTokenizer.from_pretrained(
    "allegro/herbert-klej-cased-tokenizer-v1")
collate_fn = utils.collate_fn_factory(tokenizer)
dataset = IntentDataset(path=config.dataset_path,
                        mode='train', random_seed=config.dataset_random_seed)
train_loader = data_utils.DataLoader(dataset,
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     collate_fn=collate_fn)
# NOTE(review): this loader wraps the *validation* split despite its name.
val_dataset = IntentDataset(
    path=config.dataset_path, mode='val', random_seed=config.dataset_random_seed)
test_loader = data_utils.DataLoader(val_dataset,
                                    batch_size=config.batch_size,
                                    shuffle=True,
                                    collate_fn=collate_fn)
# Frozen HerBERT backbone + trainable intent-classification head.
language_model = RobertaModel.from_pretrained(
    "allegro/herbert-klej-cased-v1", is_decoder=False)
language_model = language_model.to(device)
intent_classifier = IntentClassifier(
    hidden_dim=768, output_dim=len(dataset.intents), device=device)
intent_classifier = intent_classifier.to(device)
# Only the classifier's parameters are optimized.
optimizer = torch.optim.Adam(
    intent_classifier.parameters(), lr=config.learning_rate, betas=config.adam_betas)
loss_func = nn.BCELoss()
loss_list = []
val_loss_list = []
acc_list = []
val_acc_list = []
# NOTE(review): best-model saving only triggers above this hand-picked
# starting threshold; runs that never beat 0.83 produce no best.pt.
best_acc = 0.83
# One epoch = full pass over train_loader, then a validation pass.
# The language model stays frozen (forward passes under no_grad); only
# intent_classifier's parameters are updated.
for epoch_index in range(1, config.epoch_count + 1):
    epoch_loss = 0
    val_epoch_loss = 0
    epoch_acc = 0
    val_epoch_acc = 0
    # .train() on the frozen backbone only toggles dropout/norm behaviour.
    language_model.train()
    intent_classifier.train()
    loader_len = len(train_loader)  # NOTE(review): unused local
    for tokenizer_output, labels in tqdm(train_loader):
        tokenizer_output = {key: val.to(device)
                            for key, val in tokenizer_output.items()}
        # BCELoss needs one-hot float targets.
        labels_one_hot = nn.functional.one_hot(labels, len(dataset.intents))
        labels_one_hot = labels_one_hot.to(device).type(torch.float)
        with torch.no_grad():
            lm_outputs = language_model(**tokenizer_output)
        cls_hiddens = lm_outputs.pooler_output
        # Mean-pooled token embeddings complement the [CLS] pooler output.
        hidden_state = lm_outputs.last_hidden_state.mean(dim=1)
        intents_pred = intent_classifier(cls_hiddens, hidden_state, lm_outputs.last_hidden_state)
        loss = loss_func(intents_pred, labels_one_hot)
        epoch_loss += loss.item()
        intents_decoded = intents_pred.argmax(dim=1).cpu()
        accuracy = torch.sum(intents_decoded == labels).sum() / \
            intents_decoded.shape[0]
        epoch_acc += accuracy.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Accuracies are averaged over batches; losses stay running sums.
    epoch_acc /= len(train_loader)
    language_model.eval()
    intent_classifier.eval()
    with torch.no_grad():
        for tokenizer_output, labels in tqdm(test_loader):
            tokenizer_output = {key: val.to(device)
                                for key, val in tokenizer_output.items()}
            labels_one_hot = nn.functional.one_hot(
                labels, len(dataset.intents))
            labels_one_hot = labels_one_hot.to(device).type(torch.float)
            lm_outputs = language_model(**tokenizer_output)
            cls_hiddens = lm_outputs.pooler_output
            hidden_state = lm_outputs.last_hidden_state.mean(dim=1)
            intents_pred = intent_classifier(cls_hiddens, hidden_state, lm_outputs.last_hidden_state)
            intents_decoded = intents_pred.argmax(dim=1).cpu()
            accuracy = torch.sum(
                intents_decoded == labels).sum() / intents_decoded.shape[0]
            val_epoch_acc += accuracy.item()
            loss = loss_func(intents_pred, labels_one_hot)
            val_epoch_loss += loss.item()
    val_epoch_acc /= len(test_loader)
    loss_list.append(epoch_loss)
    val_loss_list.append(val_epoch_loss)
    acc_list.append(epoch_acc)
    val_acc_list.append(val_epoch_acc)
    info_string = f'Epoch: {epoch_index}, train_loss: {epoch_loss:.4f}, val_loss: {val_epoch_loss:.4f}, train_acc: {epoch_acc:.4f}, val_acc: {val_epoch_acc:.4f}'
    print(info_string)
    with open(os.path.join(experiment_dir, 'log.txt'), mode='a') as f:
        f.write(f'{info_string}\n')
    # Keep the model with the best validation accuracy seen so far.
    if val_epoch_acc > best_acc:
        checkpoint_path = os.path.join(experiment_dir, f'best.pt')
        print(f'Saving model (better accuracy {best_acc:.4f} -> {val_epoch_acc:.4f}) {checkpoint_path}')
        torch.save(intent_classifier.state_dict(), checkpoint_path)
        with open(os.path.join(experiment_dir, 'best_info.txt'), mode='w') as f:
            f.write(info_string)
        best_acc = val_epoch_acc
    # Periodic checkpoint regardless of quality.
    if epoch_index % config.save_every == 0:
        checkpoint_path = os.path.join(
            experiment_dir, 'checkpoints', f'checkpoint_intent_classifier_E_{epoch_index}_L_{val_epoch_acc:.4f}.pt')
        print(f'Saving checkpoint {checkpoint_path}')
        torch.save(intent_classifier.state_dict(), checkpoint_path)
    # Curves are re-rendered after every epoch.
    plt.cla()
    plt.plot(loss_list)
    plt.savefig(f'{experiment_dir}/loss.png')
    plt.cla()
    plt.plot(val_loss_list)
    plt.savefig(f'{experiment_dir}/val_loss.png')
    plt.cla()
    plt.plot(acc_list)
    plt.savefig(f'{experiment_dir}/acc.png')
    plt.cla()
    plt.plot(val_acc_list)
    plt.savefig(f'{experiment_dir}/val_acc.png')
| Kacprate/Intent-classification-Polish-language | train.py | train.py | py | 6,189 | python | en | code | 0 | github-code | 13 |
3035417553 | import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
# Set the global font and size
rc('font',**{'family':'sans-serif','sans-serif':['DejaVu Sans'],'size':25})
# Set the font used for math
rc('mathtext',**{'default':'regular'})
def stylize_axes(ax, size=25, legend=True, xlabel=None, ylabel=None, title=None, xticks=None, yticks=None, xticklabels=None, yticklabels=None, top_spine=True, right_spine=True):
    """
    stylizes the axes of our plots.

    Shows/hides the top and right spines, draws outward tick marks, and
    applies any of the optional title/label/tick settings that are not
    None. When `legend` is True, a legend with font size `size` is added.
    Returns the axes for chaining.
    """
    ax.spines['top'].set_visible(top_spine)
    ax.spines['right'].set_visible(right_spine)
    # BUG FIX: these boolean tick switches were passed as the string 'off'
    # (pre-matplotlib-2.0 idiom). Modern matplotlib evaluates a non-empty
    # string as truthy, which *enables* the top/right ticks instead of
    # hiding them; pass real booleans.
    ax.xaxis.set_tick_params(top=False, direction='out', width=1)
    ax.yaxis.set_tick_params(right=False, direction='out', width=1)
    if title is not None:
        ax.set_title(title)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if xticks is not None:
        ax.set_xticks(xticks)
    if yticks is not None:
        ax.set_yticks(yticks)
    if xticklabels is not None:
        ax.set_xticklabels(xticklabels)
    if yticklabels is not None:
        ax.set_yticklabels(yticklabels)
    if legend:
        ax.legend(fontsize=size)
    return ax
def custom_logplot(ax, x, y, label="loss", xlims=None, ylims=None, color='red', linestyle='solid', marker=None):
    """
    Customized plot with log scale on the y axis.

    The marker is only forwarded to matplotlib when one is supplied, and
    axis limits are applied on demand. Returns the axes for chaining.
    """
    kwargs = dict(color=color, label=label, linestyle=linestyle)
    if marker is not None:
        kwargs['marker'] = marker
    ax.semilogy(x, y, **kwargs)
    if xlims is not None:
        ax.set_xlim(xlims)
    if ylims is not None:
        ax.set_ylim(ylims)
    return ax
def custom_scatterplot(ax, x, y, xlims=None, ylims=None, error=1.0, color='green', markerscale=10):
    """
    Customized scatter plot where marker size is proportional to error measure.

    `error` may be a scalar or a per-point array; marker area is
    error * markerscale. Returns the axes for chaining.
    """
    markersize = error * markerscale
    ax.scatter(x, y, color=color, marker='o', s=markersize, alpha=0.5)
    if xlims is not None:
        ax.set_xlim(xlims)
    if ylims is not None:
        ax.set_ylim(ylims)
    return ax
def custom_lineplot(ax, x, y, label=None, xlims=None, ylims=None, color="red", linestyle="solid", linewidth=2.0, marker=None):
    """
    Customized line plot.

    Optional settings (label, marker) are only forwarded to matplotlib
    when supplied, and axis limits are applied on demand. Returns the
    axes for chaining.
    """
    kwargs = dict(color=color, linestyle=linestyle, linewidth=linewidth)
    if label is not None:
        kwargs['label'] = label
    if marker is not None:
        kwargs['marker'] = marker
    ax.plot(x, y, **kwargs)
    if xlims is not None:
        ax.set_xlim(xlims)
    if ylims is not None:
        ax.set_ylim(ylims)
    return ax
def custom_barchart(ax, x, y, error, xlims=None, ylims=None, color='blue', width=1.0, label=None):
    """
    Customized bar chart with positive error bars only.

    `error` gives the upward error per bar; the downward component is
    forced to zero via an asymmetric yerr pair. Returns the axes.
    """
    error_kw = {'capsize': 5, 'capthick': 1, 'ecolor': 'black'}
    # Asymmetric yerr: [lower, upper] with a zero-length lower bar.
    error = [np.zeros(len(error)), error]
    ax.bar(x, y, color=color, width=width, yerr=error, error_kw=error_kw, align='center', label=label)
    if xlims is not None:
        ax.set_xlim(xlims)
    if ylims is not None:
        ax.set_ylim(ylims)
    return ax
def custom_loglogplot(ax, x, y, label="loss", xlims=None, ylims=None, color='red', linestyle='solid', marker=None):
    """
    Customized plot with log scale on both axes.

    The marker is only forwarded when supplied; axis limits are applied
    on demand. Returns the axes for chaining.
    """
    kwargs = dict(color=color, label=label, linestyle=linestyle)
    if marker is not None:
        kwargs['marker'] = marker
    ax.loglog(x, y, **kwargs)
    if xlims is not None:
        ax.set_xlim(xlims)
    if ylims is not None:
        ax.set_ylim(ylims)
    return ax
def plot_loss_history(loss_history, fname="./logs/loss.png", size=25, figsize=(8,6)):
    """
    Plots the train and test loss histories on a log scale and saves
    the figure to `fname`.
    """
    loss_train = np.array(loss_history.loss_train)
    loss_test = np.array(loss_history.loss_test)
    fig, ax = plt.subplots(figsize=figsize)
    custom_logplot(ax, loss_history.steps, loss_train, label="Train loss", color='blue', linestyle='solid')
    # BUG FIX: the second curve is the *test* loss but was labelled
    # "Train loss", yielding a legend with two identical entries.
    custom_logplot(ax, loss_history.steps, loss_test, label="Test loss", linestyle='dashed')
    stylize_axes(ax, size=size, xlabel="No. of iterations", ylabel="M.s.e.")
    fig.tight_layout()
    fig.savefig(fname, dpi=300, bbox_inches='tight', transparent=True)
def plot_three_bus(t, y_eval, y_pred, fname="./logs/test-trajectory.png", size=25, figsize=(8,6)):
    """
    plots the exact and predicted power network trajectories.

    Renders five stacked subplots -- w1, w2, d2, d3, V3 -- comparing the
    exact (`y_eval`) and predicted (`y_pred`) state trajectories over
    time `t`. Each y_* row i holds the i-th state variable's samples.
    """
    t = t.reshape(-1,)
    # Only V3 gets fixed y-limits; only the bottom panel gets an x label.
    ylims = [None, None, None, None, (0.2, 1.2)]
    xlabel = [None, None, None, None, 'time (s)']
    ylabel = ['$\omega_1(t)$', '$\omega_2(t)$', '$\delta_2(t)$', '$\delta_3(t)$', '$V_3(t)$']
    fig, ax = plt.subplots(nrows=5, ncols=1, figsize=figsize)
    for i in range(5):
        custom_lineplot(ax[i], t, y_eval[i,...].reshape(-1,), label="Exact", ylims=ylims[i])
        custom_lineplot(ax[i], t, y_pred[i,...].reshape(-1,), color="blue", linestyle="dashed", label="Predicted", ylims=ylims[i])
        stylize_axes(ax[i], size=size, xlabel=xlabel[i], ylabel=ylabel[i])
    fig.savefig(fname, dpi=300, bbox_inches='tight', transparent=True)
def plot_regression(predicted, y, fname="./log/regression.png", size=20, figsize=(8,6), x_line=None, y_line=None):
    """
    Parity plot.

    Scatters predicted vs. exact values with a dashed identity line.
    When x_line/y_line are not given, the reference line spans the range
    of the exact values.
    """
    predicted = predicted.reshape(-1,)
    y = y.reshape(-1,)
    if x_line is None:
        x_line = [y.min(), y.max()]
        y_line = [y.min(), y.max()]
    fig, ax = plt.subplots(figsize=figsize)
    custom_lineplot(ax, x_line, y_line, color="yellow", linestyle="dashed", linewidth=3.0)
    custom_scatterplot(ax, predicted, y, color='blue', markerscale=10)
    stylize_axes(ax, size=size, xlabel="Predicted", ylabel="Exact", legend=False)
    fig.tight_layout()
    fig.savefig(fname, dpi=300, bbox_inches='tight', transparent=True)
def plot_barchart(train, test, fname="./log/regression.png", size=20, figsize=(8,6)):
    """
    Plots a a bar chart.

    `train` and `test` are reshaped to (num_runs, 2); the two columns
    correspond to the 'Stacked' and 'Unstacked' configurations. Bars show
    the mean over runs with the standard deviation as the error bar.
    """
    # train \in [num, 2]
    # test \in [num, 2]
    train = train.reshape(-1,2)
    test = test.reshape(-1,2)
    mean_train = train.mean(axis=0)
    mean_test = test.mean(axis=0)
    error_train = train.std(axis=0)
    error_test = test.std(axis=0)
    # NOTE(review): debug prints -- consider removing or routing to logging.
    print("mean train...", mean_train)
    print("mean_test...", mean_test)
    print("error_train...", error_train)
    print("error_test...", error_test)
    width = .25
    x = np.arange(len(mean_train))
    fig, ax = plt.subplots(figsize=figsize)
    custom_barchart(ax, x, mean_train, error_train, color='blue', width=width, label='Train')
    # Test bars are offset by one bar width next to the train bars.
    custom_barchart(ax, x + width, mean_test, error_test, color='red', width=width, label='Test')
    xticks = x + width/2
    xticklabels = ['Stacked', 'Unstacked']
    stylize_axes(ax, size=size, ylabel="M.s.e.", xticks=xticks, xticklabels=xticklabels, legend=True)
    fig.tight_layout()
    fig.savefig(fname, dpi=300, bbox_inches='tight', transparent=True)
def plot_width_analysis(width, train, test, fname="./log/width_analysis.png", size=20, figsize=(8,6)):
    """
    Plots losses as a function of the nn width.

    Both axes are log-scaled (widths are swept multiplicatively).
    """
    train = train.reshape(-1,)
    test = test.reshape(-1,)
    fig, ax = plt.subplots(figsize=figsize)
    custom_loglogplot(ax, width, train, linestyle='dashed', marker="s", color='red', label='Train')
    custom_loglogplot(ax, width, test, linestyle='dashed', marker="o", color='blue', label='Test')
    stylize_axes(ax, size=size, xlabel="Width", ylabel="M.s.e")
    fig.tight_layout()
    fig.savefig(fname, dpi=300, bbox_inches='tight', transparent=True)
def plot_depth_analysis(depth, train, test, fname="./log/width_analysis.png", size=20, figsize=(8,6)):
    """
    Plots losses as a function of the nn depth.

    Only the loss axis is log-scaled (depth is swept linearly).
    NOTE(review): default fname reuses width_analysis.png -- callers
    should pass an explicit fname to avoid overwriting the width plot.
    """
    train = train.reshape(-1,)
    test = test.reshape(-1,)
    fig, ax = plt.subplots(figsize=figsize)
    custom_logplot(ax, depth, train, linestyle='dashed', marker="s", color='red', label='Train')
    custom_logplot(ax, depth, test, linestyle='dashed', marker="o", color='blue', label='Test')
    stylize_axes(ax, size=size, xlabel="Depth", ylabel="M.s.e")
    fig.tight_layout()
    fig.savefig(fname, dpi=300, bbox_inches='tight', transparent=True)
def plot_num_train_analysis(num_train, train, test, fname="./log/num_train.png", size=20, figsize=(8,6)):
    """
    Plots losses as a function of the number of training examples.

    Both axes are log-scaled (dataset sizes are swept multiplicatively).
    """
    train = train.reshape(-1,)
    test = test.reshape(-1,)
    fig, ax = plt.subplots(figsize=figsize)
    custom_loglogplot(ax, num_train, train, linestyle='dashed', marker="s", color='red', label='Train')
    custom_loglogplot(ax, num_train, test, linestyle='dashed', marker="o", color='blue', label='Test')
    stylize_axes(ax, size=size, xlabel="No. of training examples", ylabel="M.s.e")
    fig.tight_layout()
    fig.savefig(fname, dpi=300, bbox_inches='tight', transparent=True)
def plot_L2relative_error(N, error, fname="./log/num_train.png", size=20, figsize=(8,6)):
    """
    Plots the L_2-relative error as a function of the number of integration steps.

    NOTE(review): default fname reuses num_train.png -- pass an explicit
    fname to avoid overwriting the training-size plot.
    """
    error = error.reshape(-1,)
    fig, ax = plt.subplots(figsize=figsize)
    custom_lineplot(ax, N, error, color="blue", linestyle="dashed", linewidth=3.0, marker='s')
    stylize_axes(ax, size=size, xlabel="No. of time steps $N$", ylabel="L$_2$-relative error", legend=False)
    fig.tight_layout()
    fig.savefig(fname, dpi=300, bbox_inches='tight', transparent=True)
| cmoyacal/DAE-PINNs | src/utils/plots.py | plots.py | py | 9,757 | python | en | code | 2 | github-code | 13 |
24186110246 | import os
import matplotlib.image as mpimg
import csv
import numpy as np
import cv2
import matplotlib.pyplot as plt
import math
#cd /home/workspace/CarND-Behavioral-Cloning-P3
samples = []
#process data from csv
data_path = './Drive_Data/'
csv_path_filename = data_path + 'driving_log.csv'
images_path = data_path + 'IMG/'
with open(csv_path_filename) as csvfile:
reader = csv.reader(csvfile)
next(reader,'None')
for line in reader:
samples.append(line)
print('Total number of frames = ', len(samples))
recorded_images = []
measurement_angles = []
#for sample in samples:
# source_path = sample[0]
# filename = source_path.split('/')[-1]
# image = cv2.imread(images_path + filename)
# recorded_images.append(image)
# measurement_angle = float(sample[3])
# measurement_angles.append(measurement_angle)
#X_train = np.array (recorded_images)
#y_train = np.array(measurement_angles)
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Flatten, Dense, Lambda, Dropout, Activation
from keras.layers.convolutional import Convolution2D, Cropping2D, Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.callbacks import ModelCheckpoint
import sklearn
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
#implement NVidia Deep Neural Network for Autonomous Vehicles
#Add Dropouts to prevent overfitting
# NVIDIA end-to-end self-driving architecture, with in-graph
# normalization/cropping (so inference needs no separate preprocessing)
# and one dropout layer to limit overfitting.
model = Sequential()
# Scale pixels from [0, 255] to [-1, 1].
model.add(Lambda(lambda x: x / 127.5 - 1, input_shape=(160,320,3)))
# Drop 60 px of sky and 25 px of hood from every frame.
model.add(Cropping2D(cropping=((60,25),(0,0))))
model.add(Conv2D(24, (5, 5), activation="relu", strides=(2, 2)))
#model.add(Dropout(0.4, noise_shape=None, seed=None))
model.add(Conv2D(36, (5, 5), activation="relu", strides=(2, 2)))
model.add(Conv2D(48, (5, 5), activation="relu", strides=(2, 2)))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(Flatten())
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
# Single continuous output: the steering angle.
model.add(Dense(1))
#model.summary()
def generator(samples, batch_size=32):
    """Yield shuffled batches of (images, steering angles) forever.

    Every CSV row yields four training examples, in this order: the
    center image, a horizontally flipped copy of it, and the left and
    right camera images. The angle list matches that order:
    [angle, -angle, angle + 0.2, angle - 0.2] -- the +/-0.2 correction
    compensates for the side cameras' lateral offset. Each yielded
    batch therefore holds 4 * len(batch_samples) examples.
    """
    num_samples = len(samples)
    while 1: # Loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                source_path = batch_sample[0]
                filename = source_path.split('/')[-1]
                for index in ["center", "left", "right"]:
                    # Swap the camera prefix of the file name,
                    # e.g. center_2016_... -> left_2016_...
                    filename = filename.split('_')
                    filename[0]=index
                    filename='_'.join(filename)
                    image = cv2.imread(images_path + filename)
                    images.append(image)
                    if index == "center":
                        # Augmentation: mirrored center frame (angle negated).
                        images.append(cv2.flip(image, 1))
                #Correction of 0.2 for left and right images
                correction = 0.2
                angle = float(batch_sample[3])
                angles.append(angle)
                angles.append(-1.0 * angle)
                angles.append(angle + correction)
                angles.append(angle - correction)
            X_train = np.array(images)
            y_train = np.array(angles)
            yield sklearn.utils.shuffle(X_train, y_train)
# Splitting the train and test samples and creating batches using the generator
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
batch_size = 100
print('Number of Training Images:', len(train_samples))
print('Number of Validation Images:', len(validation_samples))
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
model.compile(loss='mse', optimizer='adam')
# Checkpoint the weights whenever validation loss improves.
filepath="weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
epochs = 15
history_object = model.fit_generator(train_generator, steps_per_epoch=(len(train_samples)/batch_size), validation_data=validation_generator, validation_steps=(len(validation_samples)/batch_size), epochs=epochs, callbacks=callbacks_list, verbose=1)
#save only best model to help prevent overfitting if validation is not improving
model.save('model_last_epoch.h5')
# Reload the best-validation weights and save them as the final model.
model.load_weights("weights.best.hdf5")
model.save('model.h5')
# Training curves for the writeup.
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
| JSachdev92/BehaviouralCloning | model.py | model.py | py | 4,878 | python | en | code | 0 | github-code | 13 |
1652840945 | # 开始一直提示AttributeError: 'NoneType' object has no attribute 'left',原来是要先判断节点存在。
# 虽然是自己写的,但是还是有点不理解后面两个if和最后return的关系,根据这个例子看来递归前面不一定要加return,只要递归函数内部有return即可。
# 同时,是先return再运算if里的递归的函数的,很奇怪,先标*吧
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def invertTree(self, root):
        """Invert a binary tree in place and return its root.

        Recursively swaps the left and right subtrees of every node.
        Returns None for an empty tree.

        :type root: TreeNode
        :rtype: TreeNode
        """
        if root is None:
            return None
        # One tuple assignment swaps the (already inverted) subtrees; the
        # None base case above makes the original's extra per-child guards
        # unnecessary.
        root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)
        return root
| fire717/Algorithms | LeetCode/python/_226.InvertBinaryTree.py | _226.InvertBinaryTree.py | py | 1,040 | python | zh | code | 6 | github-code | 13 |
35596982649 | import numpy as np
import cp2110
if __name__ == '__main__':
    # Sweep candidate UART configurations of a CP2110 USB-HID bridge, send
    # a fixed probe frame, and print whichever settings yield a reply.
    # This will raise an exception if a device is not found.
    try:
        d = cp2110.CP2110Device(vid=0x10c4, pid=0xea80)
    except:
        raise IOError("Device not found")
    # You can also find a device by path.
    #cp2110.CP2110Device(path='/dev/hidraw0')
    #usb_info = cp2110.enumerate(vid=0x10c4, pid=0xea80)
    #if usb_info:
    #  print(usb_info.as_dict())
    #19200 7O1 expected for UNI-T D02 type cables
    baud=115200 #19200
    parity=cp2110.PARITY.ODD
    data_bits=cp2110.DATA_BITS.SEVEN
    for stop_bits in [cp2110.STOP_BITS.SHORT, cp2110.STOP_BITS.LONG ]:
        for flow_control in [cp2110.FLOW_CONTROL.DISABLED,cp2110.FLOW_CONTROL.ENABLED]:
            # for parity in [cp2110.PARITY.EVEN, cp2110.PARITY.MARK, cp2110.PARITY.ODD, cp2110.PARITY.SPACE ]:
            # for data_bits in [ cp2110.DATA_BITS.EIGHT, cp2110.DATA_BITS.FIVE,\
            # cp2110.DATA_BITS.SEVEN, cp2110.DATA_BITS.SIX]:
            # for baud in np.arange(1200,256001,1200):
            # The UART settings are dictated by the device that embeds the CP2110. It
            # may be configured correctly by default, or you may need to set manually.
            d.set_uart_config(cp2110.UARTConfig(
                baud=int(baud),
                parity=parity,
                flow_control=flow_control,
                data_bits=data_bits,
                stop_bits=stop_bits))
            # Fetch the current uart configuration. This is the UART connection from the
            # CP2110 to the microcontroller (or whatever) it's wired up to.
            c = d.get_uart_config()
            # And you can clear any pending data in the on-chip I/O buffers.
            d.purge_fifos() # The default is cp2110.FIFO.BOTH
            d.purge_fifos(cp2110.FIFO.TX)
            d.purge_fifos(cp2110.FIFO.RX)
            # The UART in your device may need to be explicitly enabled, particularly if
            # you've already explicitly disabled it as in this example.
            #if not d.is_uart_enabled(): d.enable_uart()
            d.enable_uart()
            # The write method accepts byte strings or arrays of ints.
            d.write(b'\x06\xab\xcd\x03\x5e\x01\xd9')
            #d.write([0x06,0xab,0xcd,0x03,0x5e,0x01,0xd9])
            # The default read size will return 63 bytes (at most), which is the maximum
            # supported by this chip. Reads do not block.
            rv = d.read()
            print(c.__dict__)
            if len(rv) > 0:
                print("\t",repr(rv))
            print("")
            d.disable_uart()
        # NOTE(review): exit(1) aborts the sweep after the first stop_bits
        # value (both flow-control settings tested) -- presumably a
        # debugging leftover; remove it to test every combination.
        exit(1)
    # If you ever need to disable the UART, you can.
    #d.disable_uart()
| djorlando24/pyLabDataLogger | tests/test_cp2110.py | test_cp2110.py | py | 2,977 | python | en | code | 11 | github-code | 13 |
73667101459 | from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework import status
from django.template.defaultfilters import slugify
from .models import Campaign, Subscriber
from .serializers import CampaignSerializer, SubscriberSerializer
# Create your views here.
class CampaignViewset(ViewSet):
    """Read and create ``Campaign`` records, addressed by slug."""

    def get_all_campaigns(self, request: Request) -> Response:
        """Return every campaign, serialized, with HTTP 200."""
        payload = CampaignSerializer(Campaign.objects.all(), many=True).data
        return Response(payload, status=status.HTTP_200_OK)

    def get_single_campaign(self, request: Request, campaign_slug: str) -> Response:
        """Return the campaign matching ``campaign_slug``, or HTTP 404."""
        try:
            found = Campaign.objects.get(slug=campaign_slug)
        except Campaign.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        return Response(CampaignSerializer(found).data, status=status.HTTP_200_OK)

    def get_id_from_slug(self, request: Request, campaign_slug: str) -> Response:
        """Resolve ``campaign_slug`` to the campaign's primary key, or HTTP 404."""
        try:
            found = Campaign.objects.get(slug=campaign_slug)
        except Campaign.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        return Response(found.id, status=status.HTTP_200_OK)

    def create_campaign(self, request: Request) -> Response:
        """Validate the request body and create a campaign with a slug
        derived from its title; respond with HTTP 201."""
        serializer = CampaignSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        title = serializer.validated_data.get('title')
        description = serializer.validated_data.get('description')
        logo = serializer.validated_data.get('logo')
        slug = slugify(title)
        # Duplicate titles get a numeric suffix based on how many campaigns
        # already carry the base slug.
        # NOTE(review): only the *base* slug is counted, so a third duplicate
        # would again get "-1" and collide — confirm against model constraints.
        existing = Campaign.objects.filter(slug=slug)
        if existing.exists():
            slug = f"{slug}-{existing.count()}"
        Campaign.objects.create(
            title=title, description=description, slug=slug, logo=logo)
        return Response(status=status.HTTP_201_CREATED)
class SubscriberViewset(ViewSet):
    """List and create ``Subscriber`` records tied to a campaign."""

    def get_all_subscribers(self, request: Request) -> Response:
        """Return every subscriber, serialized, with HTTP 200."""
        subscribers = Subscriber.objects.all()
        serializer = SubscriberSerializer(subscribers, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def create_subscriber(self, request: Request) -> Response:
        """Validate the body and subscribe ``email`` to ``campaign``.

        Returns HTTP 404 when the referenced campaign does not exist,
        HTTP 201 after a successful creation.

        Bug fixed: the original placed the creation (and its 201 return)
        in a ``finally`` block, so a ``return`` in ``finally`` overrode
        the 404 and a subscriber was created even for a nonexistent
        campaign.
        """
        serializer = SubscriberSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        email = serializer.validated_data.get('email')
        campaign = serializer.validated_data.get('campaign')
        try:
            Campaign.objects.get(slug=campaign.slug)
        except Campaign.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        Subscriber.objects.create(email=email, campaign=campaign)
        return Response(status=status.HTTP_201_CREATED)
| DevJoshi030/Next-Demo-API | api/views.py | views.py | py | 2,957 | python | en | code | 0 | github-code | 13 |
10396425009 | from flask import Flask, request, render_template
import data_utils
import model_nn
import sqlite3
import argparse
import database as db
app = Flask(__name__)
@app.route("/", methods=["GET"])
def home():
return "This is a default landing page!"
@app.route("/db/userdata", methods=["GET"])
def view_data():
connection = sqlite3.connect("data/database.db")
connection.row_factory = sqlite3.Row
cur = connection.cursor()
cur.execute("select * from userdata")
rows = cur.fetchall()
return render_template("list.html", rows=rows)
@app.route("/db/scores", methods=["GET"])
def view_scores():
connection = sqlite3.connect("data/database.db")
connection.row_factory = sqlite3.Row
cur = connection.cursor()
cur.execute("select * from scores")
rows = cur.fetchall()
return render_template("scores.html", rows=rows)
@app.route('/api/score/', methods=['POST'])
def regression():
    """Persist the posted JSON payload, then respond with the model's
    prediction."""
    payload = request.get_json()
    data_utils.push_to_userdata_db(payload)
    return model_nn.predict()
@app.route('/api/report-score/', methods=['POST'])
def report_score():
    """Persist a reported score from the request body.

    Fix: the original returned ``None``, which Flask rejects with a 500
    error on every call; respond with an empty 204 instead.
    """
    req_json = request.get_json()
    data_utils.push_to_score_db(req_json.get("score"))
    return "", 204
if __name__ == '__main__':
    # Fix: dataset metadata was fused onto the final line, making it a
    # syntax error; restored to the plain app.run(...) call.
    parser = argparse.ArgumentParser()
    # --initialize (re)creates the database schema before serving.
    parser.add_argument('--initialize', action='store_true')
    args = parser.parse_args()
    if args.initialize:
        db.init_db()
    app.run(debug=True)
class Person():
    """A person (French attribute names: nom/prenom) able to introduce
    themselves."""

    def __init__(self, nom, prenom):
        self.nom = nom
        self.prenom = prenom

    def SePresenter(self):
        """Return the introduction sentence "je suis <nom><prenom>"."""
        return f"je suis {self.nom}{self.prenom}"
p=Person("pelagie " , "AINTANGAR")
j=Person("emmanuel ", "AINTANGAR")
e=Person("eliakim ", "AINTANGAR")
print(p.SePresenter())
print(j.SePresenter())
print(e.SePresenter()) | PELAGIE-AINTANGAR/runtrack-python-poo | runtrack_poo_jour1/job4.py | job4.py | py | 356 | python | en | code | 0 | github-code | 13 |
12508828312 | #!/usr/bin/env python3
import time
import RPi.GPIO as GPIO
# Edge-detection modes understood by trigger() below (the keys of its
# edge_type table).
build_in_trigger = \
    ['rising', 'falling', 'both']
''' func: trigger()
'''
def trigger(channel_list, _name, params):
    """Block until a GPIO edge of kind ``_name`` fires on any listed channel.

    channel_list : GPIO channel numbers to watch.
    _name        : one of 'rising', 'falling', 'both' (raises KeyError
                   otherwise).
    params       : unused; kept for interface compatibility.

    Returns True once an edge has been observed. Event detection is
    removed from every channel before returning.
    """
    edge = {
        'rising': GPIO.RISING,
        'falling': GPIO.FALLING,
        'both': GPIO.BOTH,
    }[_name]
    for ch in channel_list:
        GPIO.add_event_detect(ch, edge)
    fired = False
    while not fired:
        for ch in channel_list:
            if GPIO.event_detected(ch):
                fired = True
                break
        if not fired:
            time.sleep(0.2)  # poll at 5 Hz to avoid a busy loop
    for ch in channel_list:
        GPIO.remove_event_detect(ch)
    return True
| lwj786/RPi_GPIO_scheme | build_in_input.py | build_in_input.py | py | 655 | python | en | code | 0 | github-code | 13 |
70864088658 | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Unit tests for elementary surfplot-based visualisations
"""
import pytest
import nibabel as nb
import numpy as np
import pandas as pd
import pyvista as pv
from hyve_examples import (
get_pain_thresh_nifti,
get_schaefer400_cifti,
get_schaefer400_synthetic_conmat,
)
from hyve.plot import unified_plotter, Layer
from hyve.surf import CortexTriSurface
from hyve.util import (
filter_adjacency_data,
filter_node_data,
PointDataCollection,
PointData,
NetworkDataCollection,
NetworkData,
)
@pytest.mark.ci_unsupported
def test_unified_plotter():
    """Smoke-test ``unified_plotter`` across its main input modalities.

    Builds progressively richer scenes — bare cortical surface, vertex
    scalar datasets, layered scalar maps, a volumetric point cloud, a
    synthetic sphere point cloud, and parcellation-based network
    (node/edge) plots — rendering each interactively via ``.show()``.

    NOTE(review): every call uses ``off_screen=False`` and opens an
    interactive window, so this cannot run headless (hence the
    ``ci_unsupported`` mark). Random data is unseeded, so renders are
    not reproducible run-to-run.
    """
    # --- Bare surface: both hemispheres, then left hemisphere only --------
    surf = CortexTriSurface.from_nmaps(projections=('pial', 'inflated'))
    unified_plotter(
        surf=surf,
        surf_alpha=0.2,
        off_screen=False,
    )[0].show()
    unified_plotter(
        surf=surf,
        surf_alpha=0.2,
        off_screen=False,
        hemisphere='left',
        hemisphere_slack=1.2,
        surf_projection='inflated',
    )[0].show()
    # --- Vertex scalars: random values, one per vertex across both
    # hemispheres (40962 * 2 — presumably 40962 vertices per hemisphere;
    # confirm against the surface resolution) ------------------------------
    surf.add_vertex_dataset(
        'data',
        data=np.random.randn(40962 * 2),
        apply_mask=False,
    )
    unified_plotter(
        surf=surf,
        surf_scalars='data',
        surf_scalars_cmap='magma',
        surf_alpha=0.2,
        hemisphere_slack=1.2,
        off_screen=False,
    )[0].show()
    # --- Layered scalars: diverging hot/cool layer over a gray base -------
    surf_layer = Layer(
        name='data',
        cmap='hot',
        cmap_negative='cool',
        below_color='white',
        clim=(1.5, 3.0),
        alpha=0.8,
    )
    unified_plotter(
        surf=surf,
        surf_scalars='data',
        surf_scalars_cmap='gray',
        surf_scalars_layers=(surf_layer,),
        surf_alpha=0.2,
        hemisphere_slack=1.2,
        off_screen=False,
    )[0].show()
    # --- Volumetric point cloud: suprathreshold voxels of a pain map,
    # projected from voxel indices to world coordinates via the affine ----
    vol = nb.load(get_pain_thresh_nifti())
    vol_data = vol.get_fdata()
    vol_loc = np.where(vol_data > 0)
    vol_scalars = vol_data[vol_data > 0]
    vol_coor = np.stack(vol_loc)
    vol_coor = (vol.affine @ np.concatenate(
        (vol_coor, np.ones((1, vol_coor.shape[-1])))
    ))[:3].T
    vol_voxdim = vol.header.get_zooms()
    points_pain = PointData(
        pv.PointSet(vol_coor),
        data={'pain': vol_scalars},
        # Point size matches the smallest voxel dimension.
        point_size=np.min(vol_voxdim[:3]),
    )
    points = PointDataCollection([points_pain])
    points_layer_pain = Layer(
        name='pain',
        cmap='viridis',
        clim='robust',
        alpha=0.8,
    )
    unified_plotter(
        points=points,
        points_scalars='pain',
        hemisphere_slack=1.2,
        off_screen=False,
    )[0].show()
    # --- Synthetic sphere point cloud: a lattice clipped to radius 30,
    # with positive values in the outer shell, negated values in a middle
    # shell, and random noise in the innermost region ---------------------
    sphere_bounds = np.arange(-30, 30, 3)
    sphere_coor = np.concatenate([
        c.reshape(1, -1) for c in
        np.meshgrid(sphere_bounds, sphere_bounds, sphere_bounds)
    ]).T
    radius = np.sqrt((sphere_coor ** 2).sum(-1))
    sphere_index = radius < 30
    radius = radius[sphere_index]
    sphere_coor = sphere_coor[sphere_index]
    sphere_inner_index = radius < 25
    sphere_data = 1 + ((radius - 20) / 10)
    sphere_data[sphere_inner_index] = -(
        1 + ((radius[sphere_inner_index] - 10) / 10))
    sphere_inmost_index = radius < 20
    sphere_data[sphere_inmost_index] = np.random.randn(
        sphere_inmost_index.sum())
    points = PointDataCollection([
        points_pain,
        PointData(
            pv.PointSet(sphere_coor),
            data={'sphere': sphere_data},
            point_size=6,
        )
    ])
    points_layer_sphere = Layer(
        name='sphere',
        cmap='Reds',
        cmap_negative='Blues',
        below_color=(0, 0, 0, 0),
        clim=(1.0, 2.0),
        alpha=0.8,
    )
    # Both point layers composited over a translucent surface.
    unified_plotter(
        surf=surf,
        surf_alpha=0.2,
        points=points,
        points_scalars_layers=(points_layer_pain, points_layer_sphere),
        hemisphere_slack=1.2,
        off_screen=False,
    )[0].show()
    # --- Network plots: Schaefer-400 parcellation, node values from a
    # synthetic connectivity matrix, edges restricted to 4 seed nodes -----
    parcellation = get_schaefer400_cifti()
    surf_lr = CortexTriSurface.from_tflow(load_mask=True, projections=('inflated',))
    surf_lr.add_vertex_dataset(
        'parcellation',
        data=nb.load(parcellation).get_fdata().ravel(),
        is_masked=True,
    )
    surf_lr.add_vertex_dataset(
        'data',
        data=np.random.rand(32492 * 2),
        apply_mask=False,
    )
    # Node coordinates = parcel centres of mass on the inflated surface.
    node_coor = surf_lr.parcel_centres_of_mass('parcellation', 'inflated')
    cov = pd.read_csv(
        get_schaefer400_synthetic_conmat(), sep='\t', header=None
    ).values
    # Show edges only for two node pairs (one per hemisphere).
    vis_nodes_edge_selection = np.zeros(400, dtype=bool)
    vis_nodes_edge_selection[0:2] = True
    vis_nodes_edge_selection[200:202] = True
    node_data = filter_node_data(cov.sum(axis=0))
    node_data['radius'] = np.random.rand(400)
    node_data['opacity'] = np.random.rand(400)
    edge_data = filter_adjacency_data(
        cov, connected_node_selection=vis_nodes_edge_selection)
    node_clim = (node_data['node_val'].min(), node_data['node_val'].max())
    edge_clim = (-1, 1)
    # First 200 parcels belong to the left hemisphere.
    node_lh = np.zeros(400, dtype=bool)
    node_lh[:200] = True
    network_data = NetworkDataCollection([
        NetworkData(
            'vis_conn',
            coor=node_coor,
            nodes=node_data,
            edges=edge_data,
            lh_mask=node_lh,
        ),
    ])
    # Network alone, with data-driven node radius and opacity.
    unified_plotter(
        networks=network_data,
        node_clim=node_clim,
        node_color='node_val',
        node_radius='radius',
        node_radius_range=(1, 10),
        node_alpha='opacity',
        edge_clim=edge_clim,
        hemisphere_slack=1.2,
        off_screen=False,
    )[0].show()
    # Network over a translucent inflated surface.
    unified_plotter(
        surf=surf_lr,
        surf_projection='inflated',
        surf_alpha=0.2,
        networks=network_data,
        node_clim=node_clim,
        node_color='node_val',
        edge_clim=edge_clim,
        hemisphere_slack=1.2,
        off_screen=False,
    )[0].show()
    # --- Everything at once: surface scalars + points + network ----------
    points = PointDataCollection([points_pain])
    unified_plotter(
        surf=surf_lr,
        surf_projection='inflated',
        surf_scalars='data',
        surf_scalars_cmap='magma',
        surf_alpha=0.2,
        points=points,
        points_scalars_layers=(points_layer_pain,),
        networks=network_data,
        node_clim=node_clim,
        node_color='node_val',
        edge_clim=edge_clim,
        hemisphere_slack=1.2,
        off_screen=False,
    )[0].show()
| hypercoil/hyve | tests/test_uniplot.py | test_uniplot.py | py | 6,336 | python | en | code | 0 | github-code | 13 |
def ler_fasta(arquivo):
    """Read a FASTA-style file, splitting sequence data from its header.

    Parameters
    ----------
    arquivo : path of the file to read.

    Returns
    -------
    (dna, kd) where ``dna`` is a single-element list holding the
    concatenation of every non-header line, and ``kd`` is the last
    header ('>') line, or None when the file has no header line.

    Fixes over the original:
    * lines are stripped, so trailing newlines no longer leak into the
      sequence (and into every k-mer built downstream);
    * duplicate initialization of ``sequencia`` removed;
    * ``kd`` defaults to None instead of raising UnboundLocalError for
      header-less files.
    """
    sequencia = ''
    kd = None
    with open(arquivo, 'r') as fasta:
        for linha in fasta:
            if linha.startswith('>'):
                kd = linha.strip()
            else:
                sequencia += linha.strip()
    return [sequencia], kd
def replace_kd(kd):
    """Extract the integers k and d from a header such as '>k=5 d=2'.

    Returns (k, d) as ints. Raises ValueError when either number cannot
    be found — the original's positional scan ('collect characters
    between the first "k" and the first "d"') could silently mis-parse
    headers containing extra 'k'/'d' characters.
    """
    import re  # local import: this module has no top-level import block
    match = re.search(r'k\s*=\s*(\d+).*?d\s*=\s*(\d+)', kd)
    if match is None:
        raise ValueError(f"cannot parse k/d from header: {kd!r}")
    return int(match.group(1)), int(match.group(2))
def gerar_arquivo(k, d, sequence):
    """Write all (k,d)-mers of ``sequence`` to 'k<k>d<d>mer.txt'.

    A (k,d)-mer is a pair of k-length substrings separated by a gap of
    d characters. Pairs are sorted and formatted as
    ``[first|second,first|second,...]``.

    Fixes over the original:
    * sequences shorter than 2*k + d no longer hang in an infinite loop
      (the stop test compared an index with ``!=`` and could skip past
      it); they now yield an empty list '[]';
    * the pair list is sorted once instead of on every iteration;
    * the output file is closed via a context manager.
    """
    window = 2 * k + d  # total span of one (k,d)-mer
    kdmers = sorted(
        (sequence[i:i + k], sequence[i + k + d:i + window])
        for i in range(max(0, len(sequence) - window + 1))
    )
    kdmer = "[" + ",".join(f"{a}|{b}" for a, b in kdmers) + "]"
    file_name = "k" + str(k) + "d" + str(d) + "mer.txt"
    with open(file_name, 'w') as file:
        file.write(kdmer)
dna, kd = ler_fasta("sequence.fasta")
k, d = replace_kd(kd)
sequence = ""
for i in dna:
sequence += i
gerar_arquivo(k, d, sequence)
| luchiago/bioinformatica | kdMer_old.py | kdMer_old.py | py | 1,589 | python | en | code | 1 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.