seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
7401140866 | import cv2
import pyautogui
import time
import numpy as np
import keyboard
from sentdex import PressKey, ReleaseKey, W, A, S, D
import imutils
import threading
CHARACTER_POSITION = [190, 301]  # (x, y) of the player sprite inside the captured region
CAPTURE_AREA = ((433, 400), (950, 893))  # ((left, top), (right, bottom)) screen rectangle to grab
QUIT = False # We loop in-game until this is set to True.
ALLOWED_KEYS = {W, A, S, D}  # keys currently allowed to be pressed; hold_key() removes one temporarily
# Remove the key from the list of allowed keys for a given interval.
def hold_key(key):
    """Disallow *key* for 250 ms, then allow it again.

    Runs on a worker thread; several hold_key threads may be in flight for
    the same key at the same time (the main loop spawns one per keypress).
    """
    global ALLOWED_KEYS
    # discard() instead of remove(): when two overlapping threads hold the
    # same key, the second removal would raise KeyError under remove().
    ALLOWED_KEYS.discard(key)
    time.sleep(0.250)
    ALLOWED_KEYS.add(key)
def terminate_program():
    """Hotkey callback ('c'): flag the main loop to stop and exit this thread."""
    global QUIT
    QUIT = True
    # exit() raises SystemExit in the hotkey thread; QUIT stops the main loop.
    exit(0)
# Get the center of different objects on the image.
def get_object_locations_from_image(img, object_pixels_x, object_pixels_y, min_radius):
    """Return the centroid (x, y) of each detected blob larger than *min_radius*.

    Builds a binary mask from the given pixel coordinates, dilates it to fuse
    nearby pixels, and extracts one centroid per external contour.

    :param img: BGR/RGB screenshot array (only its shape is used for the mask)
    :param object_pixels_x: x indices of the color-filtered pixels
    :param object_pixels_y: y indices of the color-filtered pixels
    :param min_radius: minimum enclosing-circle radius for a blob to count
    :return: list of (x, y) centroid tuples
    """
    mask = np.zeros(img.shape, dtype=np.uint8)
    mask[object_pixels_y, object_pixels_x] = [255, 255, 255]
    mask = cv2.dilate(mask, None, iterations=2)
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    coordinates = []
    for c in cnts:
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        if M["m00"] == 0:
            # Degenerate (zero-area) contour: the centroid is undefined, and
            # dividing by m00 would raise ZeroDivisionError. Skip it.
            continue
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        if radius > min_radius: #and center[0] > CHARACTER_POSITION[0]:
            coordinates.append((center[0], center[1]))
    return coordinates
# Pressing 'c' at any time aborts the bot via terminate_program().
keyboard.add_hotkey('c', terminate_program)
time.sleep(2)  # grace period to focus the game window
while not QUIT:
    # Grab the play-field region of the screen as an array (rows = y, cols = x).
    img = np.array(pyautogui.screenshot())[CAPTURE_AREA[0][1]:CAPTURE_AREA[1][1], CAPTURE_AREA[0][0]:CAPTURE_AREA[1][0], :]
    # Filter the red and yellow pixels from the image.
    red_vertex_indices = np.where((img[:, :, 0] > 150) & (img[:, :, 1] < 40) & (img[:, :, 1] > 20) & (img[:, :, 2] > 40))
    star_vertex_indices = np.where((img[:, :, 0] > 240) & (img[:, :, 1] > 230) & (img[:, :, 2] < 90))
    y_coords_apple, x_coords_apple = red_vertex_indices
    y_coords_star, x_coords_star = star_vertex_indices
    # Get the center points of the objects.
    apple_coordinates = get_object_locations_from_image(img, x_coords_apple, y_coords_apple, min_radius=20.5)
    star_coordinates = get_object_locations_from_image(img, x_coords_star, y_coords_star, min_radius=13)
    OBJECTS = []
    # Calculate the distance of each object relative to the character's position.
    for x_coord, y_coord in apple_coordinates + star_coordinates:
        OBJECTS.append({"location": (x_coord, y_coord), "distance_horizontal": (x_coord - CHARACTER_POSITION[0])})
    if len(OBJECTS) > 0:
        # Target the object with the smallest signed horizontal distance.
        closest_objective = min(OBJECTS, key=lambda x: x["distance_horizontal"])
        x, y = closest_objective["location"]
        horizontal_distance = closest_objective["distance_horizontal"]
        vertical_distance = (y - CHARACTER_POSITION[1])
        # We only move when the object is in a given radius of our character.
        if horizontal_distance < 260 and vertical_distance > -200:
            # If the object is behind our character:
            if x < CHARACTER_POSITION[0]:
                # If there are more objects, we decide if it is safe to focus on catching the star instead of slashing forward for example.
                if len(OBJECTS) > 1:
                    temp = list(OBJECTS)
                    temp.remove(closest_objective)
                    second_closest_objective = min(temp, key=lambda x: x["distance_horizontal"])
                    condition = 3 * horizontal_distance < second_closest_objective["distance_horizontal"]
                else:
                    condition = True
                if vertical_distance < 30 and vertical_distance > - 100:
                    # If it is safe to catch the star:
                    if condition:
                        key = A
                    # We don't move if it is not safe to do so. Instead, we hold the 'A' key so that we can focus on the apples in the next iteration.
                    else:
                        # NOTE(review): `key` here is whatever was assigned in a
                        # previous iteration; if this branch is reached before any
                        # assignment it raises NameError — TODO confirm/fix.
                        threading.Thread(target=hold_key, args=(key,)).start()
                        continue
                else:
                    continue
            elif y < CHARACTER_POSITION[1] - 45:
                key = W
            elif y > CHARACTER_POSITION[1] + 45:
                key = S
            else:
                key = D
            # Press the chosen key once and block it for 250 ms via hold_key().
            if key in ALLOWED_KEYS:
                threading.Thread(target=hold_key, args=(key,)).start()
                PressKey(key)
                ReleaseKey(key)
| automatingisfun/SnSSword | main.py | main.py | py | 4,620 | python | en | code | 0 | github-code | 50 |
19735161972 | import os
import pandas as pd
import numpy as np
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['font.family'] = 'Arial'
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.stats.stats import _ttest_finish
import scipy.spatial as sp, scipy.cluster.hierarchy as hc
import pickle
from dtw import *
## https://rtavenar.github.io/blog/dtw.html
######
# cluster of centers
# Calc DTW
# Sample columns in chronological order (day 0 .. day 30; D9 has only two replicates).
day_orders = ["EV_longRNA_S3_D0_Rep1","EV_longRNA_S3_D0_Rep2","EV_longRNA_S3_D0_Rep3","EV_longRNA_S3_D1_Rep1","EV_longRNA_S3_D1_Rep2","EV_longRNA_S3_D1_Rep3",
              "EV_longRNA_S3_D3_Rep1","EV_longRNA_S3_D3_Rep2","EV_longRNA_S3_D3_Rep3","EV_longRNA_S3_D5_Rep1","EV_longRNA_S3_D5_Rep2","EV_longRNA_S3_D5_Rep3",
              "EV_longRNA_S3_D7_Rep1","EV_longRNA_S3_D7_Rep2","EV_longRNA_S3_D7_Rep3","EV_longRNA_S3_D9_Rep1","EV_longRNA_S3_D9_Rep2",
              "EV_longRNA_S3_D11_Rep1","EV_longRNA_S3_D11_Rep2","EV_longRNA_S3_D11_Rep3","EV_longRNA_S3_D12_Rep1","EV_longRNA_S3_D12_Rep2","EV_longRNA_S3_D12_Rep3",
              "EV_longRNA_S3_D14_Rep1","EV_longRNA_S3_D14_Rep2","EV_longRNA_S3_D14_Rep3","EV_longRNA_S3_D16_Rep1","EV_longRNA_S3_D16_Rep2","EV_longRNA_S3_D16_Rep3",
              "EV_longRNA_S3_D18_Rep1","EV_longRNA_S3_D18_Rep2","EV_longRNA_S3_D18_Rep3","EV_longRNA_S3_D20_Rep1","EV_longRNA_S3_D20_Rep2","EV_longRNA_S3_D20_Rep3",
              "EV_longRNA_S3_D22_Rep1","EV_longRNA_S3_D22_Rep2","EV_longRNA_S3_D22_Rep3","EV_longRNA_S3_D24_Rep1","EV_longRNA_S3_D24_Rep2","EV_longRNA_S3_D24_Rep3",
              "EV_longRNA_S3_D26_Rep1","EV_longRNA_S3_D26_Rep2","EV_longRNA_S3_D26_Rep3","EV_longRNA_S3_D28_Rep1","EV_longRNA_S3_D28_Rep2","EV_longRNA_S3_D28_Rep3",
              "EV_longRNA_S3_D30_Rep1","EV_longRNA_S3_D30_Rep2","EV_longRNA_S3_D30_Rep3"]
# Short display labels, one per entry of day_orders (same order and length).
day_labels = ["D0_Rep1","D0_Rep2","D0_Rep3","D1_Rep1","D1_Rep2","D1_Rep3",
              "D3_Rep1","D3_Rep2","D3_Rep3","D5_Rep1","D5_Rep2","D5_Rep3",
              "D7_Rep1","D7_Rep2","D7_Rep3","D9_Rep1","D9_Rep2",
              "D11_Rep1","D11_Rep2","D11_Rep3","D12_Rep1","D12_Rep2","D12_Rep3",
              "D14_Rep1","D14_Rep2","D14_Rep3","D16_Rep1","D16_Rep2","D16_Rep3",
              "D18_Rep1","D18_Rep2","D18_Rep3","D20_Rep1","D20_Rep2","D20_Rep3",
              "D22_Rep1","D22_Rep2","D22_Rep3","D24_Rep1","D24_Rep2","D24_Rep3",
              "D26_Rep1","D26_Rep2","D26_Rep3","D28_Rep1","D28_Rep2","D28_Rep3",
              "D30_Rep1","D30_Rep2","D30_Rep3"]
# Mfuzz run directory (under ../results/) and the hierarchical linkage method.
output_folder = "MFuzz_cluster51/allgenes51_17gt001_100"
cluster_method="ward"
# Cluster-center profiles: rows = Mfuzz clusters, columns = samples.
cluster_centers = pd.read_csv("../results/"+output_folder+"/mfuzz_cluster_centers.txt",index_col=0,header=0,sep="\t")
cluster_centers = cluster_centers.loc[:,day_orders]
cluster_centers.columns = day_labels
# Min-max scale every row into [-1, 1] so DTW compares profile shapes, not magnitudes.
cluster_centers_scaled = cluster_centers.apply(lambda x: 2*(x-x.min())/(x.max()-x.min()) -1 ,axis=1)
###
# Full pairwise DTW distance matrix between the scaled center profiles.
n = cluster_centers_scaled.shape[0]
DTW_distance = np.empty((n, n))
for i in range(n):
    for j in range(n):
        x = dtw(cluster_centers_scaled.iloc[i,:], cluster_centers_scaled.iloc[j,:], keep_internals=True)
        DTW_distance[i, j] = x.distance
pickle.dump(DTW_distance, open("../results/"+output_folder+"/DTW_distance.pkl", "wb"))
# Hierarchical clustering on the condensed DTW matrix drives the heatmap row order.
linkage = hc.linkage(sp.distance.squareform(DTW_distance), method=cluster_method, optimal_ordering=False)
g = sns.clustermap(cluster_centers_scaled,cmap="bwr", center=0, figsize=(18, 24),
                   col_cluster=False,row_linkage=linkage,yticklabels=True,dendrogram_ratio = (0.2,0.01))
ax = g.ax_heatmap
ax.set_ylabel("")
ax.set_xticklabels(labels = ax.get_xticklabels(), fontsize=9)
ax.set_yticklabels(labels = ax.get_yticklabels(), fontsize=10)
g.fig.subplots_adjust(right=0.78)
g.ax_cbar.set_position((0.85, .4, .03, .4))
plt.savefig("../results/"+output_folder+"/mfuzz_cluster_DTW_centers_"+cluster_method+"_fonttype42_Arial.pdf")
# Deliberate NameError used as a crude "stop here" guard when running top-to-bottom.
stop
####### Day20
# Column groups for subsetting the time course: before D16, D16 onward, D20 onward.
ltD16_cols = ["EV_D0","EV_D1","EV_D3","EV_D5","EV_D7","EV_D9","EV_D11","EV_D12","EV_D14"]
gtD16_cols = ["EV_D16","EV_D18","EV_D20","EV_D22","EV_D24","EV_D26","EV_D28","EV_D30"]
gtD20_cols = ["EV_D20","EV_D22","EV_D24","EV_D26","EV_D28","EV_D30"]
# NOTE(review): these "EV_Dxx" names do not match the per-replicate day_labels
# assigned above, so this .loc presumably expects a differently-labeled
# cluster_centers_scaled — TODO confirm before running this section.
cluster_centers_scaled = cluster_centers_scaled.loc[:,gtD20_cols]
# Recompute pairwise DTW distances on the D20+ sub-profiles only.
n = cluster_centers_scaled.shape[0]
DTW_distance = np.empty((n, n))
for i in range(n):
    for j in range(n):
        x = dtw(cluster_centers_scaled.iloc[i,:], cluster_centers_scaled.iloc[j,:], keep_internals=True)
        DTW_distance[i, j] = x.distance
pickle.dump(DTW_distance, open("../results/"+output_folder+"/DTW_distance_gtD20.pkl", "wb"))
linkage = hc.linkage(sp.distance.squareform(DTW_distance), method=cluster_method, optimal_ordering=False)
g = sns.clustermap(cluster_centers_scaled,cmap="vlag", center=0, figsize=(6, 8),
                   col_cluster=False,row_linkage=linkage,yticklabels=True,dendrogram_ratio = (0.2,0.01))
ax = g.ax_heatmap
ax.set_ylabel("")
ax.set_xticklabels(labels = ax.get_xticklabels(), fontsize=8)
ax.set_yticklabels(labels = ax.get_yticklabels(), fontsize=7)
g.fig.subplots_adjust(right=0.78)
g.ax_cbar.set_position((0.85, .4, .03, .4))
plt.savefig("../results/"+output_folder+"/mfuzz_cluster_centers_DTW_"+cluster_method+"_fonttype42_Arial_gtD20.pdf")
# Spot-check of one pairwise distance (value discarded; useful interactively).
DTW_distance[15, 16]
# Deliberate NameError "stop here" guard, as above.
stop
##########################
##miR
# Same pipeline for the miRNA cluster centers.
output_folder = "Cell_all_miR_8"
# NOTE(review): "centroid" linkage normally assumes raw Euclidean observations,
# not a precomputed condensed distance matrix — verify this choice is intended.
cluster_method="centroid"
cluster_centers = pd.read_csv("../results/miRNA/"+output_folder+"/mfuzz_cluster_centers.txt",index_col=0,header=0,sep="\t")
# Min-max scale each row into [-1, 1], as for the long-RNA centers above.
cluster_centers_scaled = cluster_centers.apply(lambda x: 2*(x-x.min())/(x.max()-x.min()) -1 ,axis=1)
###
n = cluster_centers_scaled.shape[0]
DTW_distance = np.empty((n, n))
for i in range(n):
    for j in range(n):
        x = dtw(cluster_centers_scaled.iloc[i,:], cluster_centers_scaled.iloc[j,:], keep_internals=True)
        DTW_distance[i, j] = x.distance
pickle.dump(DTW_distance, open("../results/miRNA/"+output_folder+"/DTW_distance.pkl", "wb"))
linkage = hc.linkage(sp.distance.squareform(DTW_distance), method=cluster_method, optimal_ordering=False)
g = sns.clustermap(cluster_centers_scaled,cmap="bwr", center=0, figsize=(6, 8),
                   col_cluster=False,row_linkage=linkage,yticklabels=True,dendrogram_ratio = (0.2,0.01))
ax = g.ax_heatmap
ax.set_ylabel("")
ax.set_xticklabels(labels = ax.get_xticklabels(), fontsize=8)
ax.set_yticklabels(labels = ax.get_yticklabels(), fontsize=7)
g.fig.subplots_adjust(right=0.78)
g.ax_cbar.set_position((0.85, .4, .03, .4))
plt.savefig("../results/miRNA/"+output_folder+"/mfuzz_cluster_centers_DTW_"+cluster_method+"_fonttype42_Arial.pdf")
# Deliberate NameError "stop here" guard, as above.
stop
##########################
## correlation utilities
## fast row-wise Spearman correlation
def spearman_corr_fast(mat_a, mat_b):
    """Spearman correlation between every row i of *mat_a* and row j > i of *mat_b*.

    Only the strict upper triangle of the result is filled (callers read
    out[i, i+1:]); all other entries stay 0. Progress is printed every 1000 rows.
    """
    rank_a = mat_a.rank(1).values
    rank_b = mat_b.rank(1).values
    n_a = rank_a.shape[0]
    n_b, k = rank_b.shape
    # Spearman = Pearson on the ranks: center once, then one dot product per pair.
    dev_a = rank_a - np.mean(rank_a, axis=1)[:, None]
    dev_b = rank_b - np.mean(rank_b, axis=1)[:, None]
    sd_a = np.std(rank_a, axis=1)
    sd_b = np.std(rank_b, axis=1)
    corr = np.zeros((n_a, n_b))
    for i in range(n_a):
        if i % 1000 == 0:
            print(i)
        for j in range(i + 1, n_b):
            corr[i, j] = dev_a[i] @ dev_b[j] / k / sd_a[i] / sd_b[j]
    return corr
def spearman_pval_2tail(n_obs, r):
    """Two-tailed p-values for Spearman correlations *r* from *n_obs* samples.

    :param n_obs: number of paired observations each correlation was computed from
    :param r: array of Spearman correlation coefficients in (-1, 1)
    :return: (t, p) — the t statistics and their two-sided p-values
    """
    dof = n_obs - 2
    t = r * np.sqrt((dof/((r+1.0)*(1.0-r))).clip(0))
    # scipy.stats.stats._ttest_finish was a private helper removed in
    # SciPy >= 1.11; the public t-distribution survival function gives the
    # same two-sided p-value: p = 2 * P(T_dof > |t|).
    p = 2.0 * stats.t.sf(np.abs(t), dof)
    return t, p
## background
# read the expression data
EV_expression = pd.read_csv("../results/EV_gene_expr_avg_8points_gt001.tsv",index_col=0,header=0,sep="\t")
# All-vs-all Spearman matrix is expensive: compute once, then reuse the pickle.
if not os.path.exists("../results/cluster/allgenes_rm_low_8points_gt001/EV_corrs_expr_avg_8points_gt001.pkl"):
    EV_corrs = spearman_corr_fast(EV_expression,EV_expression)
    pickle.dump( EV_corrs, open("../results/cluster/allgenes_rm_low_8points_gt001/EV_corrs_expr_avg_8points_gt001.pkl", "wb"))
    EV_corrs_df = pd.DataFrame(data=EV_corrs, index=EV_expression.index, columns=EV_expression.index)
    EV_corrs_df.to_csv("../results/cluster/EV_correlation_matrix_8points_gt001.txt", sep="\t")
else:
    EV_corrs = pickle.load(open("../results/cluster/allgenes_rm_low_8points_gt001/EV_corrs_expr_avg_8points_gt001.pkl", "rb" ))
## for each cluster to calc the corr
for i in range(100):
    print("==============")
    print("cluster-->",i)
    # NOTE(review): read_csv here uses the default integer index; assumes the
    # gene IDs end up as cluster_i.index — verify against the file format.
    cluster_i = pd.read_csv("../results/cluster/allgenes_rm_low_8points_gt001/gene_list_"+str(i+1)+".txt",sep="\t")
    cluster_i_expr = EV_expression.loc[cluster_i.index,:]
    cluster_i_corrs = spearman_corr_fast(cluster_i_expr, cluster_i_expr)
    pickle.dump(cluster_i_corrs, open("../results/cluster/allgenes_rm_low_8points_gt001/cluster_corrs_"+str(i+1)+".pkl", "wb"))
    cluster_i_corrs_df = pd.DataFrame(data=cluster_i_corrs, index=cluster_i_expr.index, columns=cluster_i_expr.index)
    cluster_i_corrs_df.to_csv("../results/cluster/allgenes_rm_low_8points_gt001/cluster_corrs_DF_"+str(i+1)+".txt", sep="\t")
# Background distribution: the strict upper triangle of the all-vs-all matrix
# (spearman_corr_fast only fills entries above the diagonal).
background = []
EV_corrs_ls = EV_corrs.ravel()  # NOTE(review): appears unused below
for i in range(0,EV_corrs.shape[0]):
    background += list(EV_corrs[i,i+1:])
# t-test of each cluster's within-cluster correlations against the background.
p_val = {}
for i in range(100):
    print("==============")
    print("cluster-->",i)
    cluster_i_corrs = []
    cluster_i_corrs_matrix = pickle.load(open("../results/cluster/allgenes_rm_low_8points_gt001/cluster_corrs_" + str(i + 1) + ".pkl", "rb"))
    for j in range(0, cluster_i_corrs_matrix.shape[0]):
        cluster_i_corrs += list(cluster_i_corrs_matrix[j, j + 1:])
    t, p = stats.ttest_ind(background, cluster_i_corrs)
    p_val[i] = p
    print("p=",p)
pickle.dump( p_val, open("../results/cluster/allgenes_rm_low_8points_gt001/EV_cluster_pval.pkl", "wb"))
# Plot a 50k subsample of the background against the last cluster's correlations.
background_random = np.random.choice(background, size=50000,replace=False)
sns.histplot(background_random,bins=100,color="#619cff")
sns.histplot(cluster_i_corrs,bins=100,color="#a9d3b3")
| huruifeng/PD-MAP | EV/clustering/11_clusters_center_DTW.py | 11_clusters_center_DTW.py | py | 9,755 | python | en | code | 0 | github-code | 50 |
42409715469 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 19 14:00:47 2021
@author: LENOVO
"""
import pygame
import numpy as np
import random
import time
from enum import Enum
pygame.init()
class Direction(Enum):
    """Horizontal movement intent of the player's car (NONE = hold lane)."""
    RIGHT = 1
    LEFT = 2
    NONE = 3
# Basic palette and fonts (note: 'airal' is the font name as typed in the original).
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
font = pygame.font.SysFont('airal', 25)
font_init = pygame.font.SysFont('airal', 75)
font1 = pygame.font.SysFont('airal', 25)
# Sprites: the player's car plus three obstacle cars, with their collision rects.
car_main = pygame.image.load('car1.png')
car_obs_1 = pygame.image.load('car2.png')
car_obs_2 = pygame.image.load('car3.png')
car_obs_3 = pygame.image.load('car4.png')
car_main_rec = car_main.get_rect()
car_obs_rec_1 = car_obs_1.get_rect()
car_obs_rec_2 = car_obs_2.get_rect()
car_obs_rec_3 = car_obs_3.get_rect()
road = pygame.image.load('road.png')
# Car geometry and per-frame movement step, in pixels.
car_height = 100
car_width = 50
offset = 2
step_size = 5
class CarGame:
    """Three-lane dodge-the-cars game built on pygame.

    The player's car sits at the bottom and switches lanes with the arrow
    keys while obstacle cars scroll down from the top. State is shared with
    the module-level sprite rects (car_main_rec, car_obs_rec_*).
    """
    def __init__(self, w=640, h=640):
        self.h = h
        self.w = w
        # init display
        self.display = pygame.display.set_mode((self.w, self.h))
        pygame.display.set_caption('Snake')
        self.clock = pygame.time.Clock()
        # init game state: player centered at the bottom, obstacles parked off-screen
        car_main_rec.x = self.w/2 - car_width/2 + offset
        car_main_rec.y = self.h - car_height
        car_obs_rec_1.x, car_obs_rec_2.x, car_obs_rec_3.x = 217, 297, 372
        car_obs_rec_1.y, car_obs_rec_2.y, car_obs_rec_3.y = -100, -100, -100
        self.car_gamer = [car_main_rec.x, car_main_rec.y]
        self.car_obs_pos = [[car_obs_rec_1.x, car_obs_rec_1.y], [car_obs_rec_2.x, car_obs_rec_2.y], [car_obs_rec_3.x, car_obs_rec_3.y]]
        self.direction = Direction.NONE
        self.score = 0
        self.state = 1  # lane index: 0 = left, 1 = middle, 2 = right
        self.state_word = ['Left', 'Middle', 'Right']
        self.car_obs = [car_obs_1, car_obs_2, car_obs_3]
        self.x_pos = [217, 297, 372]  # lane x coordinates
        self.game_started = False
        self._reset_states()
    def play_step(self):
        """Advance one frame; return (game_over, score)."""
        # user input
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                quit()
            elif event.type == KEYDOWN:
                self.game_started = True
                if event.key == K_LEFT:
                    self.direction = Direction.LEFT
                elif event.key == K_RIGHT:
                    self.direction = Direction.RIGHT
        self._move(self.direction)
        self.car_position()
        # check if game over
        game_over = False
        if self._is_collision():
            game_over = True
            text = font_init.render("Game Over", True, WHITE)
            self.display.blit(text, [self.w/3.5,self.h/3.5])
            pygame.display.flip()
            time.sleep(3)
            return game_over, self.score
        # Add new cars once an obstacle scrolls past the bottom edge.
        for i in range(0, 3):
            if self.car_obs_pos[i][1] > self.h:
                self.score += 1
                # Shuffle which sprite appears in which lane for the next wave.
                self.car_obs = random.sample(self.car_obs, len(self.car_obs))
                self._reset_states()
        # update ui and clock (frame rate rises with the score, capped at 60)
        self._update_ui()
        self.clock.tick(min(self.score + step_size*5, 60))
        # return game over and score
        return game_over, self.score
    def car_position(self):
        """Park active obstacles off-screen pre-start; scroll them once started."""
        if not self.game_started:
            for i in self.car_list:
                self.car_obs_pos[i] = [self.x_pos[i], -100]
        if self.game_started:
            for i in self.car_list:
                self._move_car(i, self.car_obs_pos[i])
    def _car_load(self, car, cord):
        """Blit a car sprite at (x, y)."""
        self.display.blit(car, (cord[0], cord[1]))
    def _update_ui(self):
        """Redraw road, HUD, player and obstacle cars, then flip the display."""
        self.display.fill(WHITE)
        self.display.blit(road, (0,0))
        if not self.game_started:
            text = font_init.render("Start Game", True, WHITE)
            self.display.blit(text, [self.w/3.5,self.h/3.5])
            text = font1.render("Avoid hitting other cars", True, WHITE)
            self.display.blit(text, [self.w/3,self.h/2.5])
        text = font1.render("Score: " + str(self.score), True, BLACK)
        self.display.blit(text, [10,10])
        text = font1.render("Lane: " + self.state_word[self.state], True, BLACK)
        self.display.blit(text, [10,30])
        self._car_load(car_main, self.car_gamer)
        self._car_load(self.car_obs[0], self.car_obs_pos[0])
        self._car_load(self.car_obs[1], self.car_obs_pos[1])
        self._car_load(self.car_obs[2], self.car_obs_pos[2])
        pygame.display.flip()
    def _move(self, direction):
        """Slide the player toward the requested lane; stop at lane centers 217/297/372."""
        x = self.car_gamer[0]
        y = self.car_gamer[1]
        if direction == Direction.RIGHT and x < 368:
            x += step_size
            car_main_rec.x += step_size
            if x == 297:
                self.direction = Direction.NONE
                self.state = 1
            if x == 372:
                self.state = 2
        elif direction == Direction.LEFT and x >= 221:
            x -= step_size
            car_main_rec.x -= step_size
            if x == 297:
                self.direction = Direction.NONE
                self.state = 1
            if x == 217:
                self.state = 0
        elif direction == Direction.NONE:
            pass
        self.car_gamer = [x, y]
    def _move_car(self, num, position):
        """Scroll obstacle *num* down by one step, keeping its rect in sync."""
        y = position[1]
        y += step_size
        self.car_obs_pos[num] = [position[0], y]
        if num == 0:
            car_obs_rec_1.y += step_size
        if num == 1:
            car_obs_rec_2.y += step_size
        if num == 2:
            car_obs_rec_3.y += step_size
    def _reset_states(self):
        """Reset obstacle positions and pick 1-2 random lanes for the next wave."""
        for i in range(0, 3):
            self.car_obs_pos[i] = [self.x_pos[i], -100]
        car_obs_rec_1.x, car_obs_rec_2.x, car_obs_rec_3.x = 217, 297, 372
        car_obs_rec_1.y, car_obs_rec_2.y, car_obs_rec_3.y = -100, -100, -100
        self.car_gamer = [car_main_rec.x, car_main_rec.y]
        self.car_obs_pos = [[car_obs_rec_1.x, car_obs_rec_1.y], [car_obs_rec_2.x, car_obs_rec_2.y], [car_obs_rec_3.x, car_obs_rec_3.y]]
        self.car_list = []
        for i in range(0, np.random.randint(2) + 1):
            self.car_list.append(np.random.randint(3))
    def _is_collision(self):
        """True when the player's rect overlaps any obstacle rect."""
        if car_main_rec.colliderect(car_obs_rec_1) or car_main_rec.colliderect(car_obs_rec_2) or car_main_rec.colliderect(car_obs_rec_3):
            return True
        return False
if __name__ == '__main__':
    game = CarGame()
    # game loop: one frame per call, until a collision ends the game
    while True:
        game_over, score = game.play_step()
        if game_over:
            break
    pygame.quit()
70865877594 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import os
import sys
# CLI arguments: CSV path and the three column names to use as x, y, z.
fileDir = sys.argv[1]
x = sys.argv[2]
y = sys.argv[3]
z = sys.argv[4]
data = pd.read_csv(fileDir, engine = 'c', float_precision = 'round_trip', dtype=np.float64)
# Three surfaces from the same (x, y) grid: keep the first duplicate, the
# unique rows only, and the last duplicate, respectively.
dataTop = data.drop_duplicates(subset=[x,y], keep='first', inplace=False)
XTop = dataTop[x]
YTop = dataTop[y]
ZTop = dataTop[z]
dataMid = data.drop_duplicates(subset=[x,y], keep=False, inplace=False)
XMid = dataMid[x]
YMid = dataMid[y]
ZMid = dataMid[z]
dataBottom = data.drop_duplicates(subset=[x,y], keep='last', inplace=False)
XBottom = dataBottom[x]
YBottom = dataBottom[y]
ZBottom = dataBottom[z]
# Overlay the three semi-transparent triangulated surfaces in one 3D axes.
fig = plt.figure(figsize=(11.5, 8.5))
ax = fig.add_subplot(111, projection='3d')
ax.plot_trisurf(XTop, YTop, ZTop, cmap='viridis', alpha=0.5)
ax.plot_trisurf(XMid, YMid, ZMid, cmap='viridis', alpha=0.5)
ax.plot_trisurf(XBottom, YBottom, ZBottom, cmap='viridis', alpha=0.5)
plt.xlabel("p")
plt.ylabel("q")
plt.title("Average Hitting Time")
plt.show()
| jackbergus/NCL_CSC3232 | python/02_markov/plot3d_average_hitting_time.py | plot3d_average_hitting_time.py | py | 1,078 | python | en | code | 0 | github-code | 50 |
32968574047 | """
# Regular Expression:
find the pattern(string or numbers) in raw string
Methods:
- match: it will match first pattern in starts of the string
- search: it will search return single matched pattern in entire string
- findall: it will return all matched patterns in the string
- sub: whenever matches it will substitute the pattern
- split: whenever pattern matches it will split
---
Pattern conditions
------------------
[0-9] --> it will return single matched number
\d --> it will return single matched number
[a-zA-Z] --> it will return single matched charector
\w --> it will return single matched charectr [a-zA-Z0-9]
^ --> it is used for start of the string
$ --> it is used for end of the pattern
{} --> {1,} how many characters or numbers present in string
. --> single characters return
\s --> single space
* --> 0 or more occurances
+ --> 1 or more occurences
# match: it will match start of the string
# re.match(pattern,string)
#search: it will search not only start of the string, and also it will search middle but
# it will return single
# re.search(pattern,string)
# find all: it will return all matched patterns
# re.findall("pattern",str)
# sub: it will substitute when pattern matches
# re.sub(pattern,sub)
# split: based pattern will split
d = re.split("\s",x)
"""
import re
x = "thirumala reddy thirumala reddy thirumala"
"""
# match: it will match start of the string
# re.match(pattern,string)
data = re.match("thirumala",x)
print(data.group())"""
#search: it will search not only start of the string, and also it will search middle but
# it will return single
"""
# re.search(pattern,string)
data = re.search("thirumala",x)
print(data.group())
"""
# find all: it will return all matched patterns
"""
# re.findall("pattern",str)
d = re.findall("thirumala", x)
print(d)
"""
# sub: it will substitute when pattern matches
'''
# re.sub(pattern,sub)
d = re.sub("thirumala","akhil", x)
print(d)
'''
# split: based pattern will split
"""
d = re.split("\s",x)
print(d)
print(x.split())
"""
xs = "CVAPB4576M CVADB4576^ C#ADB4576MS CV23TB4576M CVAKB4576M CVAGB4576M CVAOB4576M"
# data = re.findall("[A-Z]{5}[0-9]{4}[A-Z]]",xs)
# data = re.findall("[A-Z]{2}\d{2}\s[A-Z]",xs)
# data = re.findall("[A-Z]{5}[0-9]{4}.",xs)
data = re.findall("\w{5}[0-9]{4}\w",xs)
print(data)
# [A-Z]{2}\d{2}[A-Z]
| tiru777/pythonbatch2 | class21.py | class21.py | py | 2,370 | python | en | code | 1 | github-code | 50 |
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is not installed.
    from distutils.core import setup
# Packaging metadata for the python-mal distribution.
config = {
    'name': 'python-mal',
    'description': 'Provides programmatic access to MyAnimeList resources.',
    'author': 'Shal Dengeki',
    'license': 'LICENSE.txt',
    'url': 'https://github.com/shaldengeki/python-mal',
    'download_url': 'https://github.com/shaldengeki/python-mal/archive/master.zip',
    'author_email': 'shaldengeki@gmail.com',
    'version': '0.1.7',
    'install_requires': ['beautifulsoup4', 'requests', 'pytz', 'lxml'],
    'tests_require': ['nose'],
    'packages': ['myanimelist']
}
setup(**config)
2923237636 | import torch
from clustering.utils import Confusion
from sklearn.cluster import KMeans
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity as cosine
from sklearn.metrics import silhouette_score
from sklearn import preprocessing
def get_kmeans(all_features, all_labels, num_classes):
    """Run k-means on L2-normalized features and score the clustering.

    :param all_features: torch tensor of embeddings, shape (N, D)
    :param all_labels: ground-truth labels (tensor/sequence) or None
    :param num_classes: number of clusters k
    :return: (inner-product scores, cosine scores, cluster centers); when
        all_labels is given, also prints L2 / inner / cosine accuracies.
    """
    all_features = all_features.numpy()
    # Row-normalize so that inner product and cosine scoring are comparable.
    all_features = preprocessing.normalize(all_features)
    print('Clustering with kmeans...')
    # Perform kmean clustering
    confusion = Confusion(num_classes)
    clustering_model = KMeans(n_clusters=num_classes)
    clustering_model.fit(all_features)
    cluster_assignment = clustering_model.labels_
    # Score every sample against every center: raw inner product and cosine.
    score_factor = np.matmul(all_features, clustering_model.cluster_centers_.transpose())
    score_cosine = cosine(all_features, clustering_model.cluster_centers_)
    if all_labels is None:
        # Unsupervised mode: skip all evaluation, just return scores and centers.
        return score_factor, score_cosine, clustering_model.cluster_centers_
    true_labels = all_labels
    pred_labels = torch.tensor(cluster_assignment)
    print("all_embeddings:{}, centers:{}, true_labels:{}, pred_labels:{}".format(all_features.shape, clustering_model.cluster_centers_.shape, len(true_labels), len(pred_labels)))
    # Accuracy of the raw k-means (L2) assignments via optimal label matching.
    confusion.add(pred_labels, true_labels)
    confusion.optimal_assignment(num_classes)
    # Accuracy when assigning each sample to its highest inner-product center.
    confusion_factor = Confusion(num_classes)
    score_factor = np.matmul(all_features, clustering_model.cluster_centers_.transpose())
    pred_labels_factor = score_factor.argmax(axis=-1)
    pred_labels_factor = torch.tensor(pred_labels_factor)
    confusion_factor.add(pred_labels_factor, true_labels)
    confusion_factor.optimal_assignment(num_classes)
    # Accuracy when assigning each sample to its highest cosine-similarity center.
    confusion_cosine = Confusion(num_classes)
    score_cosine = cosine(all_features, clustering_model.cluster_centers_)
    pred_labels_cosine = score_cosine.argmax(axis=-1)
    pred_labels_cosine = torch.tensor(pred_labels_cosine)
    confusion_cosine.add(pred_labels_cosine, true_labels)
    confusion_cosine.optimal_assignment(num_classes)
    print("Clustering iterations:{}, L2 ACC:{:.3f}, Inner ACC:{:.3f}, Cosine ACC:{:.3f}".format(clustering_model.n_iter_, confusion.acc(), confusion_factor.acc(), confusion_cosine.acc()))
    print('L2 Clustering scores:',confusion.clusterscores())
    print('Inner Clustering scores:',confusion_factor.clusterscores())
    print('Cosine Clustering scores:',confusion_cosine.clusterscores())
    return score_factor, score_cosine, clustering_model.cluster_centers_
def get_kmeans_centers(all_features, all_labels, num_classes):
    """Convenience wrapper: run get_kmeans and return only the cluster centers."""
    centers = get_kmeans(all_features, all_labels, num_classes)[2]
    return centers
def get_kmeans_prediction_and_centers(all_features, all_labels, num_classes):
    """Run get_kmeans; return (cosine-based hard assignments, cluster centers)."""
    cosine_scores, centers = get_kmeans(all_features, all_labels, num_classes)[1:]
    predictions = cosine_scores.argmax(axis=-1)
    return predictions, centers
def get_metric(features, centers, labels, num_classes):
    """Print inner-product and cosine assignment accuracies for given centers.

    :param features: sample embeddings, shape (N, D)
    :param centers: cluster centers, shape (num_classes, D)
    :param labels: ground-truth labels for the N samples
    :param num_classes: number of clusters/classes
    """
    # Normalize centers and features jointly, then split them back apart.
    normalized_features = preprocessing.normalize(np.concatenate((centers, features), axis=0))
    centers, features = normalized_features[:num_classes], normalized_features[num_classes:]
    # Accuracy when assigning to the highest inner-product center.
    confusion_factor = Confusion(num_classes)
    score_factor = np.matmul(features, centers.transpose())
    pred_labels_factor = score_factor.argmax(axis=-1)
    pred_labels_factor = torch.tensor(pred_labels_factor)
    confusion_factor.add(pred_labels_factor, labels)
    confusion_factor.optimal_assignment(num_classes)
    # Accuracy when assigning to the highest cosine-similarity center.
    confusion_cosine = Confusion(num_classes)
    score_cosine = cosine(features, centers)
    pred_labels_cosine = score_cosine.argmax(axis=-1)
    pred_labels_cosine = torch.tensor(pred_labels_cosine)
    confusion_cosine.add(pred_labels_cosine, labels)
    confusion_cosine.optimal_assignment(num_classes)
    print("Inner ACC:{:.3f}, Cosine ACC:{:.3f}".format(confusion_factor.acc(), confusion_cosine.acc()))
    print('Inner Clustering scores:', confusion_factor.clusterscores())
    print('Cosine Clustering scores:',confusion_cosine.clusterscores())
def get_kmeans_score(all_features, num_classes):
    """Return the silhouette score of k-means on L2-normalized features.

    :param all_features: torch tensor of embeddings, shape (N, D)
    :param num_classes: number of clusters k
    :return: silhouette score (higher = better-separated clusters)
    """
    all_features = all_features.numpy()
    all_features = preprocessing.normalize(all_features)
    clustering_model = KMeans(n_clusters=num_classes)
    labels = clustering_model.fit_predict(all_features)
    silhouette = silhouette_score(all_features, labels)
    return silhouette
| JiachengLi1995/UCTopic | clustering/kmeans.py | kmeans.py | py | 4,342 | python | en | code | 40 | github-code | 50 |
# Inputs: salesperson name (string), fixed salary (float), total sales amount (float).
"""nome = str(input())
salario = float(input())
total_vendas = float(input())
total = total_vendas*0,15
salario1 = float(total + salario)
print(f"TOTAL = R$ {salario1:.2f}")"""
nome = input()  # salesperson name (read but not used in the output)
salario = float(input())  # fixed salary
vendas = float(input())  # total sales amount
comissao = (vendas*0.15)  # 15% commission on sales
salario=(salario+comissao)
print(f"TOTAL = R$ {salario:.2f}")
| irisjulia/desafios-python | desafio10.py | desafio10.py | py | 413 | python | pt | code | 0 | github-code | 50 |
5662279018 | import os
import glob
import shutil
from tqdm import tqdm
# Path to the generated avatar images.
image_folder_path = "../../public/random_avatar/"
# Collect every '*.png' file in that folder.
files = glob.glob(image_folder_path + '/*.png')
# Also schedule the user API folder itself for deletion.
user_file_path = "../../app/api/user/"
files.append(user_file_path)
# Progress bar over everything to delete.
progress_bar = tqdm(total=len(files), desc="Deleting files")
for f in files:
    try:
        # Directories need shutil.rmtree; plain files use os.remove.
        if os.path.isdir(f):
            shutil.rmtree(f)
        else:
            os.remove(f)
        print(f'Un fichier a été supprimé : {f}')
        # Advance the progress bar (only on successful deletion).
        progress_bar.update(1)
    except OSError as e:
        print(f'Erreur : {f} : {e}')
# Close the progress bar.
progress_bar.close()
| chambrin/random-user-generator-api | script/Generator-user/purge.py | purge.py | py | 936 | python | fr | code | 0 | github-code | 50 |
31834991871 | import random
from PIL import Image
import requests
import sys
import vlc
import time
# Background music: play the Pokemon theme for five seconds before the intro text.
sound_file = vlc.MediaPlayer("file:///Users/terribroughton/Desktop/pokemonmusic.mp3")
sound_file.play()
time.sleep(5)
# Typewriter-style output helper.
def delay_print(s):
    """Print *s* one character at a time with a 30 ms pause between characters.

    Based on: https://stackoverflow.com/questions/9246076/how-to-print-one-character-at-a-time-on-one-line
    """
    for ch in s:
        sys.stdout.write(ch)
        sys.stdout.flush()
        time.sleep(0.03)
delay_print("Welcome to the Pokemon Top Trumps Game!! ")
new_player_name = input('What is the name of the new player?: ')
delay_print("Hello " + new_player_name + ", get ready for the Pokemon Battle!\n")
computername = " The Computer"
delay_print("You are playing against" + computername + ", get ready...\n")
with open('ranking.txt', 'r') as text_file:
ranking = text_file.read()
ranking = ranking + new_player_name + '\n'
with open('ranking.txt', 'w+') as text_file:
text_file.write(ranking)
def random_pokemon():
    """Fetch a random Gen-1 pokemon (ids 1-151) from the PokeAPI.

    :return: the pokemon's JSON payload as a dict (includes 'name' and the
        numeric stats the game compares on)
    """
    pokemon_number = random.randint(1,151)
    delay_print("random number between 1 and 151 is % d" % (pokemon_number))
    url = 'https://pokeapi.co/api/v2/pokemon/{}/'.format(pokemon_number)
    response = requests.get(url)
    pokemon = response.json()
    return pokemon
def run():
    """Play one top-trumps round: pick a stat, compare against the computer,
    and show a win/lose/draw image."""
    my_pokemon = random_pokemon()
    delay_print(' , You were given {}'.format(my_pokemon['name'] ) )
    # stat_choice must be one of the pokemon JSON's top-level numeric keys.
    stat_choice = input(' , Which stat do you want to use? (id, height, weight, base_experience, order,) ' )
    opponent_pokemon = random_pokemon()
    delay_print(' , The opponent chose {}'.format(opponent_pokemon['name'] ) )
    my_stat = my_pokemon[stat_choice]
    opponent_stat = opponent_pokemon[stat_choice]
    if my_stat > opponent_stat:
        delay_print('...........You Win!')
        winner_img = Image.open('winner.jpg')
        winner_img.show()
    elif my_stat < opponent_stat:
        delay_print('.........You Lose!')
        loser2_img = Image.open('loser2.jpg')
        loser2_img.show()
    else :
        delay_print('.........Draw!')
        draw_img = Image.open('draw.jpg')
        draw_img.show()
# Start the game.
run()
| Terrib96/Pokemon-game | Projectpoke.py | Projectpoke.py | py | 2,168 | python | en | code | 0 | github-code | 50 |
41699431692 | import pygame
import os
from pygame.locals import *
import STATE
import sys
# Player sprite, scaled to one 32x32 tile.
image = pygame.image.load(os.path.join('assets','hero1.png'))
image = pygame.transform.scale(image, (32, 32))
# Current top-left position of the player sprite, in pixels.
x = 0
y = 0
def take_turn():
    """Process pending pygame events, move the player, and redraw the sprite.

    Arrow keys move the sprite 5 px per event; closing the window quits.
    For background, see the pygame event queue docs (pygame.event.get drains
    and returns all pending events).
    """
    global x, y
    # Single pass over the event queue. The original nested a second
    # `for event in pygame.event.get():` inside the first; since each call
    # drains the queue, the outer loop's events were discarded unprocessed.
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        elif event.type == KEYDOWN:
            if event.key == K_LEFT:
                x -= 5
            elif event.key == K_RIGHT:
                x += 5
            elif event.key == K_UP:
                y -= 5
            elif event.key == K_DOWN:
                y += 5
    STATE.SCREEN.blit(image, (x, y))
13910155274 | from pyautocad import Autocad,APoint,ACAD
import numpy as np
import pyptlist
import random
import time
acad=Autocad(create_if_not_exists=True)
def getpoint():  # Interactively collect points from the running AutoCAD session.
    """Ask the user to click n (>= 3) points in AutoCAD and whether the curve
    is closed.

    Returns (ptlist, tagclosed): the APoint list and a bool closed-flag, or
    None (implicitly) when fewer than 3 points are requested.
    """
    acad.doc.SetVariable('pdmode', 2)  # make point markers visible (cross style)
    n = acad.doc.Utility.GetInteger('请输入点的个数(至少3个):')
    if n < 3:
        acad.prompt('输入点数量有误,程序终止')
        return
    ptlist = []
    for i in range(n):
        prompt1 = "请输入第%d/%d个点" % (i + 1, n)
        acad.prompt(prompt1)
        while True:  # on a COM call failure, regenerate the view and retry
            try:
                pt = acad.doc.Utility.GetPoint()
            except Exception:
                time.sleep(0.2)
                acad.doc.Regen(ACAD.acActiveViewport)
                print('呼叫错误,重试')
            else:
                break
        time.sleep(0.1)
        pt = APoint(pt)
        acad.model.AddPoint(pt)  # echo the picked point back into the drawing
        ptlist.append(pt)
    # Ask whether the curve should be treated as closed; empty answer means yes.
    while True:
        closed = acad.doc.Utility.GetString(0, '\n曲线是否闭合Y(闭合)/N(不闭合)?默认Y')
        if closed == '' or closed.lower() == 'y':
            tagclosed = True
            break
        elif closed.lower() == 'n':
            tagclosed = False
            break
        else:
            acad.prompt('输入有误,请重新输入!')
    return ptlist,tagclosed
def job1():
    """Collect points interactively, fit an Akima spline through them, and
    draw the result as a blue lightweight polyline."""
    pts, is_closed = getpoint()
    spline_coords = pyptlist.Polygon(pts).Akima(is_closed)
    polyline = acad.model.AddLightWeightPolyline(np.float_(spline_coords))
    polyline.Color = ACAD.acBlue
def job2():
    """Collect points interactively, run quadratic interpolation through
    them, and draw the result as a red lightweight polyline."""
    pts, is_closed = getpoint()
    interp_coords = pyptlist.Polygon(pts).Poly2interpolation(is_closed)
    polyline = acad.model.AddLightWeightPolyline(np.float_(interp_coords))
    polyline.Color = ACAD.acRed
def jobcombine():
    """Pick points once, then overlay both fits on the same point set:
    the Akima spline (blue) and the quadratic interpolation (red)."""
    pts, is_closed = getpoint()
    poly = pyptlist.Polygon(pts)
    akima_coords = poly.Akima(is_closed)
    blue_line = acad.model.AddLightWeightPolyline(np.float_(akima_coords))
    blue_line.Color = ACAD.acBlue
    quad_coords = poly.Poly2interpolation(is_closed)
    red_line = acad.model.AddLightWeightPolyline(np.float_(quad_coords))
    red_line.Color = ACAD.acRed
if __name__=="__main__":
jobcombine() | YU6326/YU6326.github.io | code/curvenew.py | curvenew.py | py | 2,196 | python | en | code | 6 | github-code | 50 |
26217989458 | import streamlit as st
import openai
st.set_page_config(page_title="Chat GPT", page_icon=":crown:", layout="wide")
# ---- Header ----
def main():
st.session_state.setdefault("logs", [])
if __name__ == "__main__":
main()
st.header("Chat GPT")
st.subheader("""
HI :wave:,
I am Mohamed Arafath an AIML student at SRM Institue of Science and Technology.
I am a Jack of all cards!
I know Machine Learning and Intrested to learn more about web developement""")
st.title("This is my [GitHub](https://github.com/MohamedArafath205) follow me!")
title = st.text_input("Ask me anything...", key="input")
if "logs" not in st.session_state:
st.session_state.logs = []
openai.api_key = "YOUR_API_KEY"
if(title != ""):
with st.spinner("Generating response..."):
response = openai.Completion.create(
model="text-davinci-003",
prompt=title,
temperature=0,
max_tokens=60,
top_p=1,
frequency_penalty=0.5,
presence_penalty=0
)
if st.button('Enter'):
bot_response = response.choices[0].text.strip()
st.session_state.logs.append(bot_response)
message_box = f"<div style='background-color:#f2f2f2; padding:10px; border-radius:10px; margin-bottom:10px; height:200px; overflow-y:scroll; color:black;'>{bot_response}</div>"
st.markdown(message_box, unsafe_allow_html=True)
input_text = ""
if st.button("Clear conversation"):
st.session_state.logs = []
bot_logs = [log for log in st.session_state.logs if log != ""]
if bot_logs:
with st.expander("Conversation History"):
conversation_history = "\n\n".join(bot_logs[::-1])
st.text_area(" ", value=conversation_history, height=500)
| kpister/prompt-linter | data/scraping/repos/MohamedArafath205~Chat-GPT/app.py | app.py | py | 1,848 | python | en | code | 0 | github-code | 50 |
34978948199 | import socket
from fdp import ForzaDataPacket
from matplotlib import pyplot
import time
# Listen for Forza Horizon 4 "data out" UDP telemetry for 30 seconds,
# record the selected properties, then plot them.
# Create a UDP socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind the socket to the port
server_address = ("127.0.0.1", 1010)
s.bind(server_address)
print("Do Ctrl+c to exit the program !!")
props = []
arr = []
t_max = time.time() + 30  # stop collecting 30 s from now
props = ["speed"]  # telemetry fields to record (overrides the empty list above)
# accel, brake, handbrake -> 1 or 0
# current_engine_rpm [10 000], power [500 000]
# speed [200], race_pos [0,10]
# One sample list per recorded property.
for i in range(len(props)):
    arr.append([])
while time.time() < t_max:
    print("####### Server is listening #######")
    data, address = s.recvfrom(1024)  # one telemetry datagram
    df = ForzaDataPacket(data, "fh4")  # parse using the "fh4" packet layout
    d = df.to_list(props)
    for i in range(len(d)):
        arr[i].append(d[i])
# Plot each property's samples against sample index.
for i in range(len(props)):
    pyplot.plot(list(range(len(arr[i]))), arr[i], label=props[i])
pyplot.legend()
pyplot.show()
20974794669 | #WAP to input a number & print the reverse of a number & also print it's ones digit.
num = int(input("Enter a number: "))
# Reverse the digits via string slicing; non-positive input reverses to 0,
# matching the original digit-by-digit loop (which only ran while num > 0).
rev = int(str(num)[::-1]) if num > 0 else 0
# Ones digit of the reversed number (i.e. the leading digit of the input).
a = rev % 10
print("The reverse number is : ",rev)
print("It's ones digit is : ",a)
| reyagarg13/Reya-Python-for-beginners | reverse of a number & also print it's ones digit..py | reverse of a number & also print it's ones digit..py | py | 296 | python | en | code | 0 | github-code | 50 |
# Fetch all EC2 instances in us-east-1 and print the raw describe_instances()
# response (a nested dict; a sample payload is shown in the string below).
import boto3
ec2 = boto3.client('ec2',region_name="us-east-1")  # low-level EC2 API client
ec2_dict=ec2.describe_instances()  # full reservations/instances payload
print("ec2_dict type is",type(ec2_dict))
print("ec2_dict is",ec2_dict)
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.start_instances
# Below is the Example Python Dictionary response for 'ec2.describe_instances()'
'''
test_dict=
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"s3:ListBucketByTags",
"s3:GetBucketLocation",
"s3:GetObjectVersion"
],
"Resource": [
"arn:aws:s3:::aws-devops-testbucket",
"arn:aws:s3:::aws-devops-testbucket/*"
]
},
{
"Sid": "VisualEditor1",
"Effect": "Allow",
"Action": [
"s3:GetAccountPublicAccessBlock",
"s3:HeadBucket"
],
"Resource": "*"
}
]
}
instance_id=instances['Instances'][0]['InstanceId']
instance_state=instances['Instances'][0]['State']['Name']
instance_tags=instances['Instances'][0]['Tags']
InstanceId_Value = ec2_dictionary['Reservations'][0]['Instances'][0]['InstanceId']
InstanceState_Value = ec2_dictionary['Reservations'][0]['Instances'][0]['State']['Name']
instance_id = ec2_dictionary['Reservations'][0]['Instances'][0]['InstanceId']
instance_state = ec2_dictionary['Reservations'][0]['Instances'][0]['State']['Name']
ec2_dictionary['Reservations'][0]['Instances'][0]['InstanceId']
ec2_dictionary['Reservations'][0]['Instances'][0]['State']['Name']
ec2_dictionary['Reservations'][0]['Instances'][0]['State']['Name']
test_dir = {
"Reservations": [
{"Instances1":['one'], 'Groups1': ['two'] },
{"Instances2":['three'], 'Groups2': ['four'] },
{"Instances3":[], 'Groups3': [] }
]
}
test_dir['Reservations'][0]['Instances1'][0] => 'one'
test_dir['Reservations'] => List => [{"Instances1":[], 'Groups1': [] },{"Instances2":[], 'Groups2': [] },{"Instances3":[], 'Groups3': [] }]
test_dir['Reservations'][0] => {"Instances1":[], 'Groups1': [] }
test_dir['Reservations'][0]['Instances1'] => []
instance_id = ec2_dictionary['Reservations'][0]['Instances'][0]['InstanceId']
instance_state = ec2_dictionary['Reservations'][0]['Instances'][0]['State']['Name']
ec2_dictionary=
{
'Reservations':
[
{
'Groups': [],
'Instances': [{
'AmiLaunchIndex': 0,
'ImageId': 'ami-08f63db601b82ff5f',
'InstanceId': 'i-0f998cd8f0c4d7765',
'InstanceType': 't2.micro',
'KeyName': 'aws-linux-mumbai',
'LaunchTime': datetime.datetime(2020, 12, 13, 3, 54, 1, tzinfo = tzlocal()),
'Monitoring': {
'State': 'disabled'
},
'PrivateDnsName': 'ip-172-31-29-10.ap-south-1.compute.internal',
'PrivateIpAddress': '172.31.29.10',
'ProductCodes': [],
'PublicDnsName': 'ec2-13-233-149-161.ap-south-1.compute.amazonaws.com',
'PublicIpAddress': '13.233.149.161',
'State': {
'Code': 16,
'Name': 'running'
},
'StateTransitionReason': '',
'SubnetId': 'subnet-763c651f',
'VpcId': 'vpc-b680d3df',
'Architecture': 'x86_64',
'IamInstanceProfile': {
'Arn': 'arn:aws:iam::082923708139:instance-profile/EC2-AWS-CICD-Roile',
'Id': 'AIPARGTVDL3VW3O4VSDK6'
},
'NetworkInterfaces': [{
'Association': {
'IpOwnerId': 'amazon',
'PublicDnsName': 'ec2-13-233-149-161.ap-south-1.compute.amazonaws.com',
'PublicIp': '13.233.149.161'
},
'Attachment': {
'AttachTime': datetime.datetime(2020, 12, 5, 4, 24, 7, tzinfo = tzlocal()),
'AttachmentId': 'eni-attach-08cfd5390b05b478a',
'DeleteOnTermination': True,
'DeviceIndex': 0,
'Status': 'attached'
},
'Description': '',
'Groups': [{
'GroupName': 'launch-wizard-2',
'GroupId': 'sg-068aab378fd4baf27'
}],
'Ipv6Addresses': [],
'MacAddress': '02:ce:d0:5a:79:d0',
'NetworkInterfaceId': 'eni-0d047c9183d815761',
'OwnerId': '082923708139',
'PrivateDnsName': 'ip-172-31-29-10.ap-south-1.compute.internal',
'PrivateIpAddress': '172.31.29.10',
'PrivateIpAddresses': [{
'Association': {
'IpOwnerId': 'amazon',
'PublicDnsName': 'ec2-13-233-149-161.ap-south-1.compute.amazonaws.com',
'PublicIp': '13.233.149.161'
},
'Primary': True,
'PrivateDnsName': 'ip-172-31-29-10.ap-south-1.compute.internal',
'PrivateIpAddress': '172.31.29.10'
}],
'SourceDestCheck': True,
'Status': 'in-use',
'SubnetId': 'subnet-763c651f',
'VpcId': 'vpc-b680d3df',
'InterfaceType': 'interface'
}],
'RootDeviceName': '/dev/xvda',
'RootDeviceType': 'ebs',
'SecurityGroups': [{
'GroupName': 'launch-wizard-2',
'GroupId': 'sg-068aab378fd4baf27'
}],
'SourceDestCheck': True,
'Tags': [{
'Key': 'Name',
'Value': 'EC2-A'
}],
'VirtualizationType': 'hvm',
'CpuOptions': {
'CoreCount': 1,
'ThreadsPerCore': 1
},
'CapacityReservationSpecification': {
'CapacityReservationPreference': 'open'
},
'HibernationOptions': {
'Configured': False
},
'MetadataOptions': {
'State': 'applied',
'HttpTokens': 'optional',
'HttpPutResponseHopLimit': 1,
'HttpEndpoint': 'enabled'
},
'EnclaveOptions': {
'Enabled': False
}
}],
'OwnerId': '082923708139',
'ReservationId': 'r-00b4fb251f781e276'
}],
'ResponseMetadata': {
'RequestId': '1d003b2f-5fad-4e8c-ada7-e7ca2a33fb1d',
'HTTPStatusCode': 200,
'HTTPHeaders': {
'x-amzn-requestid': '1d003b2f-5fad-4e8c-ada7-e7ca2a33fb1d',
'content-type': 'text/xml;charset=UTF-8',
'transfer-encoding': 'chunked',
'vary': 'accept-encoding',
'date': 'Sun, 13 Dec 2020 05:52:52 GMT',
'server': 'AmazonEC2'
},
'RetryAttempts': 0
}
}
'''
| pravin2610/myfirstrepo | 1a_list_ec2.py | 1a_list_ec2.py | py | 6,183 | python | en | code | 0 | github-code | 50 |
8642522026 | import bs4 as bs
import urllib.request as req
import down as manhas
length = int(input('Enter your How many video of playlist u want to downlode :'))
link = input('Enter the url of 1st video : ')#'https://www.youtube.com/watch?v=P6YJy2fmJ1o&list=RDP6YJy2fmJ1o'
name = ''
main_list = []
main_list.append(link)
list1 = []
sou = req.urlopen(link).read()
soup = bs.BeautifulSoup(sou,'lxml')
#for item in soup.find_all('h1'):
# name = item.text
#print(name)
item = soup.find('div' , {'id' :'content'})
for io in soup.find_all('a'):
list1.append(io.get('href'))
list1 = list1[6:(length+6)]
#print(list1)
for string in list1:
main_list.append('https://en.savefrom.net/#url=https://youtube.com'+string+'&utm_source=youtube.com&utm_medium=short_domains&utm_campaign=www.ssyoutube.com')
main_list[0] = 'https://en.savefrom.net/#url='+link+'&list=RDP6YJy2fmJ1o&utm_source=youtube.com&utm_medium=short_domains&utm_campaign=www.ssyoutube.com'
z = 0
for i in range(0,length):
manhas.downlode(main_list[i])
#link = "https://en.savefrom.net/#url=http://youtube.com/watch?v=89lQ5k5F_hM&list=RDMM89lQ5k5F_hM&utm_source=youtube.com&utm_medium=short_domains&utm_campaign=www.ssyoutube.com"
| Monkeydluffy3/youtube-playlist-downloder | try.py | try.py | py | 1,204 | python | en | code | 2 | github-code | 50 |
2203159720 | from gui.docking import *
from gui.layout_manger import *
from gui.state_manager import *
from gui.window_manager import *
from project.project_manager import *
def entry_point():
layout_manager = LayoutManager()
settings = Settings()
app_settings = AppSettings()
project = Project()
hello_imgui.set_assets_folder(demo_utils.demos_assets_folder())
# Application state
app_state = AppState()
docking = Docking()
window_manager = WindowManager(app_state)
app_state.project.create_project("artel_project_temp", "/Users/amirmohammad")
# app_state.project.save()
# app_state.project.open_project("artel_project_temp")
app_state.colors.append_color(Color(0, "Color1", [0.5, 0.5, 0.5], True))
app_state.colors.append_color(Color(1, "Color2", [0.5, 0.5, 0.5], True))
app_state.colors.append_color(Color(2, "Color3", [0.5, 0.5, 0.5], True))
# Hello ImGui params (they hold the settings as well as the Gui callbacks)
runner_params = hello_imgui.RunnerParams()
runner_params.app_window_params.window_title = "Artel Slicer"
runner_params.imgui_window_params.menu_app_title = "Artel Slicer App"
runner_params.app_window_params.window_geometry.size = (1000, 900)
runner_params.app_window_params.restore_previous_geometry = True
# Set LoadAdditionalFonts callback
runner_params.callbacks.load_additional_fonts = window_manager.load_fonts
runner_params.imgui_window_params.show_status_bar = True
# Add custom widgets in the status bar
runner_params.callbacks.show_status = lambda: window_manager.status_bar_gui()
# uncomment next line in order to hide the FPS in the status bar
# runner_params.im_gui_window_params.show_status_fps = False
#
# Menu bar
#
runner_params.imgui_window_params.show_menu_bar = True # We use the default menu of Hello ImGui
# fill callbacks ShowMenuGui and ShowAppMenuItems, to add items to the default menu and to the App menu
runner_params.callbacks.show_menus = lambda: window_manager.show_menu_gui(app_state)
runner_params.callbacks.show_app_menu_items = window_manager.show_app_menu_items
#
# Load user settings at callbacks `post_init` and save them at `before_exit`
#
runner_params.callbacks.post_init = lambda: settings.load_my_app_settings(app_state)
runner_params.callbacks.before_exit = lambda: settings.save_my_app_settings(app_state)
#
# Part 2: Define the application layout and windows
#
# First, tell HelloImGui that we want full screen dock space (this will create "MainDockSpace")
runner_params.imgui_window_params.default_imgui_window_type = \
hello_imgui.DefaultImGuiWindowType.provide_full_screen_dock_space
# In this demo, we also demonstrate multiple viewports: you can drag windows outside out the main window
# in order to put their content into new native windows
runner_params.imgui_window_params.enable_viewports = True
# Set the default layout (this contains the default DockingSplits and DockableWindows)
runner_params.docking_params = layout_manager.create_default_layout(app_state)
# Add alternative layouts
runner_params.alternative_docking_layouts = layout_manager.create_alternative_layouts(app_state)
#
# Part 3: Run the app
#
hello_imgui.run(runner_params)
if __name__ == "__main__":
entry_point()
| AmirmohammadZarif/Artel | Engine/ArtelSlicer/main.py | main.py | py | 3,418 | python | en | code | 0 | github-code | 50 |
41642694128 | import matplotlib.pyplot as plt
import streamlit as st
import pandas as pd
from streamlit_lottie import st_lottie
import requests
import numpy as np
from sklearn import preprocessing
from sklearn.svm import SVR
import pickle
st.set_page_config(layout="wide")
def load_lottieurl(url: str):
    """Download a Lottie animation's JSON from *url*.

    Returns the parsed JSON on HTTP 200, otherwise None.
    """
    resp = requests.get(url)
    return resp.json() if resp.status_code == 200 else None
# load the model from disk
@st.cache
def load_model():
    """Load the pickled regression model once (memoized by Streamlit).

    Uses a context manager so the file handle is closed promptly; the
    original left the handle returned by open() dangling.
    """
    with open('model.pkl', 'rb') as model_file:
        return pickle.load(model_file)
file_url = 'https://assets3.lottiefiles.com/packages/lf20_drzlffcc.json'
lottie_sea = load_lottieurl(file_url)
st_lottie(lottie_sea, speed=2, height=200, key="initial")
# Side Bar
abalone_df = pd.read_csv('abalone.csv')
min_vals = abalone_df.min()
max_vals = abalone_df.max()
st.sidebar.title('Abalone Predictive Data')
sex_box = st.sidebar.selectbox("Sex", ("Male", "Infant", "Female"))
lenght = st.sidebar.slider('Lenght', min_vals[1], max_vals[1], 0.01)
diameter = st.sidebar.slider('Diameter', min_vals[2], max_vals[2], 0.01)
height = st.sidebar.slider('Height', min_vals[3], max_vals[3], 0.01)
whole_weight = st.sidebar.slider('Whole Weight', min_vals[4], max_vals[4], 0.01)
shucked_weight = st.sidebar.slider('Shucked Weight', min_vals[5], max_vals[5], 0.01)
viscera_weight = st.sidebar.slider('Viscera Weight', min_vals[6], max_vals[6], 0.01)
# Midle Page
st.title('Abalone Age Prediction')
st.write(' Predicting the age of abalone from physical measurements.'
'The age of abalone is determined by cutting the shell through'
'the cone, staining it, and counting the number of rings through'
'a microscope -- a boring and time-consuming task. Other measurements, '
'which are easier to obtain, are used to predict the age. Further information, '
'such as weather patterns and location (hence food availability)'
'may be required to solve the problem.')
if sex_box == 'Male':
sex_box = float(2)
if sex_box == 'Infant':
sex_box = float(1)
if sex_box == 'Female':
sex_box = float(0)
le = preprocessing.LabelEncoder()
le.fit(abalone_df['sex'])
abalone_df['sex'] = le.transform(abalone_df['sex'])
model = load_model()
arr = np.array([[sex_box, lenght, diameter, height, whole_weight, shucked_weight, viscera_weight]])
# predictions
preds = np.floor(model.predict(arr))
st.title('Your Prediction of Abalone Rings is ' + str(int(preds[0])))
# Data Columns
col1, col2, col3 = st.columns(3)
with col1:
fig, ax = plt.subplots()
lenght = abalone_df.to_numpy()[:,1]
dia = abalone_df.to_numpy()[:,2]
ax.set_title("Relationship between Diameter and Lenght", size=20)
ax.scatter(lenght, dia, c=dia, s=40, cmap=plt.cm.RdYlBu)
ax.grid('on')
ax.set_xlabel('lenght', size=15)
ax.set_ylabel('diameter', size=15)
st.pyplot(fig)
with col2:
fig, ax = plt.subplots()
sex = abalone_df.to_numpy()[:,0]
rings = abalone_df.to_numpy()[:,-1]
ax.set_title("Rings Binned Data", size=20)
ax.bar(sex, rings, color='red')
ax.grid('on')
ax.set_xticks(np.arange(3))
ax.set_xticklabels(['F', 'I', 'M'])
ax.set_xlabel('sex', size=15)
ax.set_ylabel('rings', size=15)
st.pyplot(fig)
with col3:
st.write('**Dataset Structure**')
st.dataframe(abalone_df)
# Dataframe description
st.write('Dataset Description')
st.table(abalone_df.describe())
# Correlation Matrix
_, col2, _ = st.columns(3)
with col2:
fig, ax = plt.subplots(figsize=(5,5))
columns = abalone_df.columns[:-1]
values = abalone_df[columns].corr().values
im = ax.imshow(values)
# We want to show all ticks...
ax.set_xticks(np.arange(len(columns)))
ax.set_yticks(np.arange(len(columns)))
# ... and label them with the respective list entries
ax.set_xticklabels(columns, size=12)
ax.set_yticklabels(columns, size=12)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(len(columns)):
for j in range(len(columns)):
text = ax.text(j, i, np.round(values[i, j],2), ha="center", va="center", color="w")
ax.set_title("Correlation Matrix")
fig.tight_layout()
st.pyplot(fig) | issaiass/BDev---Abalone-Ring-Prediction | streamlit.py | streamlit.py | py | 4,307 | python | en | code | 0 | github-code | 50 |
34961777963 | from manimlib.imports import *
class lesson_1(Scene):
def construct(self):
title = TextMobject("An overview on Trigonometry")
title.scale(1)
self.play(Write(title))
self.wait(3)
self.play(FadeOut(title))
'''insert unit circle diagram'''
trig = TextMobject("Trigonon --> Triangle")
trig2 = TextMobject("Metron --> Measure")
self.play(Write(trig))
trig2.move_to(DOWN)
self.wait()
self.play(Write(trig2))
self.wait(3)
self.play(FadeOut(trig), FadeOut(trig2))
self.wait(3)
text2 = TextMobject("Relates angles with sides!")
self.play(Write(text2))
self.wait(3)
self.play(FadeOut(text2))
grid = NumberPlane()
self.add(grid)
start = (0,0,0)
end = (3,3,0)
arc2 = Arc()
arclabel = TexMobject("\\theta")
arclabel.move_to(RIGHT* 0.2 + UP * 0.1)
arclabel.scale(0.5)
arc2.scale(0.22)
arc2.move_to(RIGHT*0.3 + UP * 0.15)
label1, label2, label3 = TextMobject("A"), TextMobject("B"), TextMobject("C")
label1.move_to((3, 3.3, 0))
label2.move_to((3, -0.3, 0))
label3.move_to((0, -0.3, 0))
sidelabel = TextMobject('hyp')
sidelabel2 = TextMobject('opp')
sidelabel3 = TextMobject('adj')
sidelabel.move_to((1.5, 1.8, 0))
sidelabel.rotate(PI/4)
sidelabel2.move_to((3.6,1.5,0))
sidelabel3.move_to((1.5,-0.5,0))
line1 = Line(start, end)
line2 = Line(end, (3,0,0))
start2 = (3,0.5,0)
end2 = (2.5,0.5,0)
end3 = (2.5,0,0)
line3 = Line(start2, end2)
line4 = Line(end2, end3)
self.play(ShowCreation(line1), ShowCreation(line2), ShowCreation(arc2), ShowCreation(arclabel), ShowCreation(line3), ShowCreation(line4))
self.add(label1, label2, label3, sidelabel, sidelabel2, sidelabel3)
self.wait(3)
self.remove(label1, label2, label3, sidelabel, sidelabel2, sidelabel3)
self.play(FadeOut(line1), FadeOut(line2), FadeOut(arc2), FadeOut(arclabel), FadeOut(line3), FadeOut(line4))
self.remove(grid)
self.wait(3)
f1,f2,f3,f4,f5,f6 = TexMobject("sin \\theta"), TexMobject("cos \\theta"), TexMobject("tan \\theta"), TexMobject("csc \\theta"), TexMobject ("sec \\theta"), TexMobject("cot \\theta")
f1.move_to(UP*2 + LEFT * 2)
f2.move_to(LEFT*2)
f3.move_to(LEFT * 2 + DOWN * 2)
f4.move_to(UP*2)
f6.move_to (DOWN*2)
self.play(Write(f1),Write(f2), Write(f3), Write(f4), Write(f5), Write(f6))
self.wait(3)
self.play(FadeOut(f1), FadeOut(f2), FadeOut(f3), FadeOut(f4), FadeOut(f5), FadeOut(f6))
si = TexMobject("sin \\theta = \\frac{opposite}{hypoteneuse}")
cosayn = TexMobject("cos \\theta = \\frac{adjacent}{hypoteneuse}")
tan = TexMobject("tan \\theta = \\frac{opposite}{adjacent}")
csc = TexMobject("csc \\theta = \\frac{1}{sin \\theta}")
sec = TexMobject("sec \\theta = \\frac{1}{cos \\theta}")
cot = TexMobject("cot \\theta = \\frac{1}{tan \\theta}")
si.move_to(UP*2 + LEFT * 2)
cosayn.move_to(LEFT*2)
tan.move_to(LEFT * 2 + DOWN * 2)
csc.move_to(UP*2+ RIGHT * 2)
sec.move_to(RIGHT*2)
cot.move_to (DOWN*2+ RIGHT * 2)
self.play(Write(si),Write(cosayn), Write(tan), Write(csc), Write(sec), Write(cot))
self.wait(10)
self.play(FadeOut(si),FadeOut(cosayn), FadeOut(tan), FadeOut(csc), FadeOut(sec), FadeOut(cot))
self.wait(5)
grid = NumberPlane()
self.add(grid)
start = (0,0,0)
end = (3,3,0)
arc2 = Arc()
arclabel = TexMobject("\\theta")
arclabel.move_to(RIGHT* 0.2 + UP * 0.1)
arclabel.scale(0.5)
arc2.scale(0.22)
arc2.move_to(RIGHT*0.3 + UP * 0.15)
label1, label2, label3 = TextMobject("A"), TextMobject("B"), TextMobject("C")
label1.move_to((3, 3.3, 0))
label2.move_to((3, -0.3, 0))
label3.move_to((0, -0.3, 0))
theta_value = TexMobject("\\theta = 60 ^{\\circ}")
theta_value.move_to((1,-1.5,0))
sidelabel = TextMobject('hyp')
sidelabel2 = TextMobject('height')
sidelabel3 = TextMobject('50 m')
sidelabel.move_to((1.5, 1.8, 0))
sidelabel.rotate(PI/4)
sidelabel2.move_to((3.6,1.5,0))
sidelabel3.move_to((1.5,-0.5,0))
line1 = Line(start, end)
line2 = Line(end, (3,0,0))
start2 = (3,0.5,0)
end2 = (2.5,0.5,0)
end3 = (2.5,0,0)
line3 = Line(start2, end2)
line4 = Line(end2, end3)
self.play(ShowCreation(line1), ShowCreation(line2), ShowCreation(arc2), ShowCreation(arclabel), ShowCreation(line3), ShowCreation(line4))
self.add(label1, label2, label3, sidelabel, sidelabel2, sidelabel3, theta_value)
self.wait(10)
self.remove(label1, label2, label3, sidelabel, sidelabel2, sidelabel3, theta_value)
self.play(FadeOut(line1), FadeOut(line2), FadeOut(arc2), FadeOut(arclabel), FadeOut(line3), FadeOut(line4))
self.remove(grid)
self.wait(3)
ans = TexMobject("tan 60 = \\frac{height}{50}")
ans2 = TextMobject("height = 50 tan 60 meters!")
self.play(Write(ans))
self.wait(2)
ans2.move_to(DOWN * 2)
self.play(Write(ans2))
self.wait(2)
self.play(FadeOut(ans),FadeOut(ans2))
'''insert unit circle diagram'''
| AjayArvind2207/YT | OverviewTrig/trig1.py | trig1.py | py | 5,651 | python | en | code | 0 | github-code | 50 |
12602492470 | import hashlib
import json
import time
import uuid
from enum import IntEnum
from typing import List, Tuple, Union
from zhixuewang.models import (BasicSubject, ExtendedList, Exam, Homework, HwAnsPubData, HwResource, HwType, Mark, StuHomework, Subject, SubjectScore,
StuClass, School, Sex, Grade, Phase, ExamInfo,
StuPerson, StuPersonList)
from zhixuewang.exceptions import UserDefunctError, PageConnectionError, PageInformationError
from zhixuewang.student.urls import Url
from json import JSONDecodeError
def _check_is_uuid(msg: str):
"""判断msg是否为uuid"""
return len(msg) == 36 and msg[14] == "4" and msg[8] == msg[13] == msg[18] == msg[23] == "-"
def _md5_encode(msg: str) -> str:
md5 = hashlib.md5()
md5.update(msg.encode(encoding="utf-8"))
return md5.hexdigest()
class FriendMsg(IntEnum):
    """Status codes returned when sending a friend invitation."""
    SUCCESS = 200  # invitation sent successfully
    ALREADY = 201  # an invitation was already sent; waiting for the reply
    UNDEFINED = 202  # unknown error
class StudentAccount(StuPerson):
"""学生账号"""
    def __init__(self, session):
        """Wrap an authenticated HTTP *session* as a student account."""
        super().__init__()
        self._session = session  # requests-like session with login cookies set
        self.username = ""
        self.role = "student"
        # XToken cache: [token, unix timestamp when fetched]; refreshed by
        # _get_auth_header() once it is older than ~10 minutes.
        self.token_timestamp = ["", 0]
    def _get_auth_header(self) -> dict:
        """Build the per-request auth headers, refreshing the cached XToken.

        Each call signs a fresh GUID + millisecond timestamp with the fixed
        app salt. A cached XToken younger than 10 minutes is reused;
        otherwise a new one is fetched and the method recurses once to build
        headers with the warm cache.

        Raises:
            PageConnectionError: HTTP failure on the token endpoint.
            PageInformationError: API error code or unexpected payload shape.
        """
        auth_guid = str(uuid.uuid4())
        auth_time_stamp = str(int(time.time() * 1000))
        # MD5 of guid + timestamp + app-specific salt, as the site expects.
        auth_token = _md5_encode(auth_guid + auth_time_stamp +
                                 "iflytek!@#123student")
        token, cur_time = self.token_timestamp
        if token and time.time() - cur_time < 600:  # cached token still fresh (<10 min)
            return {
                "authbizcode": "0001",
                "authguid": auth_guid,
                "authtimestamp": auth_time_stamp,
                "authtoken": auth_token,
                "XToken": token
            }
        r = self._session.get(Url.XTOKEN_URL, headers={
            "authbizcode": "0001",
            "authguid": auth_guid,
            "authtimestamp": auth_time_stamp,
            "authtoken": auth_token
        })
        if not r.ok:
            raise PageConnectionError(
                f"_get_auth_header中出错, 状态码为{r.status_code}")
        try:
            if r.json()["errorCode"] != 0:
                raise PageInformationError(
                    f"_get_auth_header出错, 错误信息为{r.json()['errorInfo']}")
            # Cache the fresh token; its timestamp is stamped just below.
            self.token_timestamp[0] = r.json()["result"]
        except (JSONDecodeError, KeyError) as e:
            raise PageInformationError(
                f"_get_auth_header中网页内容发生改变, 错误为{e}, 内容为\n{r.text}")
        self.token_timestamp[1] = time.time()
        # Recurse: the cache is now warm, so this returns the header dict.
        return self._get_auth_header()
    def set_base_info(self):
        """Fetch and store the account's basic info (id, name, class, school...).

        Raises:
            UserDefunctError: the account has no class attached (defunct).
            PageConnectionError: HTTP failure on the info endpoint.
            PageInformationError: unexpected payload shape.

        Returns:
            self, for call chaining.
        """
        r = self._session.get(Url.INFO_URL)
        if not r.ok:
            raise PageConnectionError(f"set_base_info出错, 状态码为{r.status_code}")
        try:
            json_data = r.json()["student"]
            if not json_data.get("clazz", False):
                # No class in the payload -> treat the account as defunct.
                raise UserDefunctError()
            self.code = json_data.get("code")
            self.name = json_data.get("name")
            self.avatar = json_data.get("avatar")
            self.gender = Sex.BOY if json_data.get(
                "gender") == "1" else Sex.GIRL
            self.username = json_data.get("loginName")
            self.id = json_data.get("id")
            self.mobile = json_data.get("mobile")
            self.email = json_data.get("email")
            self.qq_number = json_data.get("im")
            # Rebuild the class -> school / grade -> phase hierarchy.
            self.clazz = StuClass(
                id=json_data["clazz"]["id"],
                name=json_data["clazz"]["name"],
                school=School(
                    id=json_data["clazz"]["division"]["school"]["id"],
                    name=json_data["clazz"]["division"]["school"]["name"]),
                grade=Grade(code=json_data["clazz"]["division"]["grade"]["code"],
                            name=json_data["clazz"]["division"]["grade"]["name"],
                            phase=Phase(code=json_data["clazz"]["division"]
                                        ["grade"]["phase"]["code"],
                                        name=json_data["clazz"]["division"]
                                        ["grade"]["phase"]["name"])))
            self.birthday = json_data.get("birthday", 0)
        except (JSONDecodeError, KeyError) as e:
            raise PageInformationError(
                f"set_base_info中网页内容发生改变, 错误为{e}, 内容为\n{r.text}")
        return self
def get_exam(self, exam_data: Union[Exam, str] = "") -> Exam:
"""获取考试
Args:
exam_data (Union[Exam, str]): 考试id 或 考试名称, 为Exam实例时直接返回, 为默认值时返回最新考试
Returns:
Exam
"""
if not exam_data:
return self.get_latest_exam()
if isinstance(exam_data, Exam):
if not exam_data:
return self.get_latest_exam()
elif exam_data.class_rank and exam_data.grade_rank:
return exam_data
else:
return self.get_exams().find_by_id(exam_data.id)
exams = self.get_exams()
if _check_is_uuid(exam_data):
exam = exams.find_by_id(exam_data) # 为id
else:
exam = exams.find_by_name(exam_data)
return exam
    def get_page_exam(self, page_index: int) -> Tuple[ExtendedList[Exam], bool]:
        """Fetch one page (10 entries) of the exam list.

        Returns:
            (exams, has_next_page): the page's exams and whether more pages
            remain — used by get_exams() to walk the whole list.
        """
        exams: ExtendedList[Exam] = ExtendedList()
        r = self._session.get(Url.GET_EXAM_URL,
                              params={
                                  "pageIndex": page_index,
                                  "pageSize": 10
                              },
                              headers=self._get_auth_header())
        if not r.ok:
            raise PageConnectionError(
                f"get_page_exam中出错, 状态码为{r.status_code}")
        try:
            json_data = r.json()["result"]
            for exam_data in json_data["examList"]:
                exam = Exam(
                    id=exam_data["examId"],
                    name=exam_data["examName"]
                )
                exam.create_time = exam_data["examCreateDateTime"]
                exams.append(exam)
            hasNextPage: bool = json_data["hasNextPage"]
        except (JSONDecodeError, KeyError) as e:
            raise PageInformationError(
                f"get_page_exam中网页内容发生改变, 错误为{e}, 内容为\n{r.text}")
        return exams, hasNextPage
    def get_latest_exam(self) -> ExamInfo:
        """Fetch the most recent exam, including its subject list."""
        r = self._session.get(Url.GET_RECENT_EXAM_URL,
                              headers=self._get_auth_header())
        if not r.ok:
            raise PageConnectionError(
                f"get_latest_exam中出错, 状态码为{r.status_code}")
        try:
            json_data = r.json()["result"]
            exam_info_data = json_data["examInfo"]
            # Collect the exam's subjects (one per scored paper).
            subjects: ExtendedList[Subject] = ExtendedList()
            for subject_data in exam_info_data["subjectScores"]:
                subjects.append(Subject(
                    id=subject_data["topicSetId"],
                    name=subject_data["subjectName"],
                    code=subject_data["subjectCode"]
                ))
            exam_info = ExamInfo(
                id=exam_info_data["examId"],
                name=exam_info_data["examName"],
                subjects=subjects,
                classId=exam_info_data["classId"],
                grade_code=json_data["gradeCode"],
                is_final=exam_info_data["isFinal"]
            )
            exam_info.create_time = exam_info_data["examCreateDateTime"]
        except (JSONDecodeError, KeyError) as e:
            raise PageInformationError(
                f"get_latest_exam中网页内容发生改变, 错误为{e}, 内容为\n{r.text}")
        return exam_info
def get_exams(self) -> ExtendedList[Exam]:
"""获取所有考试"""
exams: ExtendedList[Exam] = ExtendedList()
i = 1
check = True
while check:
cur_exams, check = self.get_page_exam(i)
exams.extend(cur_exams)
i += 1
return exams
    def __get_self_mark(self, exam: Exam, has_total_score: bool) -> Mark:
        """Fetch this student's per-subject scores for *exam*.

        When has_total_score is True and the API reports a total, a synthetic
        "total" SubjectScore (subject code "99") carrying the exam's
        class/grade ranks is appended as well.
        """
        mark = Mark(exam=exam, person=self)
        r = self._session.get(Url.GET_MARK_URL,
                              params={"examId": exam.id},
                              headers=self._get_auth_header())
        if not r.ok:
            raise PageConnectionError(
                f"__get_self_mark中出错, 状态码为{r.status_code}")
        try:
            json_data = r.json()
            json_data = json_data["result"]
            # exam.name = json_data["total_score"]["examName"]
            # exam.id = json_data["total_score"]["examId"]
            # One SubjectScore per scored paper.
            for subject in json_data["paperList"]:
                subject_score = SubjectScore(
                    score=subject["userScore"],
                    subject=Subject(
                        id=subject["paperId"],
                        name=subject["subjectName"],
                        code=subject["subjectCode"],
                        standard_score=subject["standardScore"],
                        exam_id=exam.id),
                    person=StuPerson()
                )
                # subject_score.create_time = 0
                mark.append(subject_score)
            total_score = json_data.get("totalScore")
            if has_total_score and total_score:
                # Synthetic "total" entry; only it carries the ranks.
                subject_score = SubjectScore(
                    score=total_score["userScore"],
                    subject=Subject(
                        id="",
                        name=total_score["subjectName"],
                        code="99",
                        standard_score=total_score["standardScore"],
                        exam_id=exam.id,
                    ),
                    person=StuPerson(),
                    class_rank=exam.class_rank,
                    grade_rank=exam.grade_rank
                )
                # subject_score.create_time = 0
                mark.append(subject_score)
        except (JSONDecodeError, KeyError) as e:
            raise PageInformationError(
                f"__get_self_mark中网页内容发生改变, 错误为{e}, 内容为\n{r.text}")
        return mark
def get_self_mark(self,
exam_data: Union[Exam, str] = "",
has_total_score: bool = True) -> Mark:
"""获取指定考试的成绩
Args:
exam_data (Union[Exam, str]): 考试id 或 考试名称 或 Exam实例, 默认值为最新考试
has_total_score (bool): 是否计算总分, 默认为True
Returns:
Mark
"""
exam = self.get_exam(exam_data)
if exam is None:
return Mark()
return self.__get_self_mark(exam, has_total_score)
    def __get_subjects(self, exam: Exam) -> ExtendedList[Subject]:
        """Fetch the subject (paper) list of *exam*; the total is not included."""
        subjects: ExtendedList[Subject] = ExtendedList()
        r = self._session.get(Url.GET_SUBJECT_URL,
                              params={"examId": exam.id},
                              headers=self._get_auth_header())
        if not r.ok:
            raise PageConnectionError(
                f"__get_subjects中出错, 状态码为{r.status_code}")
        try:
            json_data = r.json()
            for subject in json_data["result"]["paperList"]:
                subjects.append(
                    Subject(id=subject["paperId"],
                            name=subject["subjectName"],
                            code=subject["subjectCode"],
                            standard_score=subject["standardScore"],
                            exam_id=exam.id))
        except (JSONDecodeError, KeyError) as e:
            raise PageInformationError(
                f"__get_subjects中网页内容发生改变, 错误为{e}, 内容为\n{r.text}")
        return subjects
def get_subjects(self, exam_data: Union[Exam, str] = "") -> ExtendedList[Subject]:
    """Return every subject of the given exam (total score excluded).

    Args:
        exam_data: exam id, exam name or an Exam instance; defaults to
            the most recent exam.

    Returns:
        ExtendedList[Subject]: empty when the exam cannot be resolved.
    """
    exam = self.get_exam(exam_data)
    if exam is not None:
        return self.__get_subjects(exam)
    return ExtendedList([])
def __get_subject(self, exam: Exam, subject_data: str):
    """Resolve *subject_data* (uuid or display name) within *exam*."""
    candidates = self.get_subjects(exam)
    if _check_is_uuid(subject_data):
        return candidates.find_by_id(subject_data)
    return candidates.find_by_name(subject_data)
def get_subject(self,
                subject_data: Union[Subject, str],
                exam_data: Union[Exam, str] = "") -> Subject:
    """Look up one subject of an exam.

    Args:
        subject_data: subject id or subject name; a Subject instance is
            returned unchanged.
        exam_data: exam id, exam name or an Exam instance; defaults to
            the most recent exam.

    Returns:
        Subject: an empty Subject when nothing matches.
    """
    if isinstance(subject_data, Subject):
        return subject_data
    exam = self.get_exam(exam_data)
    if exam is None:
        return Subject()
    found = self.__get_subject(exam, subject_data)
    if found is None:
        return Subject()
    return found
def __get_original(self, subject_id: str, exam_id: str) -> List[str]:
    """Fetch the answer-sheet image URLs for one paper of one exam.

    Raises:
        PageConnectionError: when the HTTP request fails.
        PageInformationError: when the response layout changed.
    """
    r = self._session.get(Url.GET_ORIGINAL_URL,
                          params={
                              "examId": exam_id,
                              "paperId": subject_id,
                          },
                          headers=self._get_auth_header())
    if not r.ok:
        raise PageConnectionError(
            f"__get_original中出错, 状态码为{r.status_code}")
    try:
        # "sheetImages" is itself a JSON-encoded list of URL strings.
        image_urls = list(json.loads(r.json()["result"]["sheetImages"]))
    except (JSONDecodeError, KeyError) as e:
        raise PageInformationError(
            f"__get_original中网页内容发生改变, 错误为{e}, 内容为\n{r.text}")
    return image_urls
def get_original(self,
                 subject_data: Union[Subject, str],
                 exam_data: Union[Exam, str] = "") -> List[str]:
    """Return the original answer-sheet image URLs of one subject.

    Args:
        subject_data: subject id, subject name or a Subject instance.
        exam_data: exam id or exam name; defaults to the most recent exam.

    Returns:
        List[str]: image URLs, empty when exam or subject is unknown.
    """
    exam = self.get_exam(exam_data)
    if not exam:
        return []
    subject = self.get_subject(subject_data, exam)
    if not subject:
        return []
    return self.__get_original(subject.id, exam.id)
def get_clazzs(self) -> ExtendedList[StuClass]:
    """Return every class of the student's current grade."""
    r = self._session.get(Url.GET_CLAZZS_URL,
                          params={"d": int(time.time())})
    if not r.ok:
        raise PageConnectionError(f"get_clazzs中出错, 状态码为{r.status_code}")
    clazzs: ExtendedList[StuClass] = ExtendedList()
    try:
        for entry in r.json()["clazzs"]:
            clazzs.append(StuClass(name=entry["name"],
                                   id=entry["id"],
                                   grade=self.clazz.grade,
                                   school=self.clazz.school))
    except (JSONDecodeError, KeyError) as e:
        raise PageInformationError(
            f"get_clazzs中网页内容发生改变, 错误为{e}, 内容为\n{r.text}")
    return clazzs
def get_clazz(self, clazz_data: Union[StuClass, str] = "") -> StuClass:
    """Resolve a class of the current grade.

    Args:
        clazz_data: class id or class name; a StuClass instance is
            returned unchanged and an empty value yields the student's
            own class.

    Returns:
        StuClass
    """
    if not clazz_data:
        return self.clazz
    if isinstance(clazz_data, StuClass):
        return clazz_data
    clazzs = self.get_clazzs()
    # A purely numeric string is treated as an id, anything else as a name.
    finder = clazzs.find_by_id if clazz_data.isdigit() else clazzs.find_by_name
    return finder(clazz_data)
def __get_classmates(self, clazz_id: str) -> ExtendedList[StuPerson]:
    """Download the member list of the class *clazz_id*.

    Raises:
        PageConnectionError: when the HTTP request fails.
        PageInformationError: when the response layout changed.
    """
    classmates = StuPersonList()
    r = self._session.get(Url.GET_CLASSMATES_URL,
                          params={
                              "r": f"{self.id}student",
                              "clazzId": clazz_id
                          })
    if not r.ok:
        raise PageConnectionError(
            f"__get_classmates中出错, 状态码为{r.status_code}")
    try:
        json_data = r.json()
        for classmate_data in json_data:
            # Server reports the birthday in milliseconds since the epoch;
            # convert to whole seconds (missing value defaults to 0).
            birthday = int(int(classmate_data.get("birthday", 0)) / 1000)
            classmate = StuPerson(
                name=classmate_data["name"],
                id=classmate_data["id"],
                clazz=StuClass(
                    id=classmate_data["clazz"]["id"],
                    name=classmate_data["clazz"]["name"],
                    grade=self.clazz.grade,
                    school=School(
                        id=classmate_data["clazz"]["school"]["id"],
                        name=classmate_data["clazz"]["school"]["name"])),
                code=classmate_data.get("code"),
                email=classmate_data["email"],
                qq_number=classmate_data["im"],
                gender=Sex.BOY if classmate_data["gender"] == "1" else Sex.GIRL,
                mobile=classmate_data["mobile"])
            classmate.birthday = birthday
            classmates.append(classmate)
    except (JSONDecodeError, KeyError) as e:
        raise PageInformationError(
            f"__get_classmates中网页内容发生改变, 错误为{e}, 内容为\n{r.text}")
    return classmates
def get_classmates(self, clazz_data: Union[StuClass, str] = "") -> ExtendedList[StuPerson]:
    """List the students of a class.

    Args:
        clazz_data: class id, class name or a StuClass instance; an empty
            value selects the student's own class.

    Returns:
        ExtendedList[StuPerson]: empty when the class is unknown.
    """
    clazz = self.get_clazz(clazz_data)
    if clazz is not None:
        return self.__get_classmates(clazz.id)
    return ExtendedList([])
def get_friends(self) -> ExtendedList[StuPerson]:
    """Return the student's friend list."""
    r = self._session.get(Url.GET_FRIEND_URL,
                          params={"d": int(time.time())})
    if not r.ok:
        raise PageConnectionError(f"get_friends中出错, 状态码为{r.status_code}")
    friends = StuPersonList()
    try:
        for entry in r.json()["friendList"]:
            friends.append(
                StuPerson(name=entry["friendName"], id=entry["friendId"]))
    except (JSONDecodeError, KeyError) as e:
        raise PageInformationError(
            f"get_friends中网页内容发生改变, 错误为{e}, 内容为\n{r.text}")
    return friends
def invite_friend(self, friend: Union[StuPerson, str]) -> FriendMsg:
    """Send a friend invitation.

    Args:
        friend: user id or a StuPerson instance.

    Returns:
        FriendMsg: SUCCESS, ALREADY (an invitation is already pending)
        or UNDEFINED for any other server answer.

    Raises:
        PageConnectionError: when the HTTP request fails.
    """
    user_id = friend.id if isinstance(friend, StuPerson) else friend
    r = self._session.get(Url.INVITE_FRIEND_URL,
                          params={
                              "d": int(time.time()),
                              "friendId": user_id,
                              "isTwoWay": "true"
                          })
    if not r.ok:
        raise PageConnectionError(f"invite_friend中出错, 状态码为{r.status_code}")
    json_data = r.json()
    # Bug fix: use .get() so a response without "result"/"message" maps to
    # UNDEFINED instead of raising KeyError on the bare subscript.
    if json_data.get("result") == "success":
        return FriendMsg.SUCCESS
    if json_data.get("message") == "已发送过邀请,等待对方答复":
        return FriendMsg.ALREADY
    return FriendMsg.UNDEFINED
def remove_friend(self, friend: Union[StuPerson, str]) -> bool:
    """Delete a friend.

    Args:
        friend: user id or a StuPerson instance.

    Returns:
        bool: True when the server confirms the deletion.
    """
    user_id = friend.id if isinstance(friend, StuPerson) else friend
    r = self._session.get(Url.DELETE_FRIEND_URL,
                          params={
                              "d": int(time.time()),
                              "friendId": user_id
                          })
    if not r.ok:
        raise PageConnectionError(f"remove_friend中出错, 状态码为{r.status_code}")
    return r.json()["result"] == "success"
def get_homeworks(self, size: int = 20, is_complete: bool = False, subject_code: str = "-1", createTime: int = 0) -> ExtendedList[StuHomework]:
    """Fetch up to *size* homework entries (fetching all is not supported yet).

    Args:
        size (int): number of entries to request.
        is_complete (bool): True for finished homework, False for unfinished.
        subject_code (str): "01" for Chinese, "02" for maths, and so on;
            "-1" requests all subjects.
        createTime (int): only take homework created before this time,
            0 starts from the newest (currently unused by callers).

    Returns:
        ExtendedList[StuHomework]: homework entries (without their
        attached resources — see get_homework_resources).
    """
    r = self._session.get(Url.GET_HOMEWORK_URL, params={
        # NOTE(review): "pageIndex" is hard-coded to 2 — confirm this is
        # intentional and does not skip the first result page.
        "pageIndex": 2,
        "completeStatus": 1 if is_complete else 0,
        "pageSize": size,  # how many entries to take
        "subjectCode": subject_code,
        "token": self._get_auth_header()["XToken"],
        "createTime": createTime  # only items created before this time; 0 = newest
    })
    homeworks: ExtendedList[StuHomework] = ExtendedList()
    data = r.json()["result"]
    for each in data["list"]:
        homeworks.append(StuHomework(
            id=each["hwId"],
            title=each["hwTitle"],
            type=HwType(
                name=each["homeWorkTypeDTO"]["typeName"],
                code=each["homeWorkTypeDTO"]["typeCode"],
            ),
            # The server reports timestamps in milliseconds; convert to seconds.
            begin_time=each["beginTime"] / 1000,
            end_time=each["endTime"] / 1000,
            create_time=each["createTime"] / 1000,
            subject=BasicSubject(
                name=each["subjectName"],
                code=each["subjectCode"]
            ),
            is_allow_makeup=bool(each["isAllowMakeup"]),
            class_id=each["classId"],
            ansPubData=HwAnsPubData(
                name=each["openAnswerDTO"]["answerPubName"],
                code=each["openAnswerDTO"]["answerPubType"]
            ),
            stu_hwid=each["stuHwId"]
        ))
    return homeworks
def get_homework_resources(self, hwid: str, hw_typecode: int) -> List[HwResource]:
    """Fetch the resources attached to a homework (e.g. question documents).

    Args:
        hwid (str): homework id.
        hw_typecode (int): homework type code.

    Returns:
        List[HwResource]: name/path pairs of the attached files.
    """
    # NOTE(review): type code 102 is short-circuited to "no resources" —
    # presumably that homework type never carries attachments; confirm.
    if hw_typecode == 102:
        return []
    r = self._session.post(Url.GET_HOMEWORK_RESOURCE_URL, json={
        # Boilerplate envelope expected by the homework service.
        "base":{
            "appId": "WNLOIVE",
            "appVersion": "",
            "sysVersion": "v1001",
            "sysType": "web",
            "packageName": "com.iflytek.edu.hw",
            "udid": self.id,
            "expand": {}
        },
        "params": {"hwId":hwid}
    }, headers={
        "Authorization": self._get_auth_header()["XToken"],
    })
    data = r.json()["result"]
    resources = []
    for each in data["topicAttachments"]:
        resources.append(HwResource(
            name=each["name"],
            path=each["path"]
        ))
    return resources
| SkinCrab/zhixuewang-python | zhixuewang/student/student.py | student.py | py | 25,269 | python | en | code | null | github-code | 50 |
import random
import sys
def game_play():
    """Play one round of rock-paper-scissors against the computer.

    Reads the player's choice from stdin, picks a random hand for the
    computer and prints the outcome.
    """
    # Bug fix: normalise the input — all comparisons below are lowercase,
    # so "Rock" (as the prompt suggests) previously matched no branch.
    your_choice = input("Pick a hand form(Rock,Paper or Scissors): ").strip().lower()
    #Possible options in the form of a list
    possibilities = ["rock", "paper", "scissors"]
    #the computers random choice instruction
    comp_choice = random.choice(possibilities)
    print(f"(\nYou Chose {your_choice}, the computer chose {comp_choice}.\n")
    if your_choice == comp_choice:
        print(f"Both players selecetd {your_choice}. it is a tie!")
    elif your_choice == "rock":
        if comp_choice == "scissors":
            print("Rock smashes scissors! You Win!")
        else:
            print("paper covers rock, You lose!")
    elif your_choice == "paper":
        # Bug fix: paper beats rock (the original printed a loss for both
        # computer hands in this branch).
        if comp_choice == "rock":
            print("Paper covers rock! You Win!")
        else:
            print("Scissors cuts paper, you lose.")
    elif your_choice == "scissors":
        # Bug fix: the original had no scissors branch at all.
        if comp_choice == "paper":
            print("Scissors cuts paper! You Win!")
        else:
            print("Rock smashes scissors, you lose.")
    else:
        print("Invalid choice, please pick Rock, Paper or Scissors.")
# Bug fix: the original set game_is_on but never looped on it, so
# answering "Y" replayed exactly once.  Keep playing until the player
# answers "N".
game_is_on = True
while game_is_on:
    game_play()
    continue_ = input("Would you like to continue?(Y)yes or (N)No.")
    if continue_ == "N":
        game_is_on = False
        print("Game Over")
| Bophelo11/RockPaperScissors | RockPaperScissor/main.py | main.py | py | 1,232 | python | en | code | 0 | github-code | 50 |
import sys
if sys.version_info < (3, 9):
import importlib_resources
else:
import importlib.resources as importlib_resources
from asdf.extension import ManifestExtension
from asdf.resource import DirectoryResourceMapping
import asdf_zarr
from .converter import ChunkedNdarrayConverter
def get_resource_mappings():
    """Return the ASDF resource mappings shipped with asdf_zarr.

    Exposes the bundled schema and manifest directories to asdf's
    resource machinery under the chunked_ndarray URI prefix.
    """
    root = importlib_resources.files(asdf_zarr) / "resources"
    if not root.is_dir():
        raise RuntimeError("Missing resources directory")
    prefix = "asdf://asdf-format.org/chunked_ndarray/"
    return [
        DirectoryResourceMapping(root / "schemas", prefix + "schemas/"),
        DirectoryResourceMapping(root / "manifests", prefix + "manifests/"),
    ]
def get_extensions():
    """Return the ManifestExtension instances provided by this package."""
    manifest_uri = (
        "asdf://asdf-format.org/chunked_ndarray/manifests/chunked_ndarray-0.1.0"
    )
    converters = [ChunkedNdarrayConverter()]
    return [ManifestExtension.from_uri(manifest_uri, converters=converters)]
| eslavich/asdf-zarr | src/asdf_zarr/integration.py | integration.py | py | 1,040 | python | en | code | 0 | github-code | 50 |
18575529987 | """EndpointStore Unit Tests."""
from __future__ import annotations
import uuid
from unittest import mock
import pytest
import requests
from proxystore.store.endpoint import EndpointStore
from proxystore.store.endpoint import EndpointStoreError
def test_no_endpoints_provided() -> None:
    """Constructing a store with an empty endpoint list raises ValueError."""
    with pytest.raises(ValueError):
        EndpointStore('name', endpoints=[])
def test_no_endpoints_match(endpoint_store) -> None:
    """A random UUID matching no configured endpoint fails construction."""
    with pytest.raises(EndpointStoreError, match='Failed to find'):
        EndpointStore(
            'name',
            endpoints=[str(uuid.uuid4())],
            proxystore_dir=endpoint_store.kwargs['proxystore_dir'],
        )
def test_no_endpoints_accessible(endpoint_store) -> None:
    """Construction fails when every endpoint answers with an HTTP error."""
    response = requests.Response()
    response.status_code = 400  # simulate an unreachable/unhealthy endpoint
    with mock.patch('requests.get', return_value=response):
        with pytest.raises(EndpointStoreError, match='Failed to find'):
            EndpointStore('test', **endpoint_store.kwargs)
def test_endpoint_uuid_mismatch(endpoint_store) -> None:
    """Construction fails when a reachable endpoint reports a foreign UUID."""
    response = requests.Response()
    response.status_code = 200
    # The endpoint answers OK but identifies as a different endpoint.
    response.json = lambda: {'uuid': str(uuid.uuid4())}
    with mock.patch('requests.get', return_value=response):
        with pytest.raises(EndpointStoreError, match='Failed to find'):
            EndpointStore('test', **endpoint_store.kwargs)
def test_bad_responses(endpoint_store) -> None:
    """Test handling of bad responses from Endpoint."""
    store = EndpointStore(
        endpoint_store.name,
        **endpoint_store.kwargs,
        cache_size=0,  # disable caching so every call hits the (mocked) endpoint
    )
    # A 400 on GET is treated as "key does not exist": get() returns None.
    response = requests.Response()
    response.status_code = 400
    with mock.patch('requests.get', return_value=response):
        key = store.set([1, 2, 3], key='key')
        assert store.get(key) is None
    # Any other error status (401 here) must surface as an
    # EndpointStoreError carrying the status code, for every operation.
    response.status_code = 401
    with mock.patch('requests.get', return_value=response):
        with pytest.raises(EndpointStoreError, match='401'):
            store.exists(key)
        with pytest.raises(EndpointStoreError, match='401'):
            store.get(key)
    with mock.patch('requests.post', return_value=response):
        with pytest.raises(EndpointStoreError, match='401'):
            store.evict(key)
        with pytest.raises(EndpointStoreError, match='401'):
            store.set([1, 2, 3], key='key')
def test_key_parse() -> None:
    """A key with too many ':'-separated segments is rejected."""
    with pytest.raises(ValueError, match='key'):
        EndpointStore._parse_key('a:b:c')
| SJTU-Serverless/proxystore | tests/store/endpoint_test.py | endpoint_test.py | py | 2,438 | python | en | code | null | github-code | 50 |
import numpy as np
import csv
import os
import math
from collections import defaultdict
import time
import matplotlib.pyplot as plt
starttime = time.time_ns()  # wall-clock start for the runtime report printed at the end
def Distance(Xa, Ya, Xb, Yb):
    """Return the Euclidean distance between points (Xa, Ya) and (Xb, Yb).

    Uses math.hypot instead of a hand-rolled sqrt of squared differences:
    the result is the same but intermediate overflow/underflow for very
    large or very small coordinates is avoided.
    """
    return math.hypot(Xa - Xb, Ya - Yb)
pointarray = []
# Read every point from the CSV next to this script: [number, x, y] per row.
# NOTE(review): "\i" in the path below is a literal backslash + 'i' (not an
# escape sequence), so this only works on Windows; the name `input` also
# shadows the builtin for the duration of the with-block.
with open(os.path.dirname(__file__) + "\inputGAS2A.csv", "r") as input:
    locations = csv.DictReader(input)
    for pointentry in locations:
        point = [int(pointentry["Point number"]),float(pointentry["x"]), float(pointentry["y"])]
        pointarray.append(point)

# Dense symmetric matrix: DistanceArray[i][j] = distance from point i to j.
DistanceArray = []
for i in pointarray:
    Columbs = []
    for j in pointarray:
        Columbs.append(Distance(i[1],i[2],j[1],j[2]))
    DistanceArray.append(Columbs)
class Graph:
    """Union-find based Kruskal minimum-spanning-tree solver.

    Edges are collected with add_edge(); kruskal() then sorts them by
    weight and accepts every edge that joins two previously separate
    components.  The final loop writes the accepted edges into the
    module-level ``Outputlist`` and plots them, so this class is coupled
    to the surrounding script's globals (``Outputlist``, ``pointarray``,
    ``plt``).
    """

    def __init__(self, vertex):
        self.V = vertex   # number of vertices
        self.graph = []   # edge list, each entry is [u, v, weight]

    def add_edge(self, u, v, w):
        """Register an undirected edge between u and v with weight w."""
        self.graph.append([u, v, w])

    def search(self, parent, i):
        """Return the root representative of i (recursive find, no path compression)."""
        if parent[i] == i:
            return i
        return self.search(parent, parent[i])

    def apply_union(self, parent, rank, x, y):
        """Merge the components rooted at x and y using union by rank."""
        xroot = self.search(parent, x)
        yroot = self.search(parent, y)
        if rank[xroot] < rank[yroot]:
            parent[xroot] = yroot
        elif rank[xroot] > rank[yroot]:
            parent[yroot] = xroot
        else:
            parent[yroot] = xroot
            rank[xroot] += 1

    def kruskal(self):
        """Build the MST, record it in the global Outputlist and plot the edges."""
        result = []
        i, e = 0, 0
        # Kruskal: consider edges in order of increasing weight.
        self.graph = sorted(self.graph, key=lambda item: item[2])
        parent = []
        rank = []
        # Every vertex starts in its own singleton component.
        for node in range(self.V):
            parent.append(node)
            rank.append(0)
        # A spanning tree of V vertices has exactly V - 1 edges.
        while e < self.V - 1:
            u, v, w = self.graph[i]
            i = i + 1
            x = self.search(parent, u)
            y = self.search(parent, v)
            if x != y:  # accept only edges that join two different components
                e = e + 1
                result.append([u, v, w])
                self.apply_union(parent, rank, x, y)
        # Write accepted edges into the symmetric output matrix (1-based
        # indices) and draw each edge — relies on the script-level globals.
        number,X,Y = zip(*pointarray)
        for u, v, weight in result:
            Outputlist[u+1][v+1] = round(weight,3)
            Outputlist[v+1][u+1] = round(weight,3)
            plt.plot([X[u],X[v]],[Y[u],Y[v]])
# Prepare the (n+1) x (n+1) output matrix; '-' marks "no MST edge" and the
# extra row/column leaves room for the 1..n header written below.
Outputlist=[]
for i in range(len(DistanceArray[0])+1):
    columb = []
    for j in range(len(DistanceArray[0])+1):
        columb.append('-')
    Outputlist.append(columb)

# Build the graph from the full distance matrix (self-distances of 0 are
# skipped) and solve; kruskal() fills Outputlist and plots the edges.
Tree = Graph(len(DistanceArray[0]))
for j in range(len(DistanceArray[0])):
    for i in range(len(DistanceArray[0])):
        if DistanceArray[j][i] != 0:
            Tree.add_edge(j,i,DistanceArray[j][i])
Tree.kruskal()

# Header formatting: put "x" in the corner and 1..n along the first row/column.
Outputlist[0][0] = "x"
for i in range(len(Outputlist[0])-1):
    Outputlist[i+1][0] = i+1
    Outputlist[0][i+1] = i+1
np.savetxt(os.path.dirname(__file__) + '\OutputGAS2A2.csv', Outputlist, delimiter =", ", fmt ='% s') #writes out the list
print("--- total runtime is: " + str((time.time_ns() - starttime)/1000000) + "ms ---") #prints the total time taken
# Scatter the input points on top of the plotted MST edges and show the figure.
number,X,Y = zip(*pointarray)
plt.scatter(X,Y)
plt.show()
import csv
import glob
import math
import os
import pathlib
import pickle
import random
import cv2
import numpy as np
import pandas as pd
import torch
import project_utils
from cnn_transformer import build_feature_extractor, FeatureExtractorSpec, FeatureExtractorFeatures
from tag_lut import tag_count, freq_tag_lut, dur_tag_lut, all_tags, nb_tag, all_classes
def load_frame_tags(tag_dir):
    """Load every pickled tag file directly inside *tag_dir*.

    Returns:
        dict: mapping of file stem (name without extension) to the
        unpickled contents of that file.
    """
    tags_by_stem = {}
    _, _, filenames = next(os.walk(tag_dir))
    for filename in filenames:
        full_path = os.path.join(tag_dir, filename)
        with open(full_path, 'rb') as handle:
            tags_by_stem[pathlib.Path(filename).stem] = pickle.load(handle)
    return tags_by_stem
def load_videos(video_dir):
    """Return the full path of every file directly inside *video_dir*."""
    _, _, filenames = next(os.walk(video_dir))
    return [os.path.join(video_dir, name) for name in filenames]
def load_videos_sorted_dir(top_dir):
    """Collect .mp4 files grouped by immediate sub-folder of *top_dir*.

    The class label of each group is the sub-folder name up to the first
    '.'; sub-folders containing no .mp4 file are skipped entirely.

    Returns:
        tuple: (videos, video_classes) where videos[i] is the list of
        .mp4 paths found in the folder labelled video_classes[i].
    """
    root, folders, _ = next(os.walk(top_dir))
    videos, video_classes = [], []
    for folder in folders:
        _, _, names = next(os.walk(os.path.join(root, folder)))
        clips = [os.path.join(root, folder, name)
                 for name in names
                 if pathlib.Path(name).suffix == ".mp4"]
        if clips:
            video_classes.append(folder.split('.')[0])
            videos.append(clips)
    return videos, video_classes
import re
def tryint(s):
    """Return int(s) when *s* is an integer string, otherwise *s* unchanged.

    Bug fix: only ValueError (a non-numeric string) is caught.  The
    original bare ``except:`` silently swallowed every exception,
    including TypeError and KeyboardInterrupt.
    """
    try:
        return int(s)
    except ValueError:
        return s
def alphanum_key(s):
    """Turn a string into a list of string and number chunks.

    "z23a" -> ["z", 23, "a"]

    Used as a sort key so file names sort in natural (human) order.
    """
    pieces = []
    for chunk in re.split('([0-9]+)', s):
        try:
            pieces.append(int(chunk))
        except ValueError:
            pieces.append(chunk)
    return pieces
def sort_nicely(l):
    """ Sort the given list in the way that humans expect.

    Sorts *l* in place using natural ordering via alphanum_key, so
    e.g. "img2" comes before "img10".
    """
    l.sort(key=alphanum_key)
def load_preliminary_dataset(classes):
    """Group tracklet annotation (.txt) files into per-class video lists.

    Results are cached in ``tracklet_videos.pkl`` in the working
    directory; delete that file to force a rescan.

    Args:
        classes: ordered list of class names; the returned per-class
            lists are indexed by position in this list.

    Returns:
        tuple: (videos, labels, participants), each a list of per-class
        lists; every "video" is the naturally-sorted group of annotation
        files sharing one parent folder.
    """
    if os.path.exists('tracklet_videos.pkl'):
        with open('tracklet_videos.pkl', 'rb') as f:
            videos, labels, participants = pickle.load(f)
        return videos, labels, participants
    else:
        # NOTE(review): hard-coded absolute Windows path — breaks on any
        # other machine; consider making the dataset root a parameter.
        files = [f for f in glob.glob(rf'C:\GitHub\Keypoint-LSTM\datasets\stills\MMISB Cropped\**\*.txt', recursive=True) if
                 'classes' not in f]
        videos = [[] for x in range(len(classes))]
        labels = [[] for x in range(len(classes))]
        participants = [[] for x in range(len(classes))]
        processed_files = []
        for f in files:
            if f not in processed_files:
                video_name = str(pathlib.Path(f).parent)
                # All annotation files sharing this parent folder form one video.
                # NOTE(review): this is a chained comparison, i.e.
                # "parent == video_name and video_name in x" — confirm the
                # trailing "in x" part is intentional.
                video = [x for x in files if str(pathlib.Path(x).parent) == video_name in x]
                sort_nicely(video)
                # if len(video) != 15:
                #     print(video_name)
                # Path layout: .../<participant>/<label>/<video folder>/<file>
                label = pathlib.Path(f).parts[-3]
                participant = pathlib.Path(f).parts[-4]
                if label in classes:
                    processed_files.extend(video)
                    videos[classes.index(label)].append(video)
                    labels[classes.index(label)].append(label)
                    participants[classes.index(label)].append(participant)
                else:
                    continue
        with open('tracklet_videos.pkl', 'wb') as f:
            pickle.dump((videos, labels, participants), f)
        return videos, labels, participants
def create_video_sample(video, image_size, tracklet, img_read=cv2.IMREAD_UNCHANGED):
    """Build an array of processed frames for one video.

    Args:
        video: list of YOLO-format .txt annotation paths; the matching
            image is the same path with a .png extension.
        image_size: square side length each frame/crop is resized to.
        tracklet: when True, crop each frame to its class-0 bounding box;
            otherwise (or when no usable annotation exists) the whole
            frame is used.
        img_read: cv2 imread flag used to load the images.

    Returns:
        np.ndarray: stacked processed frames.
    """
    frame_tensors = []
    # writer = imageio.get_writer(f'debug_video.mp4',
    #                             fps=8.0)
    # height, width = 0, 0
    # for still in self.accepted_c_stills:
    #     if still.shape[0] > width:
    #         width = still.shape[0]
    #     if still.shape[1] > height:
    #         height = still.shape[1]
    # for still in self.accepted_c_stills:
    #     writer.append_data(cv2.resize(still, (height, width)))
    # writer.close()
    for anno in video:
        with open(anno) as f:
            annotations = f.readlines()
        annotation = None
        if len(annotations) > 0:
            if len(annotations) == 1:
                annotation = annotations[0].strip()
            elif len([x for x in annotations if int(x.split(' ')[0]) == 0]) == 1:
                # Several boxes but exactly one of class 0 — use that one.
                annotation = [x for x in annotations if int(x.split(' ')[0]) == 0][0]
            else:
                # Ambiguous frame (multiple class-0 boxes): skip it entirely.
                continue
        if annotation and tracklet:
            if int(annotation.split(' ')[0]) == 0:
                # YOLO format: class cx cy w h, all normalised to [0, 1];
                # convert to pixel coordinates of the top-left corner.
                _, x, y, w, h = map(float, annotation.split(' '))
                frame = cv2.imread(anno[:-3] + 'png', img_read)
                dh, dw = frame.shape[0], frame.shape[1]
                l = int((x - w / 2) * dw)
                t = int((y - h / 2) * dh)
                w = int(w * dw)
                h = int(h * dh)
                cropped_image = frame[t:t+h, l:l+w]
                resized_crop = cv2.resize(cropped_image, (image_size, image_size))
                # writer.append_data(resized_crop)
                if img_read == cv2.IMREAD_UNCHANGED:
                    # NOTE(review): [0, 1, 2] keeps the channel order as-is;
                    # a BGR->RGB swap would be [2, 1, 0] — confirm intent.
                    resized_frame = resized_crop[:, :, [0, 1, 2]]
                else:
                    resized_frame = resized_crop
                frame_tensors.append(resized_frame)
        else:
            # Just load the entire frame if no annotation found
            frame = cv2.imread(anno[:-3] + 'png', img_read)
            resized_crop = cv2.resize(frame, (image_size, image_size))
            # writer.append_data(resized_crop)
            if img_read == cv2.IMREAD_UNCHANGED:
                resized_frame = resized_crop[:, :, [0, 1, 2]]
            else:
                resized_frame = resized_crop
            frame_tensors.append(resized_frame)
    return np.array(frame_tensors)
def create_tracklet_pandas_dataset(classes, model_choice, image_size, seq_len,
                                   output_dir, spec, tracklet, channels=1, split=0.8, subject='p001'):
    """Build (and cache) a train/test feature dataset from tracklet videos.

    For every video of every class, frames are loaded via
    create_video_sample and either run through a CNN feature extractor
    (model_choice != 1) or kept as grayscale images; the processed
    samples are cached to a .pkl keyed by image size, feature count and
    extractor spec.  Finally the data is split into train/test either by
    leaving out one participant (*subject* is a string such as 'p001')
    or by the random ratio *split*.

    Returns:
        tuple: (train_videos, train_labels, test_videos, test_labels)
        as numpy arrays.
    """
    num_features = FeatureExtractorFeatures[int(spec)]
    if tracklet:
        train_filepath = f'{output_dir}/tracklet/train_{image_size}_{num_features}_{int(spec)}.pkl'
    else:
        train_filepath = f'{output_dir}/full_frame/train_{image_size}_{num_features}_{int(spec)}.pkl'
    if os.path.exists(train_filepath):
        # Cache hit: reuse the previously extracted features.
        with open(train_filepath, 'rb') as f:
            v, l, p = pickle.load(f)
    else:
        # Build the feature extractor
        if model_choice != 1:
            feature_extractor = build_feature_extractor(spec, image_size)
            # num_features = feature_extractor.output_shape[1]
            print(f"Number of Features: {num_features}")
        else:
            feature_extractor = None
            num_features = None
        batch_size = 2  # NOTE(review): unused — confirm it can be removed
        videos, labels, participants = load_preliminary_dataset(classes)
        v, l, p = [], labels, participants
        for class_idx, vid in enumerate(videos):
            print(f"Processing {class_idx} / {len(videos)}...")
            # Per-class placeholder: features when an extractor is used,
            # raw (seq, H, W, C) frames otherwise.
            if num_features:
                frame_v = np.zeros(
                    shape=(len(vid), seq_len, num_features), dtype="float32"
                )
            else:
                frame_v = np.zeros(
                    shape=(len(vid), seq_len, image_size, image_size, channels), dtype="float32"
                )
            for idx, video in enumerate(vid):
                # print(f"Processing {idx} / {len(vid)}...")
                frames = create_video_sample(video, image_size, tracklet, img_read=cv2.IMREAD_GRAYSCALE)
                frames = frames[None, ...]
                # Initialize placeholder to store the features of the current video.
                if num_features:
                    temp_frame_features = np.zeros(
                        shape=(1, seq_len, num_features), dtype="float32"
                    )
                else:
                    temp_frame_features = np.zeros(
                        shape=(1, seq_len, image_size, image_size, channels), dtype="float32"
                    )
                if feature_extractor:
                    # Extract features from the frames of the current video.
                    for i, batch in enumerate(frames):
                        video_length = batch.shape[0]
                        length = min(seq_len, video_length)
                        for j in range(length):
                            # All-black frames are skipped (zero features).
                            if np.mean(batch[j, :]) > 0.0:
                                temp_frame_features[i, j, :] = feature_extractor.predict(
                                    np.squeeze(batch[None, j, :]), verbose=0
                                )
                            else:
                                temp_frame_features[i, j, :] = 0.0
                    frame_v[idx, ] = temp_frame_features.squeeze()
                else:
                    for i, batch in enumerate(frames):
                        video_length = batch.shape[0]
                        length = min(seq_len, video_length)
                        for j in range(length):
                            temp_frame_features[i, j, :] = cv2.cvtColor(batch[j, :], cv2.COLOR_BGR2GRAY)[..., None]
                    frame_v[idx, ] = temp_frame_features
            v.append(frame_v)
        with open(train_filepath, 'wb') as f:
            pickle.dump((v, l, p), f)
    # Split each class into train/test, either leave-one-subject-out
    # (subject is a participant id string) or by random ratio *split*.
    train_videos, train_labels, train_participants = [], [], []
    test_videos, test_labels, test_participants = [], [], []
    train_samples, test_samples = 0, 0
    for videos, labels, participants in zip(v, l, p):
        if videos.shape[0] and labels:
            if videos.shape[0] != 1:
                if labels[0] in classes:
                    if type(subject) is str:
                        training_samples = [i for i, x in enumerate(videos) if participants[i] != subject]
                        train_size = len(training_samples)
                    else:
                        train_size = math.floor(len(videos) * split)
                        training_samples = np.array(random.sample(list(np.arange(len(videos))), train_size))
                    train_v = videos[training_samples]
                    train_l = list(np.array(labels)[training_samples])
                    train_p = list(np.array(participants)[training_samples])
                    train_videos[train_samples:train_samples+train_size] = train_v
                    train_samples += train_size
                    train_labels.extend(train_l)
                    train_participants.extend(train_p)
                    testing_samples = [x for x in np.arange(len(videos)) if x not in training_samples]
                    test_v = videos[testing_samples]
                    test_l = list(np.array(labels)[testing_samples])
                    test_p = list(np.array(participants)[testing_samples])
                    test_videos[test_samples:test_samples+(len(videos)-train_size)] = test_v
                    test_samples += (len(videos) - train_size)
                    test_labels.extend(test_l)
                    test_participants.extend(test_p)
    return np.array(train_videos), np.array(train_labels), np.array(test_videos), np.array(test_labels)
def create_pandas_dataset(data_dir):
    """Split class-sorted videos 90/10 and write train.csv / test.csv.

    Each CSV row holds a video path ("video_name") and the numeric index
    of its class in ``all_classes`` ("tag"); classes not present in
    ``all_classes`` are skipped.
    """
    videos, classes = load_videos_sorted_dir(data_dir)
    train_videos, eval_videos = [], []
    train_tags, eval_tags = [], []
    for video_class, classe in zip(videos, classes):
        # np.random.shuffle(video_class)
        if classe in all_classes:
            # 90% of each class goes to training, the remainder to eval.
            train_index = int(len(video_class) * 0.9)
            train_videos.extend(video_class[:train_index])
            train_tags.extend([all_classes.index(classe)] * train_index)
            eval_videos.extend(video_class[train_index:])
            eval_tags.extend([all_classes.index(classe)] * (len(video_class) - train_index))
    train = pd.DataFrame()
    test = pd.DataFrame()
    # for video in train_videos:
    #     train_tags.append(all_class_list[int(pathlib.Path(video).stem.split('_')[-3])])
    # for video in eval_videos:
    #     eval_tags.append(all_class_list[int(pathlib.Path(video).stem.split('_')[-3])])
    train['video_name'] = train_videos
    train['tag'] = train_tags
    # NOTE(review): the [:-1] slices below drop the last sample of each
    # split — confirm this is intentional.
    train = train[:-1]
    train.head()
    test['video_name'] = eval_videos
    test['tag'] = eval_tags
    test = test[:-1]
    test.head()
    train_new = train.reset_index(drop=True)
    test_new = test.reset_index(drop=True)
    train_new.to_csv("train.csv", index=False)
    test_new.to_csv("test.csv", index=False)
def create_csv_dataset(data_dir):
    """Write an 80/20 train/val split of *data_dir* videos to CSV files.

    The tag of each video is parsed from its file name (third-from-last
    underscore-separated field of the stem).

    NOTE(review): the output paths mix forward and literal back slashes
    (raw strings), so they only resolve as intended on Windows.
    """
    videos = load_videos(data_dir)
    train_index = int(len(videos) * 0.8)
    train_videos, eval_videos = videos[:train_index], videos[train_index:]
    with open(r'old_results/dataset\train_video_csv_file.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['video_name', 'tag'])
        for video in train_videos:
            tag = pathlib.Path(video).stem.split('_')[-3]
            writer.writerow([str(video), str(tag)])
    with open(r'old_results/dataset\val_video_csv_file.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['video_name', 'tag'])
        for video in eval_videos:
            tag = pathlib.Path(video).stem.split('_')[-3]
            writer.writerow([str(video), str(tag)])
def create_video_dataset(data_dir):
    """Build (or load) a 3-class tensor dataset from the videos in *data_dir*.

    Every video is decoded, transformed via project_utils.transform and
    stored as a [1, 3, num_clips, H, W] float tensor; the tag is parsed
    from the file name (third-from-last '_' field).  The resulting
    train/val/test dict is cached to disk and returned.

    NOTE(review): the output path mixes a leading '/' with a literal
    backslash — confirm it resolves as intended on the target platform.
    """
    output_file = r'/old_results/dataset\video_dataset.pkl'
    if os.path.exists(output_file):
        # Cache hit: return the previously serialised dataset.
        dataset = torch.load(output_file)
        return dataset
    videos = load_videos(data_dir)
    event_ticker = [0] * 3  # per-class sample counts, printed at the end
    x_data = [[] for i in range(3)]
    y_data = [[] for i in range(3)]
    print("Parsing video dataset...")
    for video in videos:
        tag = pathlib.Path(video).stem.split('_')[-3]
        event_ticker[int(tag)] = event_ticker[int(tag)] + 1
        video_data = cv2.VideoCapture(video)
        clips = []
        while video_data.isOpened():
            ret, frame = video_data.read()
            if ret:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                clips.append(project_utils.transform(image=frame)['image'])
            else:
                break
        input_frames = np.array(clips)
        # add an extra dimension
        input_frames = np.expand_dims(input_frames, axis=0)
        # transpose to get [1, 3, num_clips, height, width]
        input_frames = np.transpose(input_frames, (0, 4, 1, 2, 3))
        # convert the frames to tensor
        input_frames = torch.tensor(input_frames, dtype=torch.float32)
        # tag = torch.tensor(frame_tag[1], dtype=torch.long)
        target = [int(tag)]
        # target = np.expand_dims(target, axis=0)
        target = torch.tensor(target, dtype=torch.int64)
        x_data[int(tag)].append(input_frames)
        y_data[int(tag)].append(target)
    print("Finished parsing video dataset...")
    x_train, y_train, x_eval, y_eval, x_test, y_test = [], [], [], [], [], []
    # Shuffle data in order
    # np.random.shuffle(data)
    for x, y in zip(x_data, y_data):
        # Shuffle each class independently, then split 70/20/10.
        data = list(zip(x, y))
        np.random.shuffle(data)
        x, y = zip(*data)
        # np.random.shuffle(data)
        train_index = int(len(x) * 0.7)
        test_index = int(len(x) * 0.2) + train_index
        x_train.extend(x[:train_index])
        y_train.extend(y[:train_index])
        x_eval.extend(x[train_index:test_index])
        y_eval.extend(y[train_index:test_index])
        x_test.extend(x[test_index:])
        y_test.extend(y[test_index:])
    dataloader_dict = {'train': (x_train, y_train),
                       'val': (x_eval, y_eval),
                       'test': (x_test, y_test)
                       }
    print("Writing dataset to file...")
    torch.save(dataloader_dict, output_file)
    print(event_ticker)
    return dataloader_dict
def create_dataset(video_dir, tag_dir, tag_width, split=[0.7, 0.2, 0.1], output_dir=None, tag_filter=None):
    """Cut tagged event clips out of full videos and split them into sets.

    For every tagged frame, a clip of *tag_width* frames centred on the
    tag is extracted, written as an .mp4 for inspection, transformed and
    stored as a [1, 3, num_clips, H, W] tensor.  The per-tag dataset is
    optionally cached to *output_dir* and then split per tag according
    to *split* (train/eval/test fractions).

    Args:
        video_dir: directory of source videos.
        tag_dir: directory of pickled frame-tag files (see load_frame_tags).
        tag_width: clip length in frames, centred on the tagged frame.
        split: [train, eval, test] fractions.  NOTE(review): mutable
            default argument — safe only while it is never mutated.
        output_dir: cache directory for the assembled dataset, or None.
        tag_filter: optional list of tag ids to keep; tags are re-indexed
            by their position in this list.

    Returns:
        tuple: (train_dataset, eval_dataset, test_dataset) lists of
        {"Clip", "Tag", "Path"} dicts.
    """
    video_output_dir = r"/old_results/dataset\video_datasets\event_videos"
    loaded = False
    if tag_filter:
        dataset = [[] for i in range(len(tag_filter))]
    else:
        dataset = [[] for i in range(tag_count)]
    if output_dir:
        if os.path.exists(os.path.join(output_dir, f"dataset_{tag_width}.pkl")):
            print("Dataset already exists, loading...")
            output_file = os.path.join(output_dir, f"dataset_{tag_width}.pkl")
            dataset = torch.load(output_file)
            loaded = True
            # Regenerate when the cached class count does not match the request.
            if tag_filter:
                if len(dataset) != len(tag_filter):
                    print("Dataset is not the same as requested, regenerating...")
                    dataset = [[] for i in range(len(tag_filter))]
                    loaded = False
            else:
                if len(dataset) != tag_count:
                    print("Dataset is not the same as requested, regenerating...")
                    dataset = [[] for i in range(len(tag_count))]
                    loaded = False
    if not loaded:
        print("Generating dataset...")
        frames = load_frame_tags(tag_dir)
        videos = load_videos(video_dir)
        event_count = 0
        for video in videos:
            end_frame = -1
            print(f"Parsing {video}...")
            frame_tags = frames[pathlib.Path(video).stem]
            video_data = cv2.VideoCapture(video)
            frame_width = int(video_data.get(3))
            frame_height = int(video_data.get(4))
            for frame_tag in frame_tags:
                # Map the raw tag to its filtered index, or skip it.
                if tag_filter:
                    if not frame_tag[1] in tag_filter:
                        continue
                    else:
                        tag = tag_filter.index(frame_tag[1])
                else:
                    tag = frame_tag[1]
                data = dict()
                raw_clips, clips = [], []
                # Clip spans tag_width frames centred on the tagged frame.
                start_frame = int(abs(frame_tag[2] - tag_width / 2))
                # # If we added a no-behavior, then filter out any that conflict with the last event
                # if start_frame < end_frame:
                #     continue
                end_frame = int(abs(frame_tag[2] + tag_width / 2))
                out = cv2.VideoWriter(os.path.join(video_output_dir, f"{pathlib.Path(video).stem}_{tag}_{start_frame}_{end_frame}_{event_count}.mp4"),
                                      cv2.VideoWriter_fourcc(*'mp4v'), 8,
                                      (frame_width, frame_height))
                event_count += 1
                for i in range(start_frame, end_frame):
                    video_data.set(cv2.CAP_PROP_POS_FRAMES, i)
                    ret, frame = video_data.read()
                    if ret:
                        out.write(frame)
                        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                        clips.append(project_utils.transform(image=frame)['image'])
                input_frames = np.array(clips)
                # add an extra dimension
                input_frames = np.expand_dims(input_frames, axis=0)
                # transpose to get [1, 3, num_clips, height, width]
                input_frames = np.transpose(input_frames, (0, 4, 1, 2, 3))
                # convert the frames to tensor
                input_frames = torch.tensor(input_frames, dtype=torch.float32)
                # tag = torch.tensor(frame_tag[1], dtype=torch.long)
                data["Clip"] = input_frames
                data["Tag"] = tag
                data["Path"] = video
                dataset[tag].append(data)
        if output_dir:
            print("Writing dataset to file...")
            output_file = os.path.join(output_dir, f"dataset_{tag_width}.pkl")
            torch.save(dataset, output_file)
    # Split each tag's samples according to the requested fractions.
    train_dataset, eval_dataset, test_dataset = [], [], []
    for data in dataset:
        # np.random.shuffle(data)
        train_index = int(len(data) * split[0])
        test_index = int(len(data) * split[1]) + train_index
        train_dataset.extend(data[:train_index])
        eval_dataset.extend(data[train_index:test_index])
        test_dataset.extend(data[test_index:])
    # np.random.shuffle(train_dataset)
    # np.random.shuffle(eval_dataset)
    # np.random.shuffle(test_dataset)
    print("Finished generating dataset...")
    return train_dataset, eval_dataset, test_dataset
def generate_raw_data(tag_dir, video_dir, output_dir):
    """Parse DataPal tag files, align their timestamps with the matching videos,
    and write one pickle of frame-indexed event tags per video into output_dir.

    Relies on module-level lookup tables (`tag_count`, `freq_tag_lut`,
    `dur_tag_lut`, `all_tags`, `nb_tag`) defined elsewhere in this file.

    tag_dir:    directory of raw DataPal text exports
    video_dir:  directory of the corresponding video files (paired with tag
                files by directory-listing order — assumes matching sort order;
                TODO confirm)
    output_dir: directory where the per-video .pkl tag lists are written
    """
    output_files = []
    event_ticker = [0] * tag_count  # per-tag-code occurrence counter
    _, _, files = next(os.walk(tag_dir))
    _, _, video_files = next(os.walk(video_dir))
    total_tags = 0
    print("Parsing DataPal files to pickle format...")
    for file in files:
        start_parsing = False
        output_dict = {
            "Freq": [],
            "Dur": []
        }
        with open(os.path.join(tag_dir, file), 'r') as f:
            paused = False
            adjust_time = 0  # accumulated paused time to subtract from event times
            for line in f:
                # Header lines are "key: value"; keep them as metadata.
                split = line.split(":")
                if len(split) > 1:
                    output_dict[split[0]] = split[1].strip()
                if "EVENT RECORDING START" in line:
                    start_parsing = True
                elif start_parsing:
                    # Event lines are comma-separated, quoted fields.
                    splits = line.split(',')
                    for i in range(0, len(splits)):
                        splits[i] = str(splits[i].strip("\""))
                    # Pause/resume pairs: subtract the paused interval from all
                    # subsequent event timestamps.
                    if splits[0] == "PauseTime":
                        end_time = float(splits[3])
                        paused = True
                    if splits[0] == "SessionTime" and paused:
                        start_time = float(splits[3])
                        adjust_time = adjust_time + (start_time - end_time)
                        paused = False
                    if splits[0] == "Freq":
                        output_dict[splits[0]].append((str(splits[2]), float(splits[3]) - adjust_time))
                    if splits[0] == "Dur":
                        output_dict[splits[0]].append((str(splits[2]), float(splits[3]) - adjust_time))
                    if splits[0] == "End":
                        output_dict["Session Length"] = float(splits[3]) - adjust_time
        print("File: {0} | Adjusted Session Length: {1}".format(file, output_dict["Session Length"]))
        output_file = os.path.join(output_dir, pathlib.Path(file).stem + ".pkl")
        output_files.append(output_file)
        with open(output_file, 'wb') as f:
            pickle.dump(output_dict, f)
    print("Finished DataPal parsing to pickle format...\n")
    print("Adjusting event times for session length...")
    for video_file, output_file in zip(video_files, output_files):
        with open(output_file, 'rb') as f:
            tags = pickle.load(f)
        cap = cv2.VideoCapture(os.path.join(video_dir, video_file))
        if not cap.isOpened():
            print('Error while trying to read video. Please check path again')
            continue
        frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        fps = cap.get(cv2.CAP_PROP_FPS)
        seconds = int(frames / fps)
        frame_tags = []
        cap.release()
        # Shift every tag by the difference between video length and the
        # recorded session length so tags line up with video time.
        time_adj = abs(seconds - float(tags["Session Length"]))
        print("\nVideo Length (s):", seconds, "| Adjustment:", time_adj, "| Path:", video_file)
        for i in range(0, len(tags["Freq"])):
            tags["Freq"][i] = (tags["Freq"][i][0], tags["Freq"][i][1] + time_adj)
        for i in range(0, len(tags["Dur"])):
            tags["Dur"][i] = (tags["Dur"][i][0], tags["Dur"][i][1] + time_adj)
        freq_tags = tags["Freq"]
        dur_tags = tags["Dur"]
        nb_tags = []
        # Convert frequency events to (name, code, frame, time, fps) tuples.
        for tag in freq_tags:
            frame = int(tag[1] * fps)
            tag_value = freq_tag_lut[tag[0]]
            event_ticker[tag_value] = event_ticker[tag_value] + 1
            print(f"Video: {video_file} | Event: {tag[0]} | Event Code: {tag_value} | Frame Index: {frame} | Timestamp: {int(tag[1] / 60)}:{int(tag[1] % 60)}")
            frame_tags.append((tag[0], tag_value, frame, tag[1], fps))
            total_tags = total_tags + 1
        # Same conversion for duration events.
        for tag in dur_tags:
            frame = int(tag[1] * fps)
            tag_value = dur_tag_lut[tag[0]]
            event_ticker[tag_value] = event_ticker[tag_value] + 1
            print(f"Video: {video_file} | Event: {tag[0]} | Event Code: {tag_value} | Frame Index: {frame} | Timestamp: {int(tag[1] / 60)}:{int(tag[1] % 60)}")
            frame_tags.append((tag[0], tag_value, frame, tag[1], fps))
            total_tags = total_tags + 1
        # Insert synthetic 'no-behavior' tags halfway between consecutive events.
        for i in range(1, len(frame_tags)):
            event_ticker[all_tags["no-behavior"]] = event_ticker[all_tags["no-behavior"]] + 1
            frame = int(frame_tags[i - 1][2] + ((frame_tags[i][2] - frame_tags[i - 1][2]) / 2))
            video_time = frame / fps
            nb_tags.append(('no-behavior', nb_tag, frame, video_time, fps))
        # If the event count is odd, pad one more no-behavior after the last event.
        # NOTE(review): `seconds` is a time value but is combined with a frame
        # index here — looks like it should be the total frame count; confirm.
        if len(frame_tags) % 2:
            event_ticker[all_tags["no-behavior"]] = event_ticker[all_tags["no-behavior"]] + 1
            frame = int(frame_tags[-1][2] + ((seconds - frame_tags[-1][2]) / 2))
            video_time = frame / fps
            nb_tags.append(('no-behavior', nb_tag, frame, video_time, fps))
        frame_tags.extend(nb_tags)
        # Put the events in order by frame
        frame_tags.sort(key=lambda y: y[2])
        with open(output_file, 'wb') as f:
            pickle.dump(frame_tags, f)
    print("Finished adjusting event times, found {0} tags...\n".format(total_tags))
    for tag in all_tags:
        print(f"{tag}: {event_ticker[all_tags[tag]]}")
| Munroe-Meyer-Institute-VR-Laboratory/Aggression-Detection | dataloader_utils.py | dataloader_utils.py | py | 25,420 | python | en | code | 0 | github-code | 50 |
8542873671 | from tkinter import *
from PIL import Image, ImageTk
import random as rd
import copy
class PlacedImage():
    """An image placed on the board at a fixed pixel position and rotation.

    Attributes:
        path:   file path of the image to load.
        x, y:   pixel coordinates of the placement.
        deg:    counter-clockwise rotation in degrees (PIL convention).
        height, width: target size in pixels the image is resized to.
    """
    def __init__(self, path, x, y, deg, height=128, width=128):
        self.path = path
        self.x = x
        self.y = y
        self.deg = deg
        self.height = height
        self.width = width

    def getImageTk(self):
        """Load, resize and rotate the image; return a Tk-compatible photo image.

        Bugfix: PIL's ``Image.resize`` expects a ``(width, height)`` tuple; the
        previous code passed ``(height, width)``, which only worked because the
        two defaults are equal. Non-square sizes are now handled correctly.
        """
        img = Image.open(self.path).resize((self.width, self.height))
        img_r = img.rotate(self.deg)
        return ImageTk.PhotoImage(img_r)

    def getCoord(self):
        """Return the placement coordinates as an (x, y) tuple."""
        return (self.x, self.y)
class Imagelist():
    """Collects PlacedImage instances laid out on a regular grid of cells."""

    def __init__(self, size=128):
        self.list = []
        self.size = size  # edge length in pixels of one grid cell

    def setImageSize(self, newSize):
        """Change the cell size used for images added after this call."""
        self.size = newSize

    def addDefault(self, name, x, y, rot=0):
        """Place the stock module *name* at grid cell (x, y), rotated rot * 90 degrees.

        Note: the placed image keeps PlacedImage's default pixel dimensions.
        """
        default_path = "default_modules/" + name + ".png"
        self.list.append(
            PlacedImage(default_path, self.size * x, self.size * y, 90 * rot))

    def add(self, path, x, y, rot=0):
        """Place an arbitrary image file at grid cell (x, y), scaled to the cell size."""
        placed = PlacedImage(path, self.size * x, self.size * y, 90 * rot,
                             height=self.size, width=self.size)
        self.list.append(placed)

    def getImageTkList(self):
        """Return a Tk photo image for every placed image, in insertion order."""
        return [placed.getImageTk() for placed in self.list]

    def getCoordList(self):
        """Return the pixel coordinates of every placed image, in insertion order."""
        return [placed.getCoord() for placed in self.list]
class StreetLocations():
    """Street/corner/custom-connection layout of one module's four sides.

    Side indices run 0-3; per SimpleImage.isCompatible they mean
    0 = right, 1 = top, 2 = left, 3 = bottom.
    """
    def __init__(self, string, cornerstring, custom=None):
        if custom is None:
            # 16 "don't care" custom-connection slots per side.
            custom = ["-" * 16, "-" * 16, "-" * 16, "-" * 16]
        self.loc = string  # streets: '1' marks a street on that side
        self.corners = cornerstring  # corners: '1' marks a corner on that side
        self.custom = custom  # custom connections: one 16-char string per side
        self.custom_important = [False, False, False, False]  # per-side "must match customs" flag
        self.key = ["", "", "", ""]  # per-side match key used during placement

    def getCustomImportant(self, direction):
        """Return whether custom connections on the given side must be honoured."""
        direction = direction % 4
        return self.custom_important[direction]

    def setCustomImportant(self, direction, toSet):
        """Mark/unmark the given side's custom connections as mandatory."""
        direction = direction % 4
        self.custom_important[direction] = toSet

    def getKey(self, direction):
        """Return the placement-match key of the given side."""
        direction = direction % 4
        return self.key[direction]

    def setKey(self, direction, key):
        """Set the placement-match key of the given side."""
        direction = direction % 4
        self.key[direction] = key

    def rotate(self, rotation):
        """Rotate the whole layout in-place by rotation * 90 degrees."""
        real_rot = rotation % 4
        for i in range(real_rot):
            # One quarter turn: the last side wraps around to become the first.
            self.loc = self.loc[3] + self.loc[:3]
            self.corners = self.corners[3] + self.corners[:3]
            newcustom = [self.custom[3], self.custom[0], self.custom[1], self.custom[2]]
            self.custom = newcustom
            newkey = [self.key[3], self.key[0], self.key[1], self.key[2]]
            self.key = newkey
            newcustom_important = [self.custom_important[3], self.custom_important[0], self.custom_important[1], self.custom_important[2]]
            self.custom_important = newcustom_important

    def setCustomConnection(self, side, index, connectionKey):
        """Set one custom-connection slot; silently ignored unless connectionKey is one char."""
        # side must be 0-3, index 0-15
        if(len(connectionKey) == 1):
            l = list(self.custom[side])
            l[index] = connectionKey
            self.custom[side] = "".join(l)

    def getCustomConnection(self, side, index):
        """Return the custom-connection character at the given side and slot."""
        return self.custom[side][index]

    def customConnectionMatch(self, other, side, compareToNextAlso=False):
        """True if this side's custom connections line up with *other*'s facing side.

        Slots are compared mirrored (slot i against 15-i) because the facing
        side is traversed in the opposite direction. With compareToNextAlso,
        a slot also matches when offset by one position in either direction.
        """
        side = side % 4
        other_side = (side + 2) % 4
        assumption = True
        for i in range(16):
            toCompare = self.getCustomConnection(side, i)
            if(toCompare == other.getCustomConnection(other_side, 15-i)):
                continue
            if(i < 15 and compareToNextAlso):
                # compare against the neighbouring slot one lower
                if(toCompare == other.getCustomConnection(other_side, 14-i)):
                    continue
            if(i > 0 and compareToNextAlso):
                # compare against the neighbouring slot one higher
                if(toCompare == other.getCustomConnection(other_side, 16-i)):
                    continue
            assumption = False
            break
        return assumption

    def __repr__(self):
        # Human-readable summary of streets and corners.
        sides = []
        if (self.loc[0] == '1'):
            sides.append("right")
        if (self.loc[1] == '1'):
            sides.append("top")
        if (self.loc[2] == '1'):
            sides.append("left")
        if (self.loc[3] == '1'):
            sides.append("bottom")
        sides_nice = ", ".join(sides)
        cornersides = []
        if (self.corners[0] == '1'):
            cornersides.append("upper-right")
        if (self.corners[1] == '1'):
            cornersides.append("upper-left")
        if (self.corners[2] == '1'):
            cornersides.append("lower-left")
        if (self.corners[3] == '1'):
            cornersides.append("lower-right")
        corners_nice = ", ".join(cornersides)
        return "The Module has a Street on the " + sides_nice + " side and a corner on the " + corners_nice + " side. Cornerstring: " + self.corners + ", Customs:\n" +\
               str(self.custom)
class SimpleImage():
    """Lightweight record of one module image: its path, rotation and street layout."""

    def __init__(self, path, rot, isDefaultImage=False):
        self.streetlocations = None
        if(isDefaultImage):
            # Default modules resolve to a file in default_modules/ and carry a
            # hard-coded street/corner layout which is then rotated into place.
            self.path = "default_modules/"+path+".png"
            if(path == "O"):
                self.streetlocations = StreetLocations("1111", "1111")
            if(path == "I00"):
                self.streetlocations = StreetLocations("0010", "0110")
            if(path == "I01"):
                self.streetlocations = StreetLocations("0010", "1110")
            if(path == "I10"):
                self.streetlocations = StreetLocations("0010", "0111")
            if(path == "I11"):
                self.streetlocations = StreetLocations("0010", "1111")
            if(path == "II"):
                self.streetlocations = StreetLocations("0101", "1111")
            if(path == "L0"):
                self.streetlocations = StreetLocations("1001", "1011")
            if(path == "L1"):
                self.streetlocations = StreetLocations("1001", "1111")
            if(path == "None0000"):
                self.streetlocations = StreetLocations("0000", "0000")
            if(path == "None0001"):
                self.streetlocations = StreetLocations("0000", "0001")
            if(path == "None0011"):
                self.streetlocations = StreetLocations("0000", "0011")
            if(path == "None0101"):
                self.streetlocations = StreetLocations("0000", "0101")
            if(path == "None0111"):
                self.streetlocations = StreetLocations("0000", "0111")
            if(path == "None1111"):
                self.streetlocations = StreetLocations("0000", "1111")
            if(path == "U"):
                self.streetlocations = StreetLocations("1101", "1111")
            # NOTE: an unknown default name leaves streetlocations as None and
            # this call raises AttributeError (fail fast on a typo).
            self.streetlocations.rotate(rot)
        else:
            # Custom modules: the layout is attached later via setStreetLocations.
            self.path = path
        self.rot = rot
        self.defaultImage = isDefaultImage

    def getKey(self, direction):
        """Return the placement-match key of the given side (0-3, wrapped)."""
        direction = direction % 4
        return self.streetlocations.key[direction]

    def isDefault(self):
        """Return True if this is one of the stock default modules."""
        return self.defaultImage

    def setStreetLocations(self, streetlocationselement):
        """Attach a StreetLocations layout and rotate it to this image's rotation."""
        self.streetlocations = streetlocationselement
        self.streetlocations.rotate(self.rot)

    def isCompatible(self, other, location, mindCustoms=False):
        """
        Determines if image is compatible to another image, when other image is placed on location (0 = right, 1 = top, 2 = left, 3 = bottom) relative to this image.
        """
        if(self.streetlocations == None or other.streetlocations == None):
            return False
        real_location = location % 4
        other_real_location = (real_location + 2) % 4  # other's side facing us
        #print(f"self ({self.path}) custom: {self.streetlocations.custom[real_location]}, other ({other.path}) custom: {other.streetlocations.custom[other_real_location]}, mindCustoms: {mindCustoms}")
        this_street_on_location = self.streetlocations.loc[real_location]
        other_street_on_location = other.streetlocations.loc[other_real_location]
        # Case 1: street meets street — compatible, optionally also requiring
        # the custom connections to line up.
        if(this_street_on_location == '1' and other_street_on_location == '1'):
            if(mindCustoms and not self.isDefault()):
                return self.streetlocations.customConnectionMatch(other.streetlocations, location)
            else:
                return True
        # Case 2: no street on either side — the two corner flags along the
        # shared edge must mirror each other on both ends.
        if(this_street_on_location == '0' and other_street_on_location == '0'):
            this_before_location = (real_location + 3) % 4
            other_before_location = (other_real_location + 3) % 4
            this_corner_on_location1 = self.streetlocations.corners[real_location]
            this_corner_on_location0 = self.streetlocations.corners[this_before_location]
            other_corner_on_location1 = other.streetlocations.corners[other_real_location]
            other_corner_on_location0 = other.streetlocations.corners[other_before_location]
            if(this_corner_on_location1 == other_corner_on_location0 and this_corner_on_location0 == other_corner_on_location1):
                if (mindCustoms and not self.isDefault()):
                    return self.streetlocations.customConnectionMatch(other.streetlocations, location)
                else:
                    return True
        # Street on exactly one side: never compatible.
        return False

    def __repr__(self):
        return "SimpleImage: Path: " + self.path + ", Rot: " + str(self.rot) + ", StreetLocations: " + str(self.streetlocations) + "\n"
class AllDefaultAllRotationsImageList():
    """Catalogue of every default module image in each of its distinct rotations."""

    # (module name, number of distinct rotations); symmetric modules need fewer.
    _ROTATION_COUNTS = [
        ("I00", 4), ("I01", 4), ("I10", 4), ("I11", 4),
        ("II", 2),
        ("L0", 4), ("L1", 4),
        ("None0000", 1), ("None0001", 1), ("None0011", 1),
        ("None0101", 1), ("None0111", 1), ("None1111", 1),
        ("O", 1),
        ("U", 4),
    ]

    def __init__(self):
        catalogue = []
        for name, rotations in self._ROTATION_COUNTS:
            for rot in range(rotations):
                catalogue.append(SimpleImage(name, rot, isDefaultImage=True))
        self.list = catalogue
class ImageArray():
    """Sparse 2D grid of SimpleImage modules keyed by (x, y), plus a pool of
    high-priority custom modules that are tried before the default modules."""

    # Direction index -> (dx, dy); 0 = right, 1 = up, 2 = left, 3 = down.
    _DELTAS = {0: (1, 0), 1: (0, -1), 2: (-1, 0), 3: (0, 1)}

    def __init__(self):
        self.dict = {}
        # list for high priority customs (must be placed, are always considered first)
        self.highPriority = []

    def addHighPriority(self, path, streetlocations):
        """Register a custom module (image path + StreetLocations) for priority placement."""
        self.highPriority.append((path, streetlocations))

    def enrichPriority(self):
        """Return every high-priority module as SimpleImages in all four rotations."""
        out = []
        for path, streetlocations in self.highPriority:
            for r in range(4):
                si = SimpleImage(path, r)
                # Copy so rotating one candidate does not mutate the shared layout.
                si.setStreetLocations(copy.copy(streetlocations))
                out.append(si)
        return out

    def removeFromPriority(self, path):
        """Remove the first high-priority entry whose image path equals *path*."""
        for entry in self.highPriority:
            if entry[0] == path:
                self.highPriority.remove(entry)
                break

    def getImage(self, x, y):
        """Return the SimpleImage at grid position (x, y); raises KeyError if empty."""
        return self.dict[(x, y)]

    def addImage(self, path, x, y, rot, default=False, streetlocations=None):
        """Place an image at (x, y), optionally attaching a pre-rotated layout."""
        si = SimpleImage(path, rot, isDefaultImage=default)
        if streetlocations is not None:
            si.streetlocations = streetlocations
        self.dict[(x, y)] = si

    def addDefaultImage(self, name, x, y, rot=0):
        """Place the stock default module *name* at (x, y)."""
        self.dict[(x, y)] = SimpleImage(name, rot, isDefaultImage=True)

    def toImageList(self, size=128):
        """Convert the grid into an Imagelist of PlacedImages with cell size *size*."""
        out = Imagelist(size)
        for (x, y), image in self.dict.items():
            out.add(image.path, x, y, image.rot)
        return out

    def __repr__(self):
        out = "ImageArray: \n"
        for key in self.dict:
            out += "Item " + str(self.dict[key]) + " at position " + str(key) + ".\n"
        return out

    def getNeighbors(self, x, y):
        """
        returns all neighbors next to x and y, as well as their relative direction
        """
        out = []
        for direction in range(4):
            neighbor = self.getNeighbor(x, y, direction)
            if neighbor is not None:
                out.append((neighbor, direction))
        return out

    def getNeighbor(self, x, y, direction):
        """
        returns the neighbor next to x and y in the specified direction, or None
        """
        coords = self.getNeighborCoords(x, y, direction)
        if coords in self.dict:
            return self.dict[coords]
        return None

    def getNeighborCoords(self, x, y, direction):
        """Return the (x, y) coordinates of the cell in *direction*; None if invalid."""
        delta = self._DELTAS.get(direction)
        if delta is None:
            return None
        return (x + delta[0], y + delta[1])

    def isOutside(self, x, y, max_x, max_y):
        """True if (x, y) lies outside the board of size max_x by max_y."""
        return x < 0 or x >= max_x or y < 0 or y >= max_y

    def countDefaults(self):
        """Count how many placed modules are stock default images."""
        count = 0  # renamed from `sum`, which shadowed the builtin
        for image in self.dict.values():
            if image.isDefault():
                count += 1
        return count

    def init_array(self, size_x, size_y, mindCustoms=False, consumeCustoms=True, verbose=False):
        """
        Initializes an image array with width size_x and height size_y.
        The high priority images are tried to be placed first.
        The default images are selected at random with equal probability.
        Output is always correct if the returned success flag is True.
        """
        self.dict = {}
        success = True  # assumption until a cell cannot be filled
        for x in range(size_x):
            for y in range(size_y):
                if verbose: print(f"({x},{y})")
                highPrio = self.enrichPriority()
                allDefaults = AllDefaultAllRotationsImageList().list
                # --- First pass: try to place a high-priority custom module ---
                for direction in range(4):
                    neighbor = self.getNeighbor(x, y, direction)
                    neighbor_coords = self.getNeighborCoords(x, y, direction)
                    neighbor_outside = self.isOutside(neighbor_coords[0], neighbor_coords[1], size_x, size_y)
                    if verbose: print(f"outside: {neighbor_outside}")
                    stillPossible = []
                    for possible in highPrio:
                        # Key exposed by the neighbour towards this cell:
                        # "" for the board edge, None for an empty inner cell.
                        if neighbor is not None:
                            neighborKey = neighbor.getKey((direction + 2) % 4)
                        elif neighbor_outside:
                            neighborKey = ""
                        else:
                            neighborKey = None
                        possibleKey = possible.getKey(direction)
                        if verbose: print(f"ok: {possibleKey} (d: ({direction})")
                        if verbose: print(f"nk: {neighborKey} (d: {(direction + 2) % 4})")
                        # check for fitting
                        if neighborKey is None or possibleKey == neighborKey:
                            if verbose: print("possibleKey == neighborKey")
                            if neighbor is None or possible.isCompatible(neighbor, direction, mindCustoms=mindCustoms):
                                stillPossible.append(possible)
                    highPrio = stillPossible
                if verbose: print(f"Possibles: {highPrio}")
                if len(highPrio) == 0 and mindCustoms:
                    # The custom-connection restriction may have been too strict:
                    # retry, honouring customs only where either side marks them
                    # as important.
                    highPrio = self.enrichPriority()
                    for direction in range(4):
                        neighbor = self.getNeighbor(x, y, direction)
                        neighbor_coords = self.getNeighborCoords(x, y, direction)
                        neighbor_outside = self.isOutside(neighbor_coords[0], neighbor_coords[1], size_x, size_y)
                        if verbose: print(f"outside: {neighbor_outside}")
                        stillPossible = []
                        for possible in highPrio:
                            if neighbor is not None:
                                neighborKey = neighbor.getKey((direction + 2) % 4)
                                neighbor_important = neighbor.streetlocations.getCustomImportant((direction + 2) % 4)
                            elif neighbor_outside:
                                neighborKey = ""
                            else:
                                neighborKey = None
                            possibleKey = possible.getKey(direction)
                            # check for fitting
                            if neighborKey is None or possibleKey == neighborKey:
                                if verbose: print("possibleKey == neighborKey")
                                if neighbor is None or possible.isCompatible(neighbor, direction, mindCustoms=(possible.streetlocations.getCustomImportant(direction) or neighbor_important)):
                                    stillPossible.append(possible)
                        highPrio = stillPossible
                if len(highPrio) != 0:
                    # A high-priority candidate fits: place it.
                    choose = rd.choice(highPrio)
                    self.addImage(choose.path, x, y, choose.rot, streetlocations=choose.streetlocations)
                    if consumeCustoms:
                        self.removeFromPriority(choose.path)
                    continue  # continue with next field
                # --- Second pass: fall back to a random compatible default module ---
                for direction in range(4):
                    neighbor = self.getNeighbor(x, y, direction)
                    stillPossible = []
                    for possible in allDefaults:
                        if neighbor is None or possible.isCompatible(neighbor, direction, mindCustoms=mindCustoms):
                            stillPossible.append(possible)
                    allDefaults = stillPossible
                if len(allDefaults) == 0:
                    print("no default found..")
                    success = False
                    break
                else:
                    choose = rd.choice(allDefaults)
                    # Strip the "default_modules/" prefix (16 chars) and ".png" suffix.
                    self.addDefaultImage(choose.path[16:-4:], x, y, choose.rot)
                    # BUGFIX: previously `not choose.isDefault` tested a truthy
                    # bound method (always False) and the guarded call passed
                    # `high=False`, a keyword removeFromPriority does not accept
                    # (a latent TypeError). All candidates here are defaults, so
                    # this branch still never fires — behavior is unchanged, but
                    # the latent crash is gone.
                    if (not choose.isDefault()) and consumeCustoms:
                        self.removeFromPriority(choose.path)
            if not success:
                break
        return success
| leogummersbach/micropolis-autoplace | image_array.py | image_array.py | py | 20,410 | python | en | code | 0 | github-code | 50 |
72984219036 | from python_tsl2591 import tsl2591
import os
import subprocess
import time
import influxdb_client
from influxdb_client.client.write_api import SYNCHRONOUS
bucket = "main"
org = "Main"
# The InfluxDB API token must be set in the environment; a missing variable
# raises KeyError at startup, which fails fast before any sensor reads.
token = os.environ['INFLUX_DB_TOKEN']
url = "http://34.122.138.205:8086"
client = influxdb_client.InfluxDBClient(
    url=url,
    token=token,
    org=org
)
# Synchronous writes: each write() blocks until InfluxDB acknowledges the point.
write_api = client.write_api(write_options=SYNCHRONOUS)
# TSL2591 ambient-light sensor handle (I2C).
tsl = tsl2591()
# Poll forever: read the sensor, push one point per measurement field, sleep 5s.
while True:
    try:
        # {'lux': 514.892736, 'full': 19038, 'ir': 7761, 'gain': 16, 'integration_time': 1}
        data = tsl.get_current()
        print("Current Light measurement: " + str(data))
        p = influxdb_client.Point("light_measurement").tag("location", "Warsaw").tag("model", "RaspberryPi Zero W").field("light_lux", data["lux"])
        write_api.write(bucket=bucket, org=org, record=p)
        p = influxdb_client.Point("light_measurement").tag("location", "Warsaw").tag("model", "RaspberryPi Zero W").field("light_full", data["full"])
        write_api.write(bucket=bucket, org=org, record=p)
        p = influxdb_client.Point("light_measurement").tag("location", "Warsaw").tag("model", "RaspberryPi Zero W").field("light_ir", data["ir"])
        write_api.write(bucket=bucket, org=org, record=p)
        time.sleep(5)
    except Exception as e:
        # Best-effort loop: log any sensor/network failure and keep polling.
        print("Exception!")
        print(e)
        continue
| kmazur/plants | python/reporting/light_sensor.py | light_sensor.py | py | 1,353 | python | en | code | 0 | github-code | 50 |
72598569436 | from math import ceil
from typing import Union
from .backends import NumpyBackend, TorchBackend
from .truncator import SvdTruncator, QrTruncator, EigTruncator, QrTruncatorWithCBE
class iTEBD:
    """
    Conventions:
        MPS tensors, e.g. B have legs [vL, p, vR], i.e. left virtual, physical, right virtual
        S matrices either [vL, vR] for QR based TEBD (S is an actual matrix), or [v] for SVD based (S is diagonal)
        S_list[i] sits on the bond (i-1)----(i)
    """
    def __init__(self, B_list, S_list,
                 backend: Union[NumpyBackend, TorchBackend],
                 truncator: Union[SvdTruncator, QrTruncator, EigTruncator, QrTruncatorWithCBE],
                 cbe_increase_fraction: float = None,
                 ):
        # B_list / S_list define one unit cell of the infinite MPS.
        self.B_list = [backend.asarray(B) for B in B_list]  # [vL, p, vR]
        self.L = len(B_list)  # length of unit cell
        self.S_list = [backend.asarray(S) for S in S_list]  # either [vL, vR] or [v]
        self.backend = backend
        self.truncator = truncator
        # CBE (controlled bond expansion) needs to know how much to grow chi.
        if isinstance(truncator, QrTruncatorWithCBE):
            assert cbe_increase_fraction is not None
        self.cbe_increase_fraction = cbe_increase_fraction

    def get_B(self, i):
        """Return the B tensor at site i, wrapped periodically into the unit cell."""
        return self.B_list[i % self.L]

    def get_S(self, i):
        """Return the S matrix on bond (i-1)--(i), wrapped periodically."""
        return self.S_list[i % self.L]

    def set_B(self, i, B):
        """Set the B tensor at site i (periodic indexing)."""
        self.B_list[i % self.L] = B

    def set_S(self, i, S):
        """Set the S matrix on bond (i-1)--(i) (periodic indexing)."""
        self.S_list[i % self.L] = S

    def current_chi(self):
        """Return the largest left virtual bond dimension over the unit cell."""
        return max(B.shape[0] for B in self.B_list)

    def sweep(self, bond_gates, chi_max, direction='R', num_qr_iters=1, Z_init_from_old_B=True, compute_err=True):
        """
        Perform a single TEBD sweep, i.e. apply every bond gate once, sequentially.
        bond_gates[i] is to be applied to sites (i, i+1) and has legs [p1, p2, p1*, p2*].
        returns a list of truncation errors
        """
        if direction == 'R':
            return self._right_sweep(bond_gates, chi_max, num_qr_iters=num_qr_iters,
                                     Z_init_from_old_B=Z_init_from_old_B, compute_err=compute_err)
        elif direction == 'L':
            # A left sweep is a right sweep on the mirrored MPS with
            # gates reversed and each gate's two sites transposed.
            self.mirror_mps()
            bond_gates = [self.backend.transpose(gate, (1, 0, 3, 2)) for gate in reversed(bond_gates)]
            res = self._right_sweep(bond_gates, chi_max, num_qr_iters=num_qr_iters,
                                    Z_init_from_old_B=Z_init_from_old_B, compute_err=compute_err)
            self.mirror_mps()
            return res
        else:
            raise ValueError

    def _right_sweep(self, bond_gates, chi_max, num_qr_iters=1, Z_init_from_old_B=True, compute_err=True):
        """Apply all bond gates left-to-right, truncating each bond to chi_max.

        Returns (total truncation error or None, relative norm change of the
        last bond update).
        """
        errs = []
        rel_norm_change = 0
        for i in range(self.L):
            # Contract the two-site wavefunction and apply the gate.
            C = self.backend.tensordot(self.get_B(i), self.get_B(i + 1), ((2,), (0,)))  # [vL, p1, p2, vR]
            C = self.backend.tensordot(C, bond_gates[i], ((1, 2), (2, 3)))  # [vL, vR, p1, p2]
            C = self.backend.transpose(C, (0, 2, 3, 1))  # [vL, p1, p2, vR]
            theta = self.truncator.apply_left_S(self.get_S(i), C)  # [vL, p1, p2, vR])
            if isinstance(self.truncator, QrTruncatorWithCBE):
                # Controlled bond expansion: decompose at an enlarged bond eta.
                chi, d, *_ = theta.shape
                eta = max(100, ceil((1 + self.cbe_increase_fraction) * chi))
                if eta != chi:
                    Z_init_from_old_B = False
            else:
                eta = None
            X, Y, Z, N, trunc_err, theta_norm_sq = self.truncator.decompose(
                theta, chi_max=chi_max, num_iters=num_qr_iters,
                Z_init=self.get_B(i + 1) if Z_init_from_old_B else None,
                compute_err=compute_err, eta=eta
            )
            # <psi(t+dt)|psi(t+dt)> / <psi(t)|psi(t)> - 1
            rel_norm_change = theta_norm_sq - 1
            # inversion-free TEBD trick: want S^{-1} @ X @ Y here,
            # can use that C = S^{-1} @ theta = N * S^{-1} @ X @ Y @ Z ; and that Z is orthogonal
            new_B_i = self.backend.tensordot(C, self.backend.conj(Z), ((2, 3), (1, 2))) / N
            self.set_B(i, new_B_i)
            self.set_S(i + 1, Y)
            self.set_B(i + 1, Z)
            errs.append(trunc_err)
        if compute_err:
            err = sum(errs)
        else:
            err = None
        return err, rel_norm_change

    def mirror_mps(self):
        """Spatially invert the MPS in-place (swap left/right virtual legs)."""
        self.B_list = [self.backend.transpose(B, (2, 1, 0)) for B in reversed(self.B_list)]
        self.S_list = [self.truncator.mirror_S(S) for S in reversed(self.S_list)]

    def site_expvals(self, O_list):
        """
        expectation values of site operators. O_list[i] acts on site i and has legs [p, p*].
        does not assume hermiticity and computes complex values.
        """
        assert len(O_list) == self.L
        expvals = []
        for i in range(self.L):
            sB = self.truncator.apply_left_S(self.get_S(i), self.get_B(i))  # [vL, p, vR]
            C = self.backend.tensordot(sB, O_list[i], ((1,), (1,)))  # [vL, vR, p]
            val = self.backend.tensordot(C, self.backend.conj(sB), ((0, 1, 2), (0, 2, 1)))
            expvals.append(float(self.backend.real(val).item()))
        return expvals

    def bond_expvals(self, O_list):
        """expectation values of site operators. O_list[i] acts on sites i, i+1 and has legs [p1, p2, p1*, p2*]
        does not assume hermiticity and computes complex values."""
        assert len(O_list) == self.L
        expvals = []
        for i in range(self.L):
            sB = self.truncator.apply_left_S(self.get_S(i), self.get_B(i))  # [vL, p, vR]
            sBB = self.backend.tensordot(sB, self.get_B(i + 1), ((2,), (0,)))  # [vL, p1, p2, vR]
            C = self.backend.tensordot(sBB, O_list[i], ((1, 2), (2, 3)))  # [vL, vR, p1, p2]
            val = self.backend.tensordot(C, self.backend.conj(sBB), ((0, 2, 3, 1), (0, 1, 2, 3)))
            expvals.append(val.item())
        return expvals

    def entanglement_entropy(self, i=0):
        """half-chain von-Neumann entanglement entropy on bond (i-1)---(i)"""
        return self.truncator.vN_entropy(self.get_S(i))

    def mps_to_np(self):
        """
        Return the MPS in the following format:
        A tuple, the first entry is the list of B tensors as numpy arrays, the second the list of S tensors as np arrays
        """
        return [self.backend.to_np(B) for B in self.B_list], [self.backend.to_np(S) for S in self.S_list]
| Jakob-Unfried/Fast-Time-Evolution-of-MPS-using-QR | code/tebd.py | tebd.py | py | 6,404 | python | en | code | 0 | github-code | 50 |
39610267400 | import requests
import json
def api1_call():
    """Request an OAuth2 access token from the VNPT eContract staging gateway.

    Uses the client-credentials grant and returns the raw requests.Response
    (the caller inspects status/body).
    """
    # NOTE(review): client credentials are hard-coded here; they should live in
    # environment variables or a config file, not in source control.
    token_url = "https://apigateway-econtract-staging.vnptit3.vn/auth-service/oauth/token"
    body = {
        "grant_type": "client_credentials",
        "client_id": "test.client@econtract.vnpt.vn",
        "client_secret": "U30nrmdko76057dz5aQvV9ug0mTsqAQy"
    }
    headers = {'Content-Type': 'application/json'}
    return requests.request("POST", token_url, headers=headers, data=json.dumps(body))
| huynhbaokhanh/khanhhb-vttagg | api1.py | api1.py | py | 456 | python | en | code | 0 | github-code | 50 |
4948936869 | from direct.showbase.DirectObject import DirectObject
from direct.actor.Actor import Actor
class Player(DirectObject):
    """Keyboard- and mouse-controlled panda actor (Panda3D).

    WASD keys drive movement flags that are consumed each frame by the
    `updateactor` task; mouse X movement steers the heading.
    """
    def __init__(self, parent, resetMouse):
        """
        It's assumed parent is render
        """
        self.resetMouse = resetMouse
        self.actor = Actor("panda", {"walk": "panda-walk"})
        self.actor.reparentTo(parent)
        self.parent = parent
        # don't use -repeat because of slight delay after keydown
        self.actorWalk = False
        self.actorReverse = False
        self.actorLeft = False
        self.actorRight = False
        # Key-down sets a movement flag; key-up clears it.
        self.accept("w", self.beginWalk)
        self.accept("w-up", self.endWalk)
        self.accept("s", self.beginReverse)
        self.accept("s-up", self.endReverse)
        self.accept("a", self.beginTurnLeft)
        self.accept("a-up", self.endTurnLeft)
        self.accept("d", self.beginTurnRight)
        self.accept("d-up", self.endTurnRight)
        # Per-frame movement update (Panda3D global task manager).
        taskMgr.add(self.updateactor, "update actor")

    def beginWalk(self):
        # Play the walk animation forward while moving.
        self.actor.setPlayRate(1.0, "walk")
        self.actor.loop("walk")
        self.actorWalk = True

    def endWalk(self):
        self.actor.stop()
        self.actorWalk = False

    def beginReverse(self):
        # Play the walk animation backwards while reversing.
        self.actor.setPlayRate(-1.0, "walk")
        self.actor.loop("walk")
        self.actorReverse = True

    def endReverse(self):
        self.actor.stop()
        self.actorReverse = False

    def beginTurnLeft(self):
        self.actorLeft = True

    def endTurnLeft(self):
        self.actorLeft = False

    def beginTurnRight(self):
        self.actorRight = True

    def endTurnRight(self):
        self.actorRight = False

    def updateactor(self, task):
        """Per-frame task: apply mouse steering and the active movement flags."""
        # in case we need to restore position due to collisions
        # NOTE(review): start_position is captured but never used below —
        # presumably intended for future collision handling; confirm.
        start_position = self.actor.getPos()
        if base.mouseWatcherNode.hasMouse():
            # Steer by horizontal mouse offset, then recentre the pointer.
            self.actor.setH(self.actor, -base.mouseWatcherNode.getMouseX() * 10)
            taskMgr.add(self.resetMouse, "reset mouse")
        if self.actorWalk:
            self.actor.setY(self.actor, -0.2)
        elif self.actorReverse:
            self.actor.setY(self.actor, 0.2)
        if self.actorLeft:
            self.actor.setH(self.actor, 0.8)
        elif self.actorRight:
            self.actor.setH(self.actor, -0.8)
return task.cont | ryancollingwood/panda3d-test | player.py | player.py | py | 2,411 | python | en | code | 0 | github-code | 50 |
73821526874 | import pickle
from DataProcessing import ModuleStormReader as Msr
from DataProcessing import ModuleReanalysisData as Mre
from DataProcessing import MyModuleFileFolder as MMff
def load_data_storm_interim(types, size_crop=7, levtype='sfc', pkl_inputfile='./data/tracks.pkl', folder_data='./data/',
                            folder_saving='./features/', folderLUT='./data/', levels=None, flag_write_ys=True,
                            flag_write_X=True, history=0):
    """Extract per-storm feature grids (X) and target values (y) from ERA-Interim
    reanalysis data and write them as pickles under folder_saving.

    types:         reanalysis variable types passed through to Mre (project module)
    size_crop:     half-size of the crop window around the storm centre
    levtype:       reanalysis level type (e.g. 'sfc')
    pkl_inputfile: pickle of storm tracks produced by Msr
    levels:        pressure levels to extract (None -> levtype default, presumably)
    flag_write_ys: write target pickles (categories, displacement, wind)
    flag_write_X:  write input-grid pickles
    history:       number of earlier 6-hour instants to include per window
    """
    # Echo the run configuration (useful in batch-job logs).
    print(levels, flush=True)
    print(flag_write_ys, flush=True)
    print(flag_write_X, flush=True)
    print(folder_saving, flush=True)
    print(levtype, flush=True)
    print(history, flush=True)
    list_tracks=Msr.load_list_tracks_from_pkl(pkl_inputfile=pkl_inputfile)
    dict_tracks={}
    for track in list_tracks:
        dict_tracks[track.stormid]=track
    # Track the year only to print progress as processing advances.
    year_curr=list_tracks[0].dates[0][0]
    for track in list_tracks:
        # if track.dates[0][0]< 2010: # or track.dates[0][0]>1988:
        #    continue
        year = track.dates[0][0]
        if year>year_curr:
            print(str(year), flush=True)
            year_curr=year
        # One window per instant except the last (needs a "next" instant for y).
        range_windows = range(int(track.Ninstants-1))
        ### get X ###
        # get grid data
        if flag_write_X:
            X=Mre.get_windows_from_track_interim(track, range_windows, types, size_crop=size_crop,
                                                 folder_data=folder_data, levtype=levtype, folderLUT=folderLUT,
                                                 levels=levels,history=history)
        if flag_write_ys:
            y_curr_cat = []
            y_next_disp = []
            y_curr_longlat=[]
            y_windspeed=[]
            for delay in range_windows:
                #### get Y #######
                y_curr_cat.append(track.categories[delay])
                y_next_disp.append(Msr.get_disp_long_lat(track.stormid, delay,
                                                         delay +1,storm=dict_tracks[track.stormid]))
                y_curr_longlat.append([track.longitudes[delay],track.latitudes[delay]])
                y_windspeed.append(track.windspeeds[delay])
            # Append the final instant too (no displacement target exists for it).
            y_curr_cat.append(track.categories[-1])
            y_curr_longlat.append([track.longitudes[-1], track.latitudes[-1]])
            y_windspeed.append(track.windspeeds[-1])
        #### write files #####
        if flag_write_X:
            folder_saving_X=folder_saving+'X_'+levtype+'_crop'+str(size_crop)+'_r_vo_w/'
            if history:
                # Encode the history depth (in hours, 6h per instant) in the folder name.
                folder_saving_X=folder_saving_X[:-1]+'_historic'+str(6*history)+'h/'
            print(folder_saving_X, flush=True)
            MMff.MakeDir(folder_saving_X)
            with open(folder_saving_X+str(track.stormid)+'.pkl', 'wb') as file_X:
                pickle.dump({'grids':X},file_X)
        if flag_write_ys:
            # One pickle per storm for each target kind, with a self-describing
            # 'infos' string stored alongside the data.
            MMff.MakeDir(folder_saving + 'y_categories2/')
            infos='curr_cat : current category of the storm (between 0 and 7). From t=0 to t=n \n'
            with open(folder_saving + 'y_categories2/' + str(track.stormid) + '.pkl', 'wb') as file_cats:
                pickle.dump({'curr_cat': y_curr_cat, 'infos':infos}, file_cats)
            MMff.MakeDir(folder_saving + 'y_disp2/')
            infos = 'next_disp : next displacement (delta(longitude),delta(latitude)), in degree. From t=0 to t=n-1 \n'
            infos = infos+'curr_longlat: current longitude and latitude in degrees. From t=0 to t=n'
            with open(folder_saving + 'y_disp2/' + str(track.stormid) + '.pkl', 'wb') as file_disp :
                pickle.dump({'next_disp': y_next_disp, 'curr_longlat':y_curr_longlat, 'infos':infos}, file_disp)
            MMff.MakeDir(folder_saving + 'y_wind2/')
            infos = 'Current mean value of maximum sustained winds using a 10-minute average. \nIn knots. \nFrom t=0 to t=1.'
            with open(folder_saving + 'y_wind2/' + str(track.stormid) + '.pkl', 'wb') as file_wind :
                pickle.dump({'curr_wind': y_windspeed, 'infos': infos}, file_wind)
    print('Extracting data complete!', flush=True)
| sophiegif/FusionCNN_hurricanes | DataProcessing/ModuleFeatures.py | ModuleFeatures.py | py | 4,057 | python | en | code | 20 | github-code | 50 |
import sys
import copy
# NOTE(review): sys and copy are imported but unused.

# Advent of Code 2018, day 3: overlapping fabric claims.
# Each input line looks like "#1 @ 912,277: 27x20" -> id @ x,y: width x height.
f = open("input.txt", "r")
lines = f.readlines()
f.close()
#1 @ 912,277: 27x20


class Region:
    # One rectangular claim parsed from the input.
    i = 0  # claim id (kept as a string after parsing)
    x = 0  # left offset
    y = 0  # top offset
    w = 0  # width
    h = 0  # height


regions = []
for line in lines:
    # Split "id @ rest", dropping the leading '#'.
    parts1 = [p.replace("#", "").strip() for p in line.split("@")]
    r = Region()
    r.i = parts1[0]
    parts2 = parts1[1].split(": ")
    parts3 = parts2[0].split(",")
    r.x = int(parts3[0])
    r.y = int(parts3[1])
    parts4 = parts2[1].split("x")
    r.w = int(parts4[0])
    r.h = int(parts4[1])
    #print("p0: ", parts1[0], ", p1: ", parts1[1])
    #print(r.i, " ", r.x, " ", r.y, " ", r.w, " ", r.h)
    regions.append(r)

#for r in regions:
#    print("#", r.i, " @ ", r.x, ",", r.y, ": ", r.w, "x", r.h)

# Paint every claim onto a 1000x1000 grid, counting claims per square inch.
fabric = [[0 for x in range(1000)] for y in range(1000)]
for r in regions:
    for x in range(r.x, r.x + r.w):
        for y in range(r.y, r.y + r.h):
            fabric[x][y] += 1

# Part 1: number of square inches covered by two or more claims.
doubles = 0;
for x in range(1000):
    for y in range(1000):
        if fabric[x][y] > 1:
            doubles += 1
print("doubles: ", doubles)

# Part 2: the one claim whose rectangle overlaps no other claim.
for r in regions:
    ok = True
    for x in range(r.x, r.x + r.w):
        for y in range(r.y, r.y + r.h):
            if fabric[x][y] > 1:
                ok = False
    if ok:
        print("Ok claim:", r.i)
| torkeldanielsson/AoC_2018 | 03/program.py | program.py | py | 1,270 | python | en | code | 0 | github-code | 50 |
import csv
import matplotlib
import numpy as np
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from statistics import stdev

# Scatter-plots BFI values per algorithm (A1..A6) against a data-set property
# (here: number of instances), then plots the per-data-set BFI range against
# the same property and prints the correlation matrix.
#
# Fix: the original stored bare map() objects, which in Python 3 are one-shot
# iterators and not subscriptable -- legend[i], bfi_values[i], len(bfi_values),
# data_set_info[...][1] and max(bfis)/min(bfis) all failed. They are
# materialised as lists here.

legend = []
values = []  # one ([x-values], [y-values]) series per algorithm
markers = ["o", "*", "^", "h", "s", "D"]
colors = ["b", "g", "r", "c", "m", "k"]
for i in range(6):
    values.append(([], []))

# Read BFI values from csv file but exclude random feature selection
# [attributes, instances, classes]
data_set_info = []
data_set_bfis = []
with open('/Users/wmostert/Development/cos700researchproject/out/dataSetInformation.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=';')
    line_count = 0
    for row in csv_reader:
        if line_count > 0:  # skip the header row
            data_set_info.append([int(x) for x in row[2:len(row) - 1]])
        line_count += 1

with open('/Users/wmostert/Development/cos700researchproject/out/bfiTable.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=';')
    line_count = 0
    for row in csv_reader:
        if line_count == 0:
            # legend = row[2:-1]
            legend = ["A" + str(x) for x in range(1, 7)]
        if line_count > 1:
            bfi_values = [float(x.replace(",", ".")) for x in row[2:-1]]
            data_set_bfis.append(bfi_values)
            ##Change what is measured here
            measure = data_set_info[line_count - 2][1]  # index 1 == instances
            for i in range(0, len(bfi_values)):
                print(str(i) + " - " + str(measure))
                x = measure
                y = bfi_values[i]
                values[i][0].append(x)
                values[i][1].append(y)
        line_count += 1

fig, ax = plt.subplots()
for i in range(6):
    x = values[i][0]
    y = values[i][1]
    ax.scatter(x, y, label=legend[i], marker=markers[i], c=colors[i])
ax.legend(fontsize="small", loc='lower center', bbox_to_anchor=(0.5, -.3), ncol=2)
ax.grid(True)
plt.xlabel("# Instances")
plt.ylabel("BFI(s)")
fig.subplots_adjust(bottom=0.2)
fig.savefig("out/scatter-instances.png", pad_inches=1, dpi=150)

# Range (max - min) of BFI across algorithms, per data set.
std_dev_bfi_vals = [(max(bfis) - min(bfis)) for bfis in data_set_bfis]
stdev_x = [info[1] for info in data_set_info]
fig, ax = plt.subplots()
ax.scatter(stdev_x, std_dev_bfi_vals, c='k')
corr = np.corrcoef(stdev_x, std_dev_bfi_vals)
print("Correlation coefficient is : " + str(corr))
ax.grid(True)
plt.xlabel("# Instances")
plt.ylabel("Range")
# plt.xlim(0, 800)
fig.savefig("out/scatter-instances-range.png", pad_inches=1, dpi=150)
| WMostert1/cos700researchproject | StatisticalTests/scatter_plot.py | scatter_plot.py | py | 2,439 | python | en | code | 0 | github-code | 50 |
25564357080 | from typing import List
class Solution:
    """LeetCode 152 - Maximum Product Subarray.

    Both methods return the largest product over all non-empty contiguous
    subarrays of ``nums`` (0 for an empty input). Because a negative factor
    flips the sign, the running *minimum* product must be tracked alongside
    the running maximum: a very negative prefix can become the maximum after
    one more negative element.
    """

    def maxProduct(self, nums: List[int]) -> int:
        """Tabulated DP: dp_max[i] / dp_min[i] hold the max/min product of a
        subarray ending at position i. O(n) time, O(n) space."""
        N = len(nums)
        if N == 0:
            return 0
        dp_max = [0] * (N + 1)
        dp_min = [0] * (N + 1)
        # float('-inf') is a true lower bound; the original used
        # 0 - sys.maxsize, which is not actually the smallest int.
        res = float('-inf')
        dp_max[0] = 1
        dp_min[0] = 1
        for i in range(1, N + 1):
            if nums[i - 1] < 0:
                # A negative factor swaps the roles of max and min.
                dp_max[i - 1], dp_min[i - 1] = dp_min[i - 1], dp_max[i - 1]
            dp_min[i] = min(nums[i - 1], dp_min[i - 1] * nums[i - 1])
            dp_max[i] = max(nums[i - 1], dp_max[i - 1] * nums[i - 1])
            res = max(res, dp_max[i])
        return res

    def maxProduct2(self, nums: List[int]) -> int:
        """Rolling-variable version of maxProduct: O(n) time, O(1) space.

        Fixed to return 0 for an empty input (the original returned
        -sys.maxsize there, inconsistent with maxProduct).
        """
        if not nums:
            return 0
        res = float('-inf')
        imax, imin = 1, 1
        for n in nums:
            if n < 0:
                # Multiplying by a negative swaps extremes.
                imax, imin = imin, imax
            imax = max(n, imax * n)
            imin = min(n, imin * n)
            res = max(res, imax)
        return res
# Ad-hoc smoke test: both implementations should print 6 for the first
# sample and 0 for the second (LeetCode 152 example cases).
sol = Solution()
print(sol.maxProduct([2, 3, -2, 4]))
print(sol.maxProduct2([2, 3, -2, 4]))
print(sol.maxProduct([-2, 0, -1]))
print(sol.maxProduct2([-2, 0, -1]))
| Symbolk/AlgInPy | DP/152maximum-product-subarray.py | 152maximum-product-subarray.py | py | 1,151 | python | en | code | 3 | github-code | 50 |
27339011988 | from flask import render_template, url_for
from . import main_page_bp
@main_page_bp.route("/")
def index():
    """Render the admin landing page.

    Passes the admin login URL into the template so the page can link to it.
    """
    template_values = {
        "login_url": url_for("admin_auth.login"),
    }
    return render_template("admin_main_page/index.html", **template_values)
| nydkc/dkc-application | src/admin/main_page/index.py | index.py | py | 266 | python | en | code | 0 | github-code | 50 |
9874025472 | import os # importing env vars
from twitchio.ext import commands
# Twitch chat bot configured entirely from environment variables
# (TMI_TOKEN, CLIENT_ID, BOT_NICK, BOT_PREFIX, CHANNEL).
bot = commands.Bot(
    irc_token=os.environ['TMI_TOKEN'],
    client_id=os.environ['CLIENT_ID'],
    nick=os.environ['BOT_NICK'],
    prefix=os.environ['BOT_PREFIX'],
    initial_channels=[os.environ['CHANNEL']]
)


@bot.event
async def event_ready():
    """Announce in the channel once the bot has connected."""
    print(f"{os.environ['BOT_NICK']} is online!")
    ws = bot._ws  # needed to send messages within event_ready
    await ws.send_privmsg(os.environ['CHANNEL'], f"/me, nice to be here!")


@bot.event
async def event_message(ctx):
    """Echo every chat message back, skipping the bot's own messages
    (otherwise the bot would keep replying to itself)."""
    if ctx.author.name.lower() == os.environ['BOT_NICK'].lower():
        return
    await ctx.channel.send(ctx.content)


if __name__ == "__main__":
    bot.run()
| kristin1502/TheSeedling | bot.py | bot.py | py | 699 | python | en | code | 0 | github-code | 50 |
74164593754 | #!/usr/bin/env python3
import click
import tame
from tame.recipes.utils import load_traj_seg
@click.command(name='persist',
               options_metavar='[options]',
               short_help='persistent time')
@load_traj_seg  # general input handler
@click.option('--max-dt', metavar='', default=30.0, show_default=True)
@click.option('-t', '--tags', metavar='', default='1,1:SSP:3,4', show_default=True)
@click.option('--tcf-out', metavar='', default='persist', show_default=True)
def persist_cmd(seg, dt,
                tag, tcf_out, max_dt):
    """Computes the time correlation function

    See the documentation for detailed descriptions of the command:
    https://Teoroo-CMC.github.io/tame/latest/recipe/persist
    """
    # NOTE(review): this command looks broken as written:
    #   * the '--tags' option passes a 'tags' keyword, but the signature
    #     declares 'tag' -- click will raise a TypeError when invoked;
    #   * 'tags' (before assignment), 'data', 'd_cache' and 'keys' are all
    #     undefined names in this scope.
    # Compare with the other recipes in this package before relying on it.
    import numpy as np
    import tame.math as tm
    from tame.ops import tavg, tpairsurvive
    from tame.io import load_traj
    n_cache = int(max_dt/dt)  # number of correlation lags to keep
    tags = tags.split(' ')
    corrs = {tag:  # time correlation functions, one for each tag
             tavg(tm.mean(tm.sum(tpairsurvive(
                 data, tag, n_cache, d_cache), axis=2), axis=1))
             for tag in tags}
    seg.run()
    TIME = np.arange(0, n_cache)*dt
    CORRS = {'t': TIME}
    for k in keys:
        CORRS[k] = corrs[k].eval()
    return {tcf_out: CORRS}
| yqshao/tame | tame/recipes/persist.py | persist.py | py | 1,301 | python | en | code | 0 | github-code | 50 |
6724325988 | import os
from flask import Flask, make_response
# Tiny Flask app that serves a one-line shell script for cloning the
# django-drf-boilerplate repository (intended for `curl ... | sh` usage).
app = Flask(__name__)
app.config["SECRET_KEY"] = os.environ.get(
    "SECRET_KEY", "secret_l801#+#a&^1mz)_p&qyq51j51@20_74c-xi%&i)b*u_dt^2=2key"
)


@app.route("/")
def boilerplate_script():
    """Return the clone script as plain text (not HTML) so shells can run it."""
    script = "git clone --quiet https://github.com/CheesecakeLabs/django-drf-boilerplate.git && echo 'Thanks for cloning our repository! Have a very cheesy day!'"
    response = make_response(script, 200)
    response.mimetype = "text/plain"
    return response


if __name__ == "__main__":
    # PORT is provided by PaaS platforms (e.g. Heroku); default to 5000 locally.
    port = int(os.environ.get("PORT", 5000))
    app.run(host="0.0.0.0", port=port)
| fredericojordan/boilerplate-script | boilerplate.py | boilerplate.py | py | 616 | python | en | code | 0 | github-code | 50 |
10926638608 | import os
import pytest
from chess_analysis import download_pgn
from data.pgn_text import LASKER_GAME
# Interpret the TEST_LIVE environment variable as a boolean. Environment
# variables are always strings, so the original os.getenv("TEST_LIVE", False)
# could never be `True` and the live test was unconditionally skipped.
TEST_LIVE = os.getenv("TEST_LIVE", "").strip().lower() in ("1", "true", "yes", "on")
def test_get_download_url():
    """get_download_url builds the chessgames.com PGN URL from a game id and
    an optional file name (falling back to 'game.pgn')."""
    # Normal behavior
    game_id = "112358"
    filename = "johnson_lasker_1926.pgn"
    expected = "https://www.chessgames.com/pgn/johnson_lasker_1926.pgn?gid=112358"
    url = download_pgn.get_download_url(game_id, filename)
    assert isinstance(url, str)
    assert url == expected
    # Filename not specified
    game_id = "112358"
    expected = "https://www.chessgames.com/pgn/game.pgn?gid=112358"
    url = download_pgn.get_download_url(game_id)
    assert isinstance(url, str)
    assert url == expected
@pytest.mark.skipif(TEST_LIVE is not True, reason="Skipping expensive tests")
def test_download():
    """Live network test: fetch a known game and compare it line-by-line
    (whitespace-stripped) against the LASKER_GAME fixture."""
    url = "https://www.chessgames.com/pgn/game.pgn?gid=1380448"
    expected = LASKER_GAME
    pgn = download_pgn.download(url)
    assert isinstance(pgn, str)
    assert [l.strip() for l in pgn.split("\n")] == expected
| mdashx/chess-analysis | tests/test_download_pgn.py | test_download_pgn.py | py | 1,016 | python | en | code | 0 | github-code | 50 |
42547803889 |
import os.path
import yaml
# NOTE(review): yaml is imported but unused.

# Scratch script: resolve front_end/src/App.tsx relative to this file by
# walking two directory levels up from the script's location.
# f = open(os.path.dirname(__file__) + '/../brownie-config.yml')
# print(f)
current_directory = os.path.dirname(__file__)
parent_directory = os.path.split(current_directory)[0]  # Repeat as needed
parent_parent_directory = os.path.split(parent_directory)[0]  # Repeat as needed
file_path = os.path.join(parent_parent_directory, 'front_end/src/App.tsx')
f = open(file_path)  # NOTE(review): file handle is never closed
print(f)
| tonisives/ti-python-crypto-codecamp | lesson-thirteen/brownie/scripts/test.py | test.py | py | 417 | python | en | code | 0 | github-code | 50 |
34655300854 | _author_ = 'jake'
_project_ = 'leetcode'
# https://leetcode.com/problems/remove-duplicates-from-sorted-array/
# Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
# Do not allocate extra space for another array, you must do this in place with constant memory.
# Maintain a pointer to the next index to be filled with a new number. Check every number against the previous num
# (if any) and if different, move to the next_new index.
# Time - O(n)
# Space - O(1)
class Solution(object):
    def removeDuplicates(self, nums):
        """
        Compact the unique values of a sorted list to its front, in place,
        and return how many unique values there are.

        :type nums: List[int]
        :rtype: int
        """
        write = 0  # index where the next unique number is to be moved to
        for idx, value in enumerate(nums):
            # A value equal to its predecessor is a duplicate - skip it.
            if idx > 0 and value == nums[idx - 1]:
                continue
            nums[write] = value
            write += 1
        return write
| jakehoare/leetcode | python_1_to_1000/026_Remove_Duplicates_from_Sorted_Array.py | 026_Remove_Duplicates_from_Sorted_Array.py | py | 933 | python | en | code | 49 | github-code | 50 |
29543510024 | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# from bs4 import BeautifulSoup
# from extractors.wwr import extract_wwr_jobs
# base_url = "https://kr.indeed.com/jobs?q="
# search_term = "python"
# response = get(f"{base_url}{search_term}")
# if response != 200:
# print("Can't request page.")
# else:
# print(response.text)
# Chrome session flags needed when running as root / inside containers
# (no sandbox, limited /dev/shm).
options = Options()
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
browser = webdriver.Chrome(options=options)
# Fetching through a real browser avoids the 403 that the plain HTTP request
# attempt (commented out above) gets from indeed.com.
browser.get(
    "https://kr.indeed.com/jobs?q=python&l=&from=searchOnHP&vjk=1015284880e2ff62"
)
print(browser.page_source)
| kanujoa/Python_scrapper | 5. Job Scrapper/5-12 Indeed/fix403.py | fix403.py | py | 644 | python | en | code | 0 | github-code | 50 |
print( 'Павловська Катерина. КМ-93. Варіант №14.' )
print("Task: Make a program that determines the result of divination on the camomile - love-does not love, taking the original given number of petals n (enter from the keyboard)." )
print()
print('You are welcomed by the guessing program')
import re

# Accepts optionally signed integers only (raw string avoids the invalid
# escape-sequence warning for \d).
re_integer = re.compile(r"^[-+]?\d+$")


def validator(pattern, promt):
    """Keep prompting until the input fully matches `pattern`; return the raw text."""
    text = input( promt )
    while not bool( pattern.match( text ) ):
        text = input( promt )
    return text


Petails = int( validator( re_integer, "Print some number of Peatils " ) )
while Petails <= 3:
    print( "The given values are incorrect, which flower does not exist, please try again" )
    Petails = int( input( "Number of petals: " ) )
Version = int(
    validator( re_integer, "If you want to start guessing with likes press - 1, if with dislikes press - 2" ) )
if Version == 1:
    print( 'You started guessing with loves' )
elif Version == 2:
    print( 'You started guessing with dislikes' )
while Version != 1 and Version != 2:
    print(
        "You have entered an incorrect value if you want to start guessing with likes press-1, if with dislikes press-2" )
    Version = int( input( "If you want to start guessing with likes press - 1, if with dislikes press - 2" ) )
# Tearing petals alternates "loves / loves not", so the outcome depends on the
# PARITY of the petal count. The original tested `Petails // 2 == 0` (floor
# division), which is never 0 for the enforced Petails > 3, so one branch
# always won; parity must be tested with the modulo operator.
if Petails % 2 == 0 and Version == 1:
    # Started with "loves", even count -> ends on "loves not".
    print( 'Your result of divination does not like' )
elif Petails % 2 != 0 and Version == 1:
    print( "Your divination result loves" )
elif Petails % 2 == 0 and Version == 2:
    # Started with "loves not", even count -> ends on "loves".
    print( 'Your divination result loves' )
elif Petails % 2 != 0 and Version == 2:
    # Started with "loves not", odd count -> ends on "loves not". The original
    # printed "loves" here too, which made the starting choice meaningless.
    print( 'Your divination result does not love' )
print( "The end" )
| KatePavlovska/python-laboratory | laboratory1&2update/Lab1_Task2_the_guessing_pavlovska_km_93.py | Lab1_Task2_the_guessing_pavlovska_km_93.py | py | 1,719 | python | en | code | 0 | github-code | 50 |
32173442293 | import redis
# r = redis.Redis(host='localhost', port=6379, db=1)
class Base(object):
    """Shared Redis connection for the test classes in this module."""

    def __init__(self):
        # Same local test instance the original hard-coded (db index 1).
        connection_settings = {"host": "localhost", "port": 6379, "db": 1}
        self.r = redis.Redis(**connection_settings)
class TestZset(Base):
    """Exercises redis-py sorted-set (zset) commands against the live
    connection inherited from Base; each method prints the raw reply.

    NOTE(review): the zadd/zincrby call signatures below follow redis-py 2.x;
    redis-py 3.x changed zadd to take a mapping and swapped zincrby's
    argument order -- verify against the pinned client version.
    """

    def test_zadd(self):
        """ZADD adds one or more member/score pairs to the sorted set `key`,
        e.g. redis.zadd('my-key', 'name1', 1.1, 'name2', 2.2, name3=3.3, name4=4.4)"""
        z = ['google.com', 1, 'baidu.com', 2, 'huizhi.com', 3]
        result = self.r.zadd('rank', *z)
        print(result)
        rest = self.r.zrange('rank', 0, -1, withscores=True)
        print(rest)

    def test_zrem(self):
        "ZREM removes the specified member(s) from the sorted set"
        result = self.r.zrem('rank', 'baidu.com')
        print(result)
        rest = self.r.zrange('rank', 0, -1, withscores=True)
        print(rest)

    def test_zscore(self):
        "ZSCORE fetches a member's score: ZSCORE key member"
        result = self.r.zscore('rank', 'google.com')
        print(result)
        return result

    def test_zcard(self):
        "ZCARD returns the number of members in the set: ZCARD key"
        result = self.r.zcard('rank')
        print(result)
        return result

    def test_zcount(self):
        "ZCOUNT counts members whose score lies within [min, max]: ZCOUNT key min max"
        result = self.r.zcount('rank', 1, 2)
        print(result)
        return result

    def test_zrank(self):
        "ZRANK returns a member's rank ordered by ascending score: ZRANK key member"
        result = self.r.zrank('rank', 'huizhi.com')  # 获取排名 -> fetch the rank
        print(result)
        return result

    def test_zincrby(self):
        "ZINCRBY adds an increment to a member's score: ZINCRBY key increment member"
        result = self.r.zincrby('rank', 'huizhi.com', 3)  # bump the score by 3
        print(result)
        return result
def main():
    # Manual test driver: uncomment the command you want to run against the
    # local Redis instance (only zincrby is enabled here).
    zset_obj = TestZset()
    # zset_obj.test_zadd()
    # zset_obj.test_zrem()
    # zset_obj.test_zscore()
    # zset_obj.test_zcard()
    # zset_obj.test_zcount()
    # zset_obj.test_zrank()
    zset_obj.test_zincrby()


if __name__ == '__main__':
    main()
| huazhicai/shengxun | database/redis/test_zset_redis.py | test_zset_redis.py | py | 2,142 | python | en | code | 0 | github-code | 50 |
312517426 | #-*- python -*-
""" Syslog log observer """
from __future__ import absolute_import, division, print_function
import syslog
from zope.interface import implementer
from twisted.logger import ILogObserver
from twisted.logger import LogLevel
from twisted.logger import formatEvent
# These defaults come from the Python syslog docs.
DEFAULT_OPTIONS = 0
DEFAULT_FACILITY = syslog.LOG_USER
# Map the twisted LogLevels up against the syslog values
LOGLEVEL_MAP = {
LogLevel.debug: syslog.LOG_DEBUG,
LogLevel.info: syslog.LOG_INFO,
LogLevel.warn: syslog.LOG_WARNING,
LogLevel.error: syslog.LOG_ERR,
LogLevel.critical: syslog.LOG_CRIT,
}
@implementer(ILogObserver)
class SyslogObserver(object):
    """
    A log observer for logging to syslog.

    Implements twisted's ILogObserver: instances are called with each log
    event dict and forward its formatted text to the system log.
    """
    # Kept as class attributes so tests can stub out the syslog module calls.
    openlog = syslog.openlog
    syslog = syslog.syslog

    def __init__(self, prefix, options=DEFAULT_OPTIONS,
                 facility=DEFAULT_FACILITY):
        """
        Open the syslog connection; `prefix` is prepended to every message.
        """
        self.openlog(prefix, options, facility)

    def __call__(self, event):
        """
        Write event to syslog.
        """
        # Figure out what the message-text is.
        eventText = formatEvent(event)
        if eventText is None:
            return

        # Figure out what syslog parameters we might need to use.
        level = event.get("log_level", None)
        if level is None:
            # Events carrying a failure but no explicit level are critical.
            if 'log_failure' in event:
                level = LogLevel.critical
            else:
                level = LogLevel.info
        priority = LOGLEVEL_MAP[level]
        facility = int(event.get('log_facility', DEFAULT_FACILITY))

        # Break the message up into lines and send them.
        lines = eventText.split('\n')
        while lines[-1:] == ['']:  # drop trailing empty lines
            lines.pop()
        firstLine = True
        for line in lines:
            if firstLine:
                firstLine = False
            else:
                # Continuation lines are prefixed so multi-line events stay
                # recognisable in the log.
                line = ' ' + line
            self.syslog(priority | facility,
                        '[%s] %s' % (event.get('log_system', '-'), line))
| sveinse/lumina | lumina/syslog.py | syslog.py | py | 2,053 | python | en | code | 1 | github-code | 50 |
18018347486 | from .core import Core, Settings
class Download(Core):
    """Streams the elastic-agent artifact from artifacts.elastic.co to disk.

    Fix: the original logged through ``self.__logger``; due to Python name
    mangling that resolves to ``_Download__logger``, which is never assigned
    in this class, so every call to parse_response raised AttributeError.
    A module logger is used instead.
    """

    host = 'https://artifacts.elastic.co/downloads/beats/elastic-agent/{endpoint}'
    endpoint = Settings.download_endpoint
    kwargs = {
        # Stream the response so large artifacts are not buffered in memory.
        'stream': True
    }

    def parse_response(self, response):
        """Write the streamed HTTP response body to Settings.download_path."""
        import logging
        logger = logging.getLogger(__name__)
        logger.debug('Saving file to download path: {}'.format(Settings.download_path))
        with open(Settings.download_path, 'wb+') as f:
            # 1 KiB chunks; decode_content=False keeps the bytes exactly as
            # served (no gzip/deflate decoding).
            for chunk in response.raw.stream(1024, decode_content=False):
                if chunk:
                    f.write(chunk)
        logger.debug('File saved successfully')
| MSAdministrator/elastic-agent-setup | elastic_agent_setup/download.py | download.py | py | 608 | python | en | code | 3 | github-code | 50 |
39208639638 | #function "max()" accepts two nums & returns max of them
# function is a part of Python syntax
def max(a, b):
    """Return the larger of two values (for a tie, returns b)."""
    # NOTE: this shadows the built-in max() within this module.
    return a if a > b else b
# Demo calls: two fixed comparisons, then one driven by user input.
print(max(3, 5))
print(max(5, 3))
print(max(int(input("Enter First Number: ")), int(input("Enter Second Number: "))))
| izzyward02/IFSC1202 | 06.00.10 Max.py | 06.00.10 Max.py | py | 290 | python | en | code | 0 | github-code | 50 |
23571007904 | from re import fullmatch
from copy import deepcopy
from itertools import product, chain, repeat, islice
import heapq
from math import inf
import multiprocessing
from timeit import default_timer
from board import Board
class Player:
    def __init__(self, player, walls, game):
        # player: 'X' or 'O'; walls: per-type wall budget (the same count is
        # granted for vertical and for horizontal walls); game: back-reference
        # to the running game object.
        self.player = player
        self.vertical_walls = walls
        self.horizontal_walls = walls
        self.game = game
        self.profiling = True  # print move timings in get_computer_move
def print_player_info(self):
print(f"Playing: {self.__class__.__name__} '{self.player}'")
print(f"Vertical walls: {self.vertical_walls}")
print(f"Horizontal walls: {self.horizontal_walls}")
def print_winner(self, moves):
print('-' * 50)
print(f"WINNER IN {moves} MOVES: {self.__class__.__name__} '{self.player}'".center(50, '-'))
print('-' * 50)
    # Inputs the move from the user or gets the move from the computer and packs it into the following format:
    # ((player, player_pawn, new_row, new_col), optional(wall_type, row, col))
    def get_move(self, board):
        # Abstract hook: concrete player types are expected to override this.
        pass
    def get_computer_move(self, board):
        """Pick the best legal move by evaluating each candidate move in a
        separate worker process; returns None when there is no legal move."""
        start = None
        if self.profiling:
            start = default_timer()
        # Restricted candidate set (all_moves=False) keeps the branching
        # factor manageable.
        moves = self.legal_board_moves(board, all_moves=False)
        if len(moves) == 0:
            return None
        # Spawn child processes for as many moves
        # NOTE(review): minimax_caller is not defined in the visible part of
        # this class -- presumably implemented further down or in a subclass.
        with multiprocessing.Pool() as pool:
            evaluations = pool.starmap(self.minimax_caller, zip(repeat(board), moves))
        # 'X' maximises the evaluation, 'O' minimises it.
        best_evaluation, best_move = \
            max(zip(evaluations, moves)) if self.player == 'X' else min(zip(evaluations, moves))
        if start is not None:
            print(f"Computer move time: {default_timer() - start}")
        return best_move
    # Plays the move on a new board state which it returns after updating the number of player walls if update_walls
    def play_move(self, board, move, update_walls=True):
        # Non-destructive variant of in_place_play_move: works on a deep copy
        # so the caller's board is left untouched.
        new_board = deepcopy(board)
        self.in_place_play_move(new_board, move, update_walls)
        return new_board
    def in_place_play_move(self, board, move, update_walls=True, undo=False):
        """Apply `move` to `board` in place.

        Returns the argument tuple for the inverse call, so the move can be
        undone by re-invoking this method with the returned values.
        """
        undo_move = board.move_pawn(*(move[0]))
        if len(move) == 2:  # the move also places a wall
            board.place_wall(*(move[1]))
            # Update the number of walls
            if update_walls:
                if move[1][0] == 'Z':  # 'Z' decrements the vertical budget, otherwise horizontal
                    self.vertical_walls += 1 if undo else -1
                else:
                    self.horizontal_walls += 1 if undo else -1
            return board, (undo_move, (*(move[1]), not undo)), update_walls, not undo
        return board, (undo_move,), update_walls, not undo
def iter_next_legal_board_states(self, board, moves=None):
return map(lambda move: self.play_move(board, move, update_walls=False),
self.legal_board_moves(board) if moves is None else moves)
    def legal_board_moves(self, board, all_moves=True):
        """All legal (pawn-move [+ wall-placement]) combinations for this player.

        While the player still has walls, every candidate pawn move is paired
        with the wall placements that remain legal; once out of walls, only
        bare pawn moves remain (each wrapped in a 1-tuple to keep the move
        format uniform).
        """
        if self.vertical_walls > 0 or self.horizontal_walls > 0:
            return self.legal_pawn_wall_move_combinations(board, self.legal_pawn_moves(board, all_moves=all_moves),
                                                          self.legal_wall_placements(board, all_moves=all_moves))
        else:
            return tuple(map(lambda move: (move,), self.legal_pawn_moves(board)))
    def legal_pawn_moves(self, board, all_moves=True):
        """Legal jumps for both of this player's pawns, best-first.

        Moves are ordered by the board's static evaluation after the move
        (descending for 'X', ascending for 'O'); with all_moves=False only
        the two best moves per pawn are kept.
        """
        pawns = board.player_1_pawns if self.player == 'X' else board.player_2_pawns
        pawn_moves = tuple(chain(
            map(lambda l: (self.player, 0, *l), self.iter_legal_jumps(board, pawns[0][0], pawns[0][1])),
            map(lambda l: (self.player, 1, *l), self.iter_legal_jumps(board, pawns[1][0], pawns[1][1]))
        ))

        # Sort pawn moves by static evaluation
        static_evaluations = [0] * len(pawn_moves)
        for i, pawn_move in enumerate(pawn_moves):
            undo_move = board.move_pawn(*pawn_move)  # try the move...
            static_evaluations[i] = board.static_evaluation()
            board.move_pawn(*undo_move)  # ...and take it back
        pawn_moves = tuple(pawn_move for _, pawn_move in sorted(zip(static_evaluations, pawn_moves),
                                                                reverse=self.player == 'X')
                           )
        return pawn_moves if all_moves else \
            tuple(chain(
                islice(filter(lambda move: move[1] == 0, pawn_moves), 2),
                islice(filter(lambda move: move[1] == 1, pawn_moves), 2)
            ))
    # Returns all legal pawn jumps from the square with the row and column
    def iter_legal_jumps(self, board, row, column):
        """Yield every (row, column) this player's pawn may jump to from
        (row, column).

        Covers diagonal jumps, single steps, and two-square jumps, each gated
        on the relevant walls being absent. A destination is enterable when it
        is a starting square not owned by this player, or when no pawn
        ('X'/'O' in its center) occupies it. Single "short" steps onto an
        empty non-starting square additionally require a pawn on the square
        beyond -- presumably this game's movement rule; confirm against the
        game specification.
        """
        source_square = board.board[row][column]
        # Top-side
        if row > 0:
            # Top-Left
            if column > 0 and \
                    not source_square.top_left() and \
                    not board.board[row - 1][column - 1].bottom_right() and \
                    not (source_square.left and board.board[row - 1][column].left) and \
                    not (source_square.top and board.board[row][column - 1].top) and \
                    (
                            (board.board[row - 1][column - 1].starting is not None and
                             board.board[row - 1][column - 1].starting != self.player) or
                            (board.board[row - 1][column - 1].center != 'X' and
                             board.board[row - 1][column - 1].center != 'O')
                    ):
                yield row - 1, column - 1
            # Top-Right
            if column < board.columns - 1 and \
                    not source_square.top_right() and \
                    not board.board[row - 1][column + 1].bottom_left() and \
                    not (source_square.right and board.board[row - 1][column].right) and \
                    not (source_square.top and board.board[row][column + 1].top) and \
                    (
                            (board.board[row - 1][column + 1].starting is not None and
                             board.board[row - 1][column + 1].starting != self.player) or
                            (board.board[row - 1][column + 1].center != 'X' and
                             board.board[row - 1][column + 1].center != 'O')
                    ):
                yield row - 1, column + 1
            # Top
            if not source_square.top:
                # Top-Short
                if (board.board[row - 1][column].starting is not None and
                    board.board[row - 1][column].starting != self.player) or \
                        (
                                board.board[row - 1][column].center != 'X' and
                                board.board[row - 1][column].center != 'O' and
                                (
                                        row > 1 and
                                        not board.board[row - 1][column].top and
                                        (board.board[row - 2][column].center == 'X' or
                                         board.board[row - 2][column].center == 'O')
                                )
                        ):
                    yield row - 1, column
                # Top-Long
                if row > 1 and \
                        not board.board[row - 1][column].top and \
                        (
                                (board.board[row - 2][column].starting is not None and
                                 board.board[row - 2][column].starting != self.player) or
                                (board.board[row - 2][column].center != 'X' and
                                 board.board[row - 2][column].center != 'O')
                        ):
                    yield row - 2, column
        # Bottom-side
        if row < board.rows - 1:
            # Bottom-Left
            if column > 0 and \
                    not source_square.bottom_left() and \
                    not board.board[row + 1][column - 1].top_right() and \
                    not (source_square.left and board.board[row + 1][column].left) and \
                    not (source_square.bottom and board.board[row][column - 1].bottom) and \
                    (
                            (board.board[row + 1][column - 1].starting is not None and
                             board.board[row + 1][column - 1].starting != self.player) or
                            (board.board[row + 1][column - 1].center != 'X' and
                             board.board[row + 1][column - 1].center != 'O')
                    ):
                yield row + 1, column - 1
            # Bottom-Right
            if column < board.columns - 1 and \
                    not source_square.bottom_right() and \
                    not board.board[row + 1][column + 1].top_left() and \
                    not (source_square.right and board.board[row + 1][column].right) and \
                    not (source_square.bottom and board.board[row][column + 1].bottom) and \
                    (
                            (board.board[row + 1][column + 1].starting is not None and
                             board.board[row + 1][column + 1].starting != self.player) or
                            (board.board[row + 1][column + 1].center != 'X' and
                             board.board[row + 1][column + 1].center != 'O')
                    ):
                yield row + 1, column + 1
            # Bottom
            if not source_square.bottom:
                # Bottom-Short
                if (board.board[row + 1][column].starting is not None and
                    board.board[row + 1][column].starting != self.player) or \
                        (
                                board.board[row + 1][column].center != 'X' and
                                board.board[row + 1][column].center != 'O' and
                                (
                                        row < board.rows - 2 and
                                        not board.board[row + 1][column].bottom and
                                        (board.board[row + 2][column].center == 'X' or
                                         board.board[row + 2][column].center == 'O')
                                )
                        ):
                    yield row + 1, column
                # Bottom-Long
                if row < board.rows - 2 and \
                        not board.board[row + 1][column].bottom and \
                        (
                                (board.board[row + 2][column].starting is not None and
                                 board.board[row + 2][column].starting != self.player) or
                                (board.board[row + 2][column].center != 'X' and
                                 board.board[row + 2][column].center != 'O')
                        ):
                    yield row + 2, column
        # Left
        if column > 0 and not source_square.left:
            if (board.board[row][column - 1].starting is not None and
                board.board[row][column - 1].starting != self.player) or \
                    (
                            board.board[row][column - 1].center != 'X' and
                            board.board[row][column - 1].center != 'O' and
                            (
                                    column > 1 and
                                    not board.board[row][column - 1].left and
                                    (board.board[row][column - 2].center == 'X' or
                                     board.board[row][column - 2].center == 'O')
                            )
                    ):
                yield row, column - 1
            # Left-Long
            if column > 1 and \
                    not board.board[row][column - 1].left and \
                    (
                            (board.board[row][column - 2].starting is not None and
                             board.board[row][column - 2].starting != self.player) or
                            (board.board[row][column - 2].center != 'X' and
                             board.board[row][column - 2].center != 'O')
                    ):
                yield row, column - 2
        # Right
        if column < board.columns - 1 and not source_square.right:
            # Right-Short
            if (board.board[row][column + 1].starting is not None and
                board.board[row][column + 1].starting != self.player) or \
                    (
                            board.board[row][column + 1].center != 'X' and
                            board.board[row][column + 1].center != 'O' and
                            (
                                    column < board.columns - 2 and
                                    not board.board[row][column + 1].right and
                                    (board.board[row][column + 2].center == 'X' or
                                     board.board[row][column + 2].center == 'O')
                            )
                    ):
                yield row, column + 1
            # Right-Long
            if column < board.columns - 2 and \
                    not board.board[row][column + 1].right and \
                    (
                            (board.board[row][column + 2].starting is not None and
                             board.board[row][column + 2].starting != self.player) or
                            (board.board[row][column + 2].center != 'X' and
                             board.board[row][column + 2].center != 'O')
                    ):
                yield row, column + 2
    def legal_wall_placements(self, board, all_moves=True):
        """Wall placements the board allows and this player can afford.

        Candidates are ordered by distance to the relevant starting squares;
        with all_moves=False the list is truncated, with the cut-off growing
        as more walls are already placed on the board.
        """
        wall_moves = []
        for row in range(board.rows - 1):
            for column in range(board.columns - 1):
                if self.vertical_walls > 0:
                    if board.valid_wall_placement('Z', row, column, print_failure=False):
                        wall_moves.append(('Z', row, column))
                if self.horizontal_walls > 0:
                    if board.valid_wall_placement('P', row, column, print_failure=False):
                        wall_moves.append(('P', row, column))
        # Sort wall moves adjacent to starting square or closest enemy pawns
        starting = board.player_1_start if self.player == 'X' else board.player_2_start
        wall_moves.sort(key=lambda move:
                        min(board.non_diagonal_distance((move[1], move[2]), starting[0]),
                            board.non_diagonal_distance((move[1], move[2]), starting[1])))
        return wall_moves if all_moves else \
            tuple(islice(wall_moves,
                         (board.num_placed_walls // 3
                          if board.num_placed_walls < 9 else
                          (board.num_placed_walls // 6 + 1))
                         * board.num_placed_walls + 8))
    # Find all move combinations that don't block any one of the pawns' path to the goal
    @staticmethod
    def legal_pawn_wall_move_combinations(board, pawn_moves, wall_moves):
        """Cross pawn moves with wall placements, discarding any combination
        that would wall off a pawn's path to its goal.

        Wall moves whose ends touch no other wall are pre-collected in
        skip_check_set: such a wall cannot complete a blocking barrier, so
        path checking can be skipped for it.
        """
        player = pawn_moves[0][0]
        # TODO: Change algorithm to use a search tree and process all pawn moves at the same time
        moves = []
        # Walls not touching any walls on their ends cannot block a path
        skip_check_set = {
            wall_move for wall_move in wall_moves
            if (wall_move[0] == 'Z' and wall_move[1] != 0 and
                not board.board[wall_move[1]][wall_move[2]].top and
                not board.board[wall_move[1]][wall_move[2] + 1].top and
                wall_move[1] != board.rows - 2 and
                not board.board[wall_move[1] + 1][wall_move[2]].bottom and
                not board.board[wall_move[1] + 1][wall_move[2] + 1].bottom)
            or
            (wall_move[0] == 'P' and wall_move[2] != 0 and
             not board.board[wall_move[1]][wall_move[2]].left and
             not board.board[wall_move[1] + 1][wall_move[2]].left and
             wall_move[2] != board.columns - 2 and
             not board.board[wall_move[1]][wall_move[2] + 1].right and
             not board.board[wall_move[1] + 1][wall_move[2] + 1].right)
        }
        # Filter out walls that block the path of the opponents pawns
        wall_moves = Player.filter_blocking_walls(board, 'O' if player == 'X' else 'X', wall_moves,
                                                  skip_check_set=skip_check_set)
        # Walls that don't block the first and second pawn at their base position
        non_pawn_blocking = (Player.filter_blocking_walls(board, player, wall_moves, only_pawn_index=0,
                                                          skip_check_set=skip_check_set),
                             Player.filter_blocking_walls(board, player, wall_moves, only_pawn_index=1,
                                                          skip_check_set=skip_check_set))
        for pawn_move in pawn_moves:
            # Re-check the *other* pawn's pre-filtered walls with this pawn
            # moved to its new square, then undo the move.
            undo_move = board.move_pawn(*pawn_move)
            new_wall_moves = Player.filter_blocking_walls(board, player, non_pawn_blocking[(pawn_move[1] + 1) % 2],
                                                          only_pawn_index=pawn_move[1], skip_check_set=skip_check_set)
            board.move_pawn(*undo_move)
            moves += product((pawn_move,), new_wall_moves)
        return moves
    # Returns a filtered list of wall moves that don't block the path of the given player's pawns
    # The algorithm tries to find two non-adjacent paths for every path that needs to be checked.
    # If found, both of the paths cannot be blocked, so path-checking for that path is excluded.
    # If not, it tests if any of the walls obstruct the path and then tries to reconstruct it
    @staticmethod
    def filter_blocking_walls(board, player, wall_moves, only_pawn_index=None, skip_check_set=None):
        """Keep only the wall moves that leave every checked pawn a path to
        its goal. With only_pawn_index set, only that pawn's paths are
        verified; walls in skip_check_set are accepted without checking.
        """
        if player == 'X':
            pawns = board.player_1_pawns
            goals = board.player_2_start
        else:
            pawns = board.player_2_pawns
            goals = board.player_1_start
        # Try to find two non-adjacent paths from each start to each pawn
        paths = [True] * 4  # True == provably unblockable (two disjoint paths)
        for pawn_index in range(2):
            if only_pawn_index is not None and pawn_index != only_pawn_index:
                continue
            for goal_index in range(2):
                path, jump_filter = \
                    Player.find_non_adjacent_paths(board, pawns[pawn_index], goals[goal_index])
                new_path, _ = \
                    Player.find_non_adjacent_paths(board, pawns[pawn_index], goals[goal_index], jump_filter=jump_filter)
                # If two paths are found blocking both the paths is impossible otherwise it is possible
                if type(new_path) is not dict:
                    paths[pawn_index * 2 + goal_index] = path
        # Check if there are is no way to block any of the paths
        if paths == [True, True, True, True]:
            return wall_moves
        # Test if any of the walls obstructs the paths and reconstruct the path if necessary
        filtered_wall_moves = []
        for wall_move in wall_moves:
            if skip_check_set is not None and wall_move in skip_check_set:
                filtered_wall_moves.append(wall_move)
                continue
            legal = True
            board.place_wall(*wall_move)  # tentatively place, lifted below
            for index, path in enumerate(paths):
                if type(path) is dict:
                    # Check for parts of the path that need to be reconstructed
                    first_affected_square = (inf, None)
                    last_affected_square = (-inf, None)
                    for potential_square in Player.iter_wall_placement_affected_squares(board, *wall_move):
                        if potential_square in path and path[potential_square][1] not in \
                                board.iter_non_blocking_jumps(potential_square[0], potential_square[1]):
                            if first_affected_square[0] > path[potential_square][0]:
                                first_affected_square = (path[potential_square][0], potential_square)
                            last_affected_square = max(last_affected_square, path[potential_square])
                    # Check if the path can be reconstructed
                    if first_affected_square[1] is not None and \
                            not board.check_path(first_affected_square[1], last_affected_square[1]):
                        legal = False
                        break
            board.place_wall(*wall_move, lift=True)
            if legal:
                filtered_wall_moves.append(wall_move)
        return filtered_wall_moves
    @staticmethod
    def find_non_adjacent_paths(board, source, destination, jump_filter=None):
        """Greedy best-first search from source to destination.

        Returns (path, filter): `path` is an ordered {square: (order, prev)}
        dict ({} when source == destination, False when no path exists);
        `filter` is the set of squares adjacent to the found path (None when
        a jump_filter was supplied). Passing the returned filter back in
        searches for a second path not adjacent to the first.
        """
        if source[0] == destination[0] and source[1] == destination[1]:
            # Precedence note: this returns the pair ({}, set()-or-None).
            return {}, set() if jump_filter is None else None
        # Dictionary for keeping track of the path
        prev_jump = {(source[0], source[1]): None}
        # Priority queue keyed on distance-to-goal (greedy, not Dijkstra).
        prio_queue = [(board.non_diagonal_distance(source, destination), *source)]
        while len(prio_queue):
            # noinspection PyTupleAssignmentBalance
            _, row, column = heapq.heappop(prio_queue)
            pos = (row, column)
            if row == destination[0] and column == destination[1]:
                break
            for new_pos in filter(
                    lambda jump: jump not in prev_jump and (jump_filter is None or jump not in jump_filter),
                    board.iter_non_blocking_jumps(row, column)):
                prev_jump[new_pos] = pos
                heapq.heappush(prio_queue, (board.non_diagonal_distance(new_pos, destination), *new_pos))
        # Check if a path is found
        if (destination[0], destination[1]) not in prev_jump:
            return False, set() if jump_filter is None else None
        # Prep for filling the filter if needed
        new_jump_filter = set() if jump_filter is None else None
        # Trace the path along with the order of nodes
        ordered_path = dict()
        order = 0  # counts up from the destination back to the source
        current = (destination[0], destination[1])
        while current[0] != source[0] or current[1] != source[1]:
            ordered_path[current] = (order, prev_jump[current])
            # Fill out the filter
            if new_jump_filter is not None:
                for adjacent_square in Player.iter_adjacent_squares_from_jump(board, current, prev_jump[current]):
                    new_jump_filter.add(adjacent_square)
            current = prev_jump[current]
            order += 1
        if new_jump_filter is not None:
            # The endpoints themselves must stay reachable for the second search.
            new_jump_filter.remove((source[0], source[1]))
            new_jump_filter.remove((destination[0], destination[1]))
        return ordered_path, new_jump_filter
    # Returns all the squares that cannot be jumped to in order for the paths to be non-adjacent
    @staticmethod
    def iter_adjacent_squares_from_jump(board, prev_pos, pos, include_jump_squares=True):
        """Yield the squares adjacent to the jump ``prev_pos -> pos``.

        The branches are split by the relative position of the two squares
        (above / same row / below, left / same column / right); yielded
        coordinates are only board-bounds-checked where the original code
        checks them.
        """
        if include_jump_squares:
            yield prev_pos
            yield pos
        # Top-side
        if prev_pos[0] > pos[0]:
            # Top-Middle
            if prev_pos[1] == pos[1]:
                if pos[1] > 0:
                    yield pos[0], pos[1] - 1
                    yield prev_pos[0], prev_pos[1] - 1
                if pos[1] < board.columns - 1:
                    yield pos[0], pos[1] + 1
                    yield prev_pos[0], prev_pos[1] + 1
            # Top-Left
            elif prev_pos[1] > pos[1]:
                yield pos[0], pos[1] + 1
                yield pos[0] + 1, pos[1]
                if pos[0] > 0:
                    yield pos[0] - 1, pos[1] + 1
                if pos[1] > 0:
                    yield pos[0] + 1, pos[1] - 1
                if prev_pos[0] < board.rows - 1:
                    yield prev_pos[0] + 1, prev_pos[1] - 1
                if prev_pos[1] < board.columns - 1:
                    yield prev_pos[0] - 1, prev_pos[1] + 1
            # Top-Right
            else:
                yield pos[0], pos[1] - 1
                yield pos[0] + 1, pos[1]
                if pos[0] > 0:
                    yield pos[0] - 1, pos[1] - 1
                if pos[1] < board.columns - 1:
                    yield pos[0] + 1, pos[1] + 1
                if prev_pos[0] < board.rows - 1:
                    yield prev_pos[0] + 1, prev_pos[1] + 1
                if prev_pos[1] > 0:
                    yield prev_pos[0] - 1, prev_pos[1] - 1
        # Middle
        elif prev_pos[0] == pos[0]:
            # Left and Right
            if pos[0] > 0:
                yield pos[0] - 1, pos[1]
                yield prev_pos[0] - 1, prev_pos[1]
            if pos[0] < board.rows - 1:
                yield pos[0] + 1, pos[1]
                yield prev_pos[0] + 1, prev_pos[1]
        # Bottom-side
        else:
            # Since the edges are undirected the bottom side is symmetrical to the top side
            yield from Player.iter_adjacent_squares_from_jump(board, pos, prev_pos, include_jump_squares=False)
# Returns all squares whose non-blocking jumps may be changed if a given wall is placed
@staticmethod
def iter_wall_placement_affected_squares(board, wall_type, row, column):
yield row, column
yield row, column + 1
yield row + 1, column
yield row + 1, column + 1
if wall_type == 'Z':
if row > 0:
yield row - 1, column
yield row - 1, column + 1
if row < board.rows - 2:
yield row + 2, column
yield row + 2, column + 1
else:
if column > 0:
yield row, column - 1
yield row + 1, column - 1
if column < board.columns - 2:
yield row, column + 2
yield row + 1, column + 2
    def minimax(self, board, depth, alpha, beta):
        """Alpha-beta minimax evaluation of ``board`` down to ``depth`` plies.

        Player 'X' maximizes the board's static evaluation, the other player
        minimizes it.  Moves are played in place and undone after recursion.
        Returns 0 when the side to move has no legal moves.
        """
        if depth == 0 or board.game_end():
            return board.static_evaluation()
        no_legal_moves = True
        if self.player == 'X':
            opponent = self.game.player_2
            max_eval = -inf
            for move in self.legal_board_moves(board, all_moves=False):
                no_legal_moves = False
                # Play the move in place, recurse as the opponent, then undo.
                undo_move = self.in_place_play_move(board, move)
                evaluation = opponent.minimax(board, depth - 1, alpha, beta)
                max_eval = max(max_eval, evaluation)
                self.in_place_play_move(*undo_move)
                # Alpha cut off
                alpha = max(alpha, evaluation)
                if beta <= alpha:
                    break
            return 0 if no_legal_moves else max_eval
        else:
            opponent = self.game.player_1
            min_eval = inf
            for move in self.legal_board_moves(board, all_moves=False):
                no_legal_moves = False
                undo_move = self.in_place_play_move(board, move)
                evaluation = opponent.minimax(board, depth - 1, alpha, beta)
                min_eval = min(min_eval, evaluation)
                self.in_place_play_move(*undo_move)
                # Beta cut off
                beta = min(beta, evaluation)
                if beta <= alpha:
                    break
            return 0 if no_legal_moves else min_eval
# Helper function that the child processes call; plays the move on the board and calls minimax
def minimax_caller(self, board, move):
undo_move = self.in_place_play_move(board, move)
evaluation = self.game.player_2.minimax(board, 2, -inf, inf) if self.player == 'X' else \
self.game.player_1.minimax(board, 2, -inf, inf)
self.in_place_play_move(*undo_move)
return evaluation
class Computer(Player):
    """AI-controlled player; every move is produced by the computer search.

    The redundant ``__init__`` that only forwarded its arguments to
    ``Player.__init__`` has been removed; construction is unchanged.
    """
    def get_move(self, board):
        """Return the move chosen by the computer search for ``board``."""
        return self.get_computer_move(board)
class Human(Player):
    """Console-controlled player: moves are typed on stdin and validated."""
    def get_move(self, board):
        """Prompt until a valid move string is entered and return the parsed move.

        Returns None when no legal move exists, ``()`` for the ``!skip``
        command, the computer's choice for ``!get_move``, and otherwise
        either ``(pawn_move,)`` or ``(pawn_move, wall_placement)``.
        """
        # Check if there are any legal moves
        if len(self.legal_board_moves(board)) == 0:
            return None
        # Ask for input until the move is valid
        move = None
        while not self.valid_move(board, move):
            move = input("Enter the move: ").strip()
            # Add command for generating a computer move for the player
            if move == "!get_move":
                return self.get_computer_move(board)
            elif move == "!skip":
                return ()
        player, pawn_index, pawn_row, pawn_column, wall_type, wall_row, wall_column = self.extract_move_info(move)
        pawn_move = (player, pawn_index, pawn_row, pawn_column)
        # BUGFIX: the previous conditional-expression return always built a
        # two-element tuple (with a wall part of Nones) because the comma
        # binds looser than ``... if ... else ...``.
        if wall_type is None:
            return (pawn_move,)
        return pawn_move, (wall_type, wall_row, wall_column)
    def valid_move(self, board, move):
        """Return True when ``move`` is a well-formed and legal move string."""
        if move is None:
            return False
        # Check the format. The wall part is optional here: a pawn-only move
        # is accepted further below only when the player has no walls left.
        # BUGFIX: the original pattern made the pawn part's closing bracket
        # optional and the wall group mandatory, rejecting all pawn-only moves.
        if not fullmatch(r"\[[XO] [12]] \[[1-9A-Z] [1-9A-Z]]( \[[ZP] [1-9A-Z] [1-9A-Z]])?", move):
            print("Invalid format! Input must be of [X/O 1/2] [new_row new_column] ([Z/P row column])")
            return False
        player, pawn_index, pawn_row, pawn_column, wall_type, wall_row, wall_column = self.extract_move_info(move)
        # Check the player
        if player != self.player:
            print("You cannot move your opponents pawns!")
            return False
        # Check pawn move
        if not board.valid_pawn_move(player, pawn_index, pawn_row, pawn_column):
            return False
        if wall_type is not None:
            # Check if the player has the wall type
            if (wall_type == 'Z' and self.vertical_walls == 0) or (wall_type == 'P' and self.horizontal_walls == 0):
                print("There are no more walls of that type to place!")
                return False
            # Check wall placement
            if not board.valid_wall_placement(wall_type, wall_row, wall_column):
                return False
            # Check if new position has no blocked paths
            if not board.check_paths_after_move(((player, pawn_index, pawn_row, pawn_column),
                                                 (wall_type, wall_row, wall_column))):
                return False
        # Check if wall can be placed
        elif self.vertical_walls > 0 or self.horizontal_walls > 0:
            print("You must place a wall!")
            return False
        return True
    # Move must be of "[X/O 1/2] [new_row new_column] ([Z/P row column]" format
    # Returns player, pawn_index, pawn_row, pawn_column, wall_type, wall_row, wall_column
    @staticmethod
    def extract_move_info(move):
        """Parse a validated move string into its components by fixed positions."""
        player = move[1]
        pawn_index = int(move[3]) - 1
        pawn_row, pawn_column = Board.board_index_to_matrix_index(move[7]), Board.board_index_to_matrix_index(move[9])
        wall_type = None
        wall_row, wall_column = None, None
        # A bare pawn move is exactly 11 characters; anything longer carries a wall.
        if len(move) != 11:
            wall_type = move[13]
            wall_row = Board.board_index_to_matrix_index(move[15])
            wall_column = Board.board_index_to_matrix_index(move[17])
        return player, pawn_index, pawn_row, pawn_column, wall_type, wall_row, wall_column
| JovanMarkovic99/blockade-board-game | players.py | players.py | py | 31,405 | python | en | code | 0 | github-code | 50 |
26012548738 | #!/usr/bin/env python
'''Testing the server by sending logs'''
from pysyslogclient import SyslogClientRFC5424, SyslogClientRFC3164
def test_tcp_rfc3164():
    """Send one test message over TCP using the RFC 3164 (BSD) syslog format."""
    client = SyslogClientRFC3164('127.0.0.1', 1514, proto='TCP')
    client.log('My message', program='myapp')
    client.close()
def test_tcp_rfc5424():
    """Send one test message over TCP using the RFC 5424 syslog format."""
    client = SyslogClientRFC5424('127.0.0.1', 1514, proto='TCP')
    client.log('My message', program='myapp')
    client.close()
if __name__ == '__main__':
    # Exercise both protocol variants against a syslog server expected to be
    # listening on 127.0.0.1:1514.
    test_tcp_rfc3164()
    test_tcp_rfc5424()
| snoozeweb/snooze_plugins | input/syslog/examples/client.py | client.py | py | 522 | python | en | code | 0 | github-code | 50 |
73741877595 | from day18 import Day18
import unittest
class TestDay18(unittest.TestCase):
    """Tests for the Day18 solutions against the bundled sample inputs."""
    def test_part1(self) -> None:
        """Part 1 of the first sample input should yield 4."""
        # Renamed from `input`/`output` to avoid shadowing builtins.
        puzzle = Day18().read_file("tests/test_day18.1.txt")
        expected = 4
        actual = Day18.solve_part1(puzzle)
        self.assertEqual(actual, expected, f"input={puzzle}, expected={expected}, actual={actual}")
    def test_part2(self) -> None:
        """Part 2 of the second sample input should yield 3."""
        puzzle = Day18().read_file("tests/test_day18.2.txt")
        expected = 3
        actual = Day18.solve_part2(puzzle)
        self.assertEqual(actual, expected, f"input={puzzle}, expected={expected}, actual={actual}")
| jimmynguyen/advent-of-code | 2017/tests/test_day18.py | test_day18.py | py | 579 | python | en | code | 0 | github-code | 50 |
31557975708 | # coding: utf-8
import sys
sys.path.append('..')
from common import config
# To run on GPU, uncomment the line below (requires cupy)
# ===============================================
# config.GPU = True
# ===============================================
from common.np import *
import pickle
from common.trainer import Trainer
from common.optimizer import Adam
from cbow import CBOW
from skip_gram import SkipGram
from common.util import create_contexts_target, to_cpu, to_gpu
from dataset import ptb
# Hyperparameters
window_size = 5
hidden_size = 100
batch_size = 100
max_epoch = 10
# Load the PTB training corpus and build (context, target) pairs
corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
contexts, target = create_contexts_target(corpus, window_size)
if config.GPU:
    contexts, target = to_gpu(contexts), to_gpu(target)
# Build the model, optimizer and trainer
model = CBOW(vocab_size, hidden_size, window_size, corpus)
# model = SkipGram(vocab_size, hidden_size, window_size, corpus)
optimizer = Adam()
trainer = Trainer(model, optimizer)
# Train and plot the loss curve
trainer.fit(contexts, target, max_epoch, batch_size)
trainer.plot()
# Save the data needed later (word vectors and vocabulary mappings)
word_vecs = model.word_vecs
if config.GPU:
    word_vecs = to_cpu(word_vecs)
params = {}
params['word_vecs'] = word_vecs.astype(np.float16)
params['word_to_id'] = word_to_id
params['id_to_word'] = id_to_word
pkl_file = 'cbow_params.pkl' # or 'skipgram_params.pkl'
with open(pkl_file, 'wb') as f:
    pickle.dump(params, f, -1)
| UserXiaohu/Natrual-Language-Processing | code/ch04/train.py | train.py | py | 1,481 | python | en | code | 25 | github-code | 50 |
2483177839 | from datetime import time as d_time, timedelta as d_timedelta, timezone as d_timezone
import logging
from modules.json import Json
from modules.threading import Thread
from os.path import getmtime, isfile
from time import sleep
from typing import Union
from threading import current_thread
# Shared application logger.
logger = logging.getLogger("main")
# Path of the runtime configuration file.
_FILE_PATH = "config.json"
# Raw parsed contents of config.json; assigned by Config.update().
_CONFIG: dict
# Last seen modification time of config.json (used by auto_update below).
modify_time = 0
def _gen_config():
    """
    Generate config.json from config-example.json when it is missing.
    Copies the example file verbatim, loads it, then flags the configuration
    as not ready and stops the calling thread so the application does not run
    on a template configuration.
    """
    with open("config-example.json", mode="rb") as example_file:
        EXAMPLE_DATA = example_file.read()
    with open("config.json", mode="wb") as config_file:
        config_file.write(EXAMPLE_DATA)
    Config.update()
    sleep(1)
    logger.critical("config.json not found.")
    logger.info("Generate a new config.json from config-example.json.")
    Config.ready(False)
    # NOTE(review): relies on the running thread being the project's
    # modules.threading.Thread, which apparently provides stop(); the stdlib
    # Thread does not -- confirm.
    current_thread().stop()
def _config_patch():
    """
    Completeness check of the configuration file.
    Merges config.json over config-example.json so every missing key is filled
    in with its example default, then writes the merged result back.
    """
    example_data = Json.load("config-example.json")
    config_data = Json.load(_FILE_PATH)
    patched = __patch(example_data, config_data)
    Json.dump("config.json", patched)
def __patch(example: dict, config: dict):
    """
    Configuration completeness repair.
    Return a copy of ``config`` where every key missing from it is filled in
    with the value from ``example``; nested dict sections are merged
    recursively.  Neither input is mutated.
    """
    config = config.copy()
    for key, value in example.items():
        if key not in config:
            # Key missing entirely: take the example default.
            config[key] = value
        elif isinstance(value, dict) and isinstance(config[key], dict):
            # Both sides are sections: merge them recursively.  Guarding both
            # sides also fixes a crash when the user replaced a section with a
            # scalar (the old code recursed into a non-dict example value).
            config[key] = __patch(value, config[key])
    return config
class _Discord_Config(dict):
    """Discord bot settings: token, command prefixes and admin role id."""
    token: str
    prefixs: list[str] = []
    admin_role: int
    def __init__(self, _config: dict) -> None:
        # Keep the raw mapping available through the dict interface ...
        self.update(_config)
        # ... and mirror the known keys as attributes for convenient access.
        self.token = _config["token"]
        self.prefixs = _config["prefixs"]
        self.admin_role = _config["admin_role"]
class _Rcon_Info(dict):
    """RCON connection settings for a single server."""
    address: str
    port: int
    password: str
    timeout: int
    m_filter: str
    def __init__(self, _config: dict) -> None:
        # Retain the raw mapping and expose the known keys as attributes.
        self.update(_config)
        self.address = _config["address"]
        self.port = _config["port"]
        self.password = _config["password"]
        self.timeout = _config["timeout"]
        self.m_filter = _config["m_filter"]
class _Discord_Info(dict):
    """Per-server Discord settings: channel ids and message forwarding flag."""
    chat_channel: int
    state_channel: int
    message_forward: bool
    def __init__(self, _config: dict) -> None:
        # Retain the raw mapping and expose the known keys as attributes.
        self.update(_config)
        self.chat_channel = _config["chat_channel"]
        self.state_channel = _config["state_channel"]
        self.message_forward = _config["message_forward"]
class _Ark_Server(dict):
    """Settings and runtime state for one managed ARK server instance."""
    key: str
    local: bool
    dir_path: str
    file_name: str
    display_name: str
    rcon: _Rcon_Info
    discord: _Discord_Info
    save: str
    restart: str
    clear_dino: bool
    rcon_session = None
    def __init__(self, _config: dict) -> None:
        # Retain the raw mapping and expose the known keys as attributes.
        self.update(_config)
        self.key = _config["key"]
        self.local = _config["local"]
        self.dir_path = _config["dir_path"]
        self.file_name = _config["file_name"]
        self.display_name = _config["display_name"]
        # Nested sections get their own typed wrappers.
        self.rcon = _Rcon_Info(_config["rcon"])
        self.discord = _Discord_Info(_config["discord"])
        self.save = _config["save"]
        self.restart = _config["restart"]
        self.clear_dino = _config["clear_dino"]
class _Web_Console(dict):
    """Web console bind address, port and debug flag."""
    host: str
    port: int
    debug: bool
    def __init__(self, _config: dict) -> None:
        # Retain the raw mapping and expose the known keys as attributes.
        self.update(_config)
        self.host = _config["host"]
        self.port = _config["port"]
        self.debug = _config["debug"]
class _Time_Data(list[str, bool]):
    """A single schedule entry: ``[ISO time string, backup flag]``."""
    time: d_time
    backup: bool
    def __init__(self, _config: list) -> None:
        # Keep the raw list form so it can be serialized back unchanged.
        self.extend(_config)
        self.time = d_time.fromisoformat(_config[0])
        self.backup = _config[1]
class _Time_Setting(dict):
    """Scheduling settings: time zone, save delay, save/restart tables, backup age.

    BUGFIX: ``save_tables``/``restart_tables`` used to be class-level dicts
    mutated in ``__init__``, so entries were shared (and accumulated) across
    every instance and configuration reload.  They are now fresh
    per-instance dicts.
    """
    time_zone: d_timezone
    save_delay: int
    # String annotations: _Time_Data is defined in the same module and must
    # not be evaluated at class-creation time.
    save_tables: "dict[str, list[_Time_Data]]"
    restart_tables: "dict[str, list[_Time_Data]]"
    backup_day: d_timedelta
    def __init__(self, _config: dict) -> None:
        # Retain the raw mapping and expose the known keys as attributes.
        self.update(_config)
        self.time_zone = d_timezone(d_timedelta(hours=_config["time_zone"]))
        self.save_delay = _config["save_delay"]
        # Fresh per-instance tables: stale keys from a previously loaded
        # configuration can no longer leak into a reloaded one.
        self.save_tables = {
            key: [_Time_Data(entry) for entry in entries]
            for key, entries in _config["save_tables"].items()
        }
        self.restart_tables = {
            key: [_Time_Data(entry) for entry in entries]
            for key, entries in _config["restart_tables"].items()
        }
        self.backup_day = d_timedelta(days=_config["backup_day"])
class _Other_Setting(dict):
    """Miscellaneous settings: battery warning level, message filters, templates."""
    low_battery: int
    m_filter_tables: dict[dict[list[str]]] = {}
    log_level: str
    message: dict[str] = {}
    state_message: dict[str] = {}
    def __init__(self, _config: dict):
        # Retain the raw mapping and expose the known keys as attributes.
        self.update(_config)
        self.low_battery = _config["low_battery"]
        # Copied so later edits to the parsed config do not leak in here.
        self.m_filter_tables = _config["m_filter_tables"].copy()
        self.log_level = _config["log_level"]
        self.message = _config["message"]
        self.state_message = _config["state_message"]
class Config:
    """Process-wide configuration state, exposed through class attributes."""
    discord: _Discord_Config
    servers: list[_Ark_Server] = []
    web_console: _Web_Console
    time_setting: _Time_Setting
    other_setting: _Other_Setting
    updated: bool = False
    readied: Union[bool, None] = None
    @classmethod
    def update(self):
        """
        Update the current settings from the configuration file.
        """
        # NOTE: ``self`` is actually the class here (classmethod).
        global _CONFIG
        _config_patch()
        _CONFIG = Json.load(_FILE_PATH)
        self.config = _CONFIG.copy()
        self.discord = _Discord_Config(_CONFIG["discord"])
        i = 0
        for _config in _CONFIG["servers"]:
            if len(self.servers) > i:
                # Existing server entry: replace its RCON session.
                # NOTE(review): the entry's other settings are not re-read
                # from the new config here -- confirm this is intended.
                from modules.rcon import Rcon_Session
                self.servers[i].rcon_session = Rcon_Session(self.servers[i])
            else:
                self.servers.append(_Ark_Server(_config))
            i += 1
        self.web_console = _Web_Console(_CONFIG["web_console"])
        self.time_setting = _Time_Setting(_CONFIG["time_setting"])
        self.other_setting = _Other_Setting(_CONFIG["other_setting"])
        self.updated = True
    @classmethod
    def ready(self, value: bool):
        # Set by the watcher: True once loaded, False when generation failed.
        self.readied = value
def auto_update():
    """
    Keep the configuration in sync with config.json.
    Performs the initial load (generating the file from the example when it
    is missing), marks the configuration ready, then polls the file's
    modification time once per second and reloads on change.
    """
    global modify_time
    # Check whether the configuration file exists
    if not isfile(_FILE_PATH):
        _gen_config()
    else:
        Config.update()
    # Initial load finished
    Config.ready(True)
    while True:
        # Reload when the modification time of config.json has changed
        if getmtime("config.json") != modify_time:
            Config.update()
            modify_time = getmtime("config.json")
        sleep(1)
# Launch the watcher thread at import time; it performs the initial load and
# then re-reads config.json whenever its modification time changes.
auto_update_thread = Thread(target=auto_update, name="Config_Auto_Update")
auto_update_thread.start()
| AloneAlongLife/ARK-Server-Manager-Plus_2.0 | modules/config.py | config.py | py | 7,287 | python | en | code | 2 | github-code | 50 |
70822559517 | import instance_manager as ec2_util
import boto3
sqs_client = boto3.client('sqs', region_name="us-east-1")
def get_sqs_url(client):
    """Look up and return the URL of the "Request-Queue" SQS queue."""
    response = client.get_queue_url(QueueName="Request-Queue")
    return response["QueueUrl"]
# Work queue URL, resolved once at start-up.
INPUT_QUEUE = get_sqs_url(sqs_client)
# Web-tier instance id -- excluded from all scaling actions below.
WEB_TIER = "i-0711d441e1e48e5b5"
#APP_TIER = "i-082168043513b429a"
def auto_scale_instances():
    """Scale the app tier up or down based on the request-queue backlog.

    Reads the approximate queue length from SQS, then:
      * empty queue -> stop every running app-tier instance,
      * 1..5 items  -> ensure at least one worker is running,
      * 6..50 items -> scale up to 10 workers,
      * >50 items   -> scale up to 18 workers.
    The web-tier instance is never touched.
    """
    queue_length = int(
        sqs_client.get_queue_attributes(QueueUrl=INPUT_QUEUE, AttributeNames=['ApproximateNumberOfMessages']).get(
            "Attributes").get("ApproximateNumberOfMessages"))
    print("Request queue length:", queue_length)
    running_instances = ec2_util.get_running_instances()
    stopped_instances = ec2_util.get_stopped_instances()
    # The web tier must keep running regardless of load.
    running_instances.remove(WEB_TIER)
    if queue_length == 0:
        # Down-scale: stop every app-tier instance (only the web tier survives).
        all_instances = ec2_util.get_running_instances()
        all_instances.remove(WEB_TIER)
        print("Queue is empty, shutting down all instances except 1 (downscaling)")
        ec2_util.stop_multiple_instances(all_instances)
        return
    elif 1 <= queue_length <= 5:
        # Light load: at least one worker must be alive.
        if len(running_instances) == 0:
            if len(stopped_instances) >= 1:
                ec2_util.start_instance(stopped_instances[0])
            else:
                ec2_util.create_instance()
    elif 5 < queue_length <= 50:
        _scale_up_to(running_instances, stopped_instances, 10)
    else:
        _scale_up_to(running_instances, stopped_instances, 18)
def _scale_up_to(running_instances, stopped_instances, target):
    """Start stopped instances (creating new ones if needed) until ``target`` run."""
    needed_instances = target - len(running_instances)
    if needed_instances <= 0:
        return
    length_of_stopped = len(stopped_instances)
    if length_of_stopped >= needed_instances:
        ec2_util.start_multiple_instances(stopped_instances[:needed_instances])
    else:
        # Start everything we have, then launch the remainder from scratch.
        ec2_util.start_multiple_instances(stopped_instances)
        for _ in range(needed_instances - length_of_stopped):
            ec2_util.create_instance()
# Entry point: one scaling pass per invocation (presumably re-run by an
# external scheduler -- confirm).
print("Starting Auto Scaling")
auto_scale_instances()
# NOTE(review): exit() is intended for interactive sessions; sys.exit(0) is
# the conventional form in scripts.
exit(0)
| akhi-uday/CSE546 | auto-scale/controller.py | controller.py | py | 2,644 | python | en | code | 0 | github-code | 50 |
9303118777 | from sys import stdin
from sys import stdin
from math import isqrt


def is_prime(num: int) -> bool:
    """Return True when ``num`` (>= 2) has no divisor between 2 and sqrt(num)."""
    # Checking up to the integer square root is sufficient: any factor pair
    # has at least one member <= sqrt(num).
    for divisor in range(2, isqrt(num) + 1):
        if num % divisor == 0:
            return False
    return True


def main() -> None:
    """Print all primes below the integer read from stdin, space-separated."""
    limit = int(stdin.readline())
    for candidate in range(2, limit):
        if is_prime(candidate):
            print(candidate, end=" ")


if __name__ == "__main__":
    main()
| niranjrajasekaran/efficient-solution | prime_number.py | prime_number.py | py | 223 | python | en | code | 0 | github-code | 50 |
73383830876 | """
##############################################
Clustering analysis module
##############################################
All functions related to the clustering of poles for automatic OMA.
"""
import numpy as np
import hdbscan
from . import modal
def crossdiff(arr, relative=False, allow_negatives=False):
    """
    Establish cross difference matrix used for clustering analysis.
    Arguments
    ---------------------------
    arr : double
        array to provide cross difference of (n_points long)
    relative : False, optional
        output relative difference or absolute difference
    allow_negatives : False, optional
        whether or not to allow negative difference
    Returns
    ---------------------------
    diff : double
        cross-difference matrix (n_points-by-n_points)
    References
    ---------------------------
    Kvåle and Øiseth :cite:`Kvale2020`
    """
    # arr1[i, j] = arr[j], arr2[i, j] = arr[i], so diff[i, j] ~ arr[j] - arr[i].
    arr1, arr2 = np.meshgrid(arr, arr)
    if relative:
        scaling = arr1
    else:
        # Cleaner than the previous ``arr1*0+1.0`` idiom; same values.
        scaling = np.ones_like(arr1)
    if allow_negatives:
        # Treat +/- values as interchangeable: take the smaller of the
        # difference and the sum, separately for real and imaginary parts.
        diff = np.minimum(np.real((arr1-arr2)/scaling), np.real((arr1+arr2)/scaling)) + np.minimum(np.imag((arr1-arr2)/scaling), np.imag((arr1+arr2)/scaling))*1j
    else:
        diff = (arr1-arr2)/scaling
    # Remove imaginary 0 if input is float (real)
    if not np.any(np.iscomplex(arr)):
        diff = np.real(diff).astype('float')
    return diff
def establish_tot_diff(lambd, phi, order, boolean_stops='default', scaling=None):
    """
    Establish total difference matrix based on input modes (from find_stable_modes).
    Arguments
    ---------------------------
    lambd : double
        array with complex-valued eigenvalues deemed stable
    phi : double
        2d array with complex-valued eigenvectors (stacked as columns), each column corresponds to a mode
    orders : int
        corresponding order for each stable mode
    boolean_stops : 'default', optional
        boolean stops to remove problematic poles (refers to difference matrices), e.g., to avoid same-order poles to appear
        in the same cluster the standard value {'order': [1, np.inf]} could be used
    scaling : {'mac': 1.0, 'lambda_real': 1.0, 'lambda_imag': 1.0}, optional
        scaling of predefined available variables used in total difference (available
        variables: 'mac', 'lambda_real', 'lambda_imag', 'omega_d', 'omega_n', 'order', 'xi')
    Returns
    ---------------------------
    tot_diff : double
        cross-difference matrix (n_points-by-n_points)
    References
    ---------------------------
    Kvåle and Øiseth :cite:`Kvale2020`
    """
    # BUGFIX: compare sentinel by equality -- ``is 'default'`` is an identity
    # check and only worked through CPython string interning.
    if boolean_stops == 'default':
        boolean_stops = {'order': [1, np.inf]}
    elif boolean_stops is None:
        boolean_stops = {}
    if scaling is None:
        scaling = {'mac': 1.0, 'lambda_real': 1.0, 'lambda_imag': 1.0}
    omega_n = np.abs(lambd)
    omega_d = np.abs(np.imag(lambd))
    xi = -np.real(lambd)/np.abs(lambd)
    # Establish dictionary with available difference variables
    diff_vars = dict()
    diff_vars['mac'] = np.abs(1.0 - modal.xmacmat(phi))
    xlambda_diff = crossdiff(lambd, relative=True, allow_negatives=True)
    diff_vars['lambda_real'] = np.abs(np.real(xlambda_diff))
    diff_vars['lambda_imag'] = np.abs(np.imag(xlambda_diff))
    diff_vars['omega_n'] = np.abs(crossdiff(omega_n, relative=True))
    diff_vars['omega_d'] = np.abs(crossdiff(omega_d, relative=True))
    diff_vars['order'] = np.abs(crossdiff(order, relative=False))
    diff_vars['xi'] = np.abs(crossdiff(xi, relative=True))
    # Establish boolean hard stop differences (inf marks a forbidden pair)
    boolean_stop_diff = np.zeros(np.shape(diff_vars['xi']))
    for key in boolean_stops.keys():
        stops = boolean_stops[key]
        invalid_ix = np.logical_or((diff_vars[key]<stops[0]), (diff_vars[key]>stops[1]))
        boolean_stop_diff[invalid_ix] = np.inf
    # Establish total difference (Euclidean combination of scaled variables)
    tot_diff = np.zeros(np.shape(diff_vars['xi']))
    for var in scaling:
        tot_diff += boolean_stop_diff + (diff_vars[var]*scaling[var])**2
    tot_diff = np.sqrt(tot_diff) + boolean_stop_diff
    return tot_diff
class PoleClusterer:
    """
    Object to create pole clusters.
    Arguments
    ---------------------------
    lambd : double
        array with complex-valued eigenvalues deemed stable
    phi : double
        2d array with complex-valued eigenvectors (stacked as columns), each column corresponds to a mode
    orders : int
        corresponding order for each stable mode
    min_samples : 20, optional
        number of points in neighbourhood for point to be
        considered core point (larger value => more conservative clustering)
    min_cluster_size : 20, optional
        when min_cluster_size points fall out of cluster it is not a split,
        it is merely points falling out of the cluster
    alpha : 1.0, optional
        distance scaling parameter, implies conservativism of clustering (higher => fewer points)
    boolean_stops : 'default', optional
        boolean stops to remove problematic poles (refers to difference matrices), e.g., to avoid same-order poles to appear
        in the same cluster the standard value {'order': [1, np.inf]} could be used
    scaling : {'mac': 1.0, 'lambda_real': 1.0, 'lambda_imag': 1.0}, optional
        scaling of predefined available variables used in total difference (available
        variables: 'mac', 'lambda_real', 'lambda_imag', 'omega_d', 'omega_n', 'order', 'xi')
    References
    ---------------------------
    Kvåle and Øiseth :cite:`Kvale2020`
    """
    def __init__(self, lambd, phi, order, min_samples=20, min_cluster_size=20, alpha=1.0, boolean_stops='default', scaling=None):
        self.boolean_stops = boolean_stops
        if scaling is None:
            self.scaling = {'mac': 1.0, 'lambda_real': 1.0, 'lambda_imag': 1.0}
        else:
            self.scaling = scaling
        # metric='precomputed': fit() receives the dissimilarity matrix directly.
        self.hdbscan_clusterer = hdbscan.HDBSCAN(metric='precomputed', min_samples=min_samples, min_cluster_size=min_cluster_size, alpha=alpha, gen_min_span_tree=False)
        self.lambd = lambd
        self.phi = phi
        self.order = order
        # Clustering is performed eagerly on construction.
        self.cluster()
    def cluster(self):
        """
        Create tot_diff matrix and HDBSCAN cluster object from input data.
        """
        self.tot_diff = establish_tot_diff(self.lambd, self.phi, self.order, boolean_stops=self.boolean_stops, scaling=self.scaling)
        self.hdbscan_clusterer.fit(self.tot_diff)
    def postprocess(self, prob_threshold=0.0, normalize_and_maxreal=True):
        """
        Postprocess cluster object (sort and restrict).
        Arguments
        ---------------------------
        prob_threshold : 0.0, optional
            threshold value for probability of point belonging
            to its determined cluster
        normalize_and_maxreal : True, optional
            whether or not to normalize each mode shape and maximize its real value
            (rotate all components equally much in complex plane)
        Returns
        ---------------------------
        lambd_used : double
            sorted/remaining eigenvalues after restrictions/sort
        phi_used : double
            sorted/remaining eigenvectors after restrictions/sort
        order_stab_used : double
            corresponding orders
        group_ix : int
            indices (sorted based on damped natural freq.) of modes
        all_single_ix : double
            index corresponding to input data
        probs : double
            probabilities of all points in all clusters
        """
        omega_d = np.abs(np.imag(self.lambd))
        if normalize_and_maxreal:
            phi0,__ = modal.normalize_phi(modal.maxreal(self.phi))
        else:
            # Multiply by 1.0 to work on a copy without mutating stored modes.
            phi0 = self.phi*1.0
        # Establish all labels
        labels_all = self.hdbscan_clusterer.labels_
        # Align modes
        for label in np.unique(labels_all):
            phi0[:, labels_all==label] = modal.align_modes(phi0[:, labels_all==label]) #also align modes
        # Remove noise (label <0)
        keep_ix_temp = np.where(labels_all>=0)[0]
        # Apply probability threshold
        probs_temp = self.hdbscan_clusterer.probabilities_[keep_ix_temp]
        keep_ix = keep_ix_temp[probs_temp>=prob_threshold]
        probs_unsorted = self.hdbscan_clusterer.probabilities_[keep_ix]
        # Retain only "kept" indices from arrays
        labels_unsorted = labels_all[keep_ix]
        if len(labels_unsorted) == 0:
            # Nothing survived the noise/probability filtering.
            return [], [], [], [], [], []
        n_labels = max(labels_unsorted)+1
        # Sort of cluster groups based on mean frequency
        wmean = [np.mean(omega_d[keep_ix][labels_unsorted==label]) for label in range(0, max(labels_unsorted)+1)]
        sort_ix = np.argsort(wmean)
        # Rearrange labels and probs (sorted based on frequency)
        labels = np.array([np.where(sort_ix==label)[0][0] for label in labels_unsorted]).flatten()
        # NOTE(review): probs is re-stacked label-major here while ``labels``
        # keeps the original point order -- verify that the indexing in the
        # keep_best_ix loop below matches the intended correspondence.
        probs = np.hstack([probs_unsorted[labels==label] for label in range(0, n_labels)])
        # Remove double results (more poles at same order within same cluster)
        keep_single_ix = [None]*n_labels
        for label in range(0, n_labels):
            relevant_orders = self.order[keep_ix][labels==label]
            unique_relevant_orders = np.unique(relevant_orders)
            groups = [np.where(o==relevant_orders)[0] for o in unique_relevant_orders]
            keep_best_ix = []
            these_ix = np.where(labels==label)[0]
            for group in groups:
                # Keep the highest-probability pole per (cluster, order) pair.
                keep_best_ix.append(these_ix[group[np.argmax(probs[labels==label][group])]])
            keep_single_ix[label] = np.array(keep_best_ix)
        all_single_ix = np.hstack(keep_single_ix)
        group_ixs = labels[all_single_ix]
        probs = probs[all_single_ix]
        order_stab_used = self.order[keep_ix][all_single_ix]
        lambd_used = self.lambd[keep_ix][all_single_ix]
        phi_used = phi0[:, keep_ix][:, all_single_ix]
        return lambd_used, phi_used, order_stab_used, group_ixs, all_single_ix, probs
def group_clusters(lambd_used, phi_used, order_stab_used, group_ixs, all_single_ixs, probs):
    '''
    Group the output of PoleClusterer.postprocess()
    Arguments
    ---------------------------
    lambd_used : double
        sorted/remaining eigenvalues after restrictions/sort
        to its determined cluster
    phi_used : double
        sorted/remaining eigenvectors after restrictions/sort
    order_stab_used : double
        corresponding orders
    group_ixs : int
        indices (sorted based on damped natural freq.) of modes
    all_single_ixs : double
        index corresponding to input data
    probs : double
        probabilities of all points in all clusters
    Returns
    ---------------------------
    xi_cluster : double
        list of arrays with xi grouped
    omega_n_cluster : double
        list of arrays with omega_n grouped
    phi_cluster : double
        list of arrays with phi grouped
    order_cluster : double
        list of arrays with orders grouped
    probs_cluster : double
        list of arrays with probs grouped
    ixs_cluster : double
        list of arrays with ixs corresponding to each cluster
    '''
    n_groups = len(np.unique(group_ixs))
    xi_cluster = [None]*n_groups
    omega_n_cluster = [None]*n_groups
    phi_cluster = [None]*n_groups
    order_cluster = [None]*n_groups
    probs_cluster = [None]*n_groups
    ixs_cluster = [None]*n_groups
    for group_ix in range(n_groups):
        this_ix = group_ixs==group_ix
        # Damping ratio and natural frequency derived from the eigenvalues.
        # (A dead ``lambd_cluster`` list that was never returned was removed.)
        xi_cluster[group_ix] = -np.real(lambd_used[this_ix])/np.abs(lambd_used[this_ix])
        omega_n_cluster[group_ix] = np.abs(lambd_used[this_ix])
        phi_cluster[group_ix] = phi_used[:, this_ix]
        order_cluster[group_ix] = order_stab_used[this_ix]
        probs_cluster[group_ix] = probs[this_ix]
        ixs_cluster[group_ix] = all_single_ixs[this_ix]
    return xi_cluster, omega_n_cluster, phi_cluster, order_cluster, probs_cluster, ixs_cluster
def group_array(arr, group_ixs, axis=0):
    '''
    Group a single output array of PoleClusterer.postprocess() based on group indices.
    Arguments
    ---------------------------
    arr : double
        array to split into per-cluster pieces
    group_ixs : int
        indices (sorted based on damped natural freq.) of modes
    axis : 0, optional
        axis of ``arr`` along which the grouping is applied
    Returns
    ---------------------------
    arr_grouped : double
        list with one sub-array per cluster
    '''
    n_groups = len(np.unique(group_ixs))
    # One np.take per group along the requested axis.
    return [np.take(arr, np.where(group_ixs == group_ix)[0], axis=axis)
            for group_ix in range(n_groups)]
272915248 | import warnings
import os
import numpy as np
import types
from .dng import Tag, dngIFD, dngTag, DNG, DNGTags
from .defs import Compression, DNGVersion, SampleFormat
from .packing import *
from .camdefs import BaseCameraModel
class DNGBASE:
    def __init__(self) -> None:
        # Whether to LJ92-compress the image data (consumed by __process__).
        self.compress = None
        # Destination path for the generated DNG -- presumably consumed by the
        # writer methods outside this chunk; TODO confirm.
        self.path = None
        # DNGTags describing the frame; validated by __tags_condition__.
        self.tags = None
        # Optional callable applied to each raw frame by __filter__.
        self.filter = None
def __data_condition__(self, data : np.ndarray) -> None:
if data.dtype != np.uint16 and data.dtype != np.float32:
raise Exception("RAW Data is not in correct format. Must be uint16_t or float32_t Numpy Array. ")
def __tags_condition__(self, tags : DNGTags) -> None:
if not tags.get(Tag.ImageWidth):
raise Exception("No width is defined in tags.")
if not tags.get(Tag.ImageLength):
raise Exception("No height is defined in tags.")
if not tags.get(Tag.BitsPerSample):
raise Exception("Bit per pixel is not defined.")
    def __unpack_pixels__(self, data : np.ndarray) -> np.ndarray:
        # Identity hook -- presumably overridden by subclasses that need to
        # unpack packed raw pixel formats; TODO confirm against the subclasses.
        return data
def __filter__(self, rawFrame: np.ndarray, filter : types.FunctionType) -> np.ndarray:
if not filter:
return rawFrame
processed = filter(rawFrame)
if not isinstance(processed, np.ndarray):
raise TypeError("return value is not a valid numpy array!")
elif processed.shape != rawFrame.shape:
raise ValueError("return array does not have the same shape!")
if processed.dtype != np.uint16:
raise ValueError("array data type is invalid!")
return processed
def __process__(self, rawFrame : np.ndarray, tags: DNGTags, compress : bool) -> bytearray:
width = tags.get(Tag.ImageWidth).rawValue[0]
length = tags.get(Tag.ImageLength).rawValue[0]
bpp = tags.get(Tag.BitsPerSample).rawValue[0]
compression_scheme = Compression.LJ92 if compress else Compression.Uncompressed
sample_format = SampleFormat.Uint
backward_version = DNGVersion.V1_0
if rawFrame.dtype == np.float32:
sample_format = SampleFormat.FloatingPoint
# Floating-point data requires DNG 1.4
backward_version = DNGVersion.V1_4
# Floating-point data has to be compressed with deflate
if compress:
raise Exception('Compression is not supported for floating-point data')
if compress:
from ljpegCompress import pack16tolj
tile = pack16tolj(rawFrame, int(width*2),
int(length/2), bpp, 0, 0, 0, "", 6)
else:
if bpp == 8:
tile = rawFrame.astype('uint8').tobytes()
elif bpp == 10:
tile = pack10(rawFrame).tobytes()
elif bpp == 12:
tile = pack12(rawFrame).tobytes()
elif bpp == 14:
tile = pack14(rawFrame).tobytes()
else:
# 16-bit integers or 32-bit floats
tile = rawFrame.tobytes()
dngTemplate = DNG()
dngTemplate.ImageDataStrips.append(tile)
# set up the FULL IFD
mainIFD = dngIFD()
mainTagStripOffset = dngTag(
Tag.StripOffsets, [0 for tile in dngTemplate.ImageDataStrips])
mainIFD.tags.append(mainTagStripOffset)
mainIFD.tags.append(dngTag(Tag.NewSubfileType, [0]))
mainIFD.tags.append(dngTag(Tag.StripByteCounts, [len(
tile) for tile in dngTemplate.ImageDataStrips]))
mainIFD.tags.append(dngTag(Tag.Compression, [compression_scheme]))
mainIFD.tags.append(dngTag(Tag.Software, "PiDNG"))
mainIFD.tags.append(dngTag(Tag.DNGVersion, DNGVersion.V1_4))
mainIFD.tags.append(dngTag(Tag.DNGBackwardVersion, backward_version))
mainIFD.tags.append(dngTag(Tag.SampleFormat, [sample_format]))
for tag in tags.list():
try:
mainIFD.tags.append(tag)
except Exception as e:
print("TAG Encoding Error!", e, tag)
dngTemplate.IFDs.append(mainIFD)
totalLength = dngTemplate.dataLen()
mainTagStripOffset.setValue(
[k for offset, k in dngTemplate.StripOffsets.items()])
buf = bytearray(totalLength)
dngTemplate.setBuffer(buf)
dngTemplate.write()
return buf
def options(self, tags : DNGTags, path : str, compress=False) -> None:
self.__tags_condition__(tags)
self.tags = tags
self.compress = compress
self.path = path
def convert(self, image : np.ndarray, filename=""):
if self.tags is None:
raise Exception("Options have not been set!")
# valdify incoming data
self.__data_condition__(image)
unpacked = self.__unpack_pixels__(image)
filtered = self.__filter__(unpacked, self.filter)
buf = self.__process__(filtered, self.tags, self.compress)
file_output = False
if len(filename) > 0:
file_output = True
if file_output:
if not filename.endswith(".dng"):
filename = filename + '.dng'
outputDNG = os.path.join(self.path, filename)
with open(outputDNG, "wb") as outfile:
outfile.write(buf)
return outputDNG
else:
return buf
class RAW2DNG(DNGBASE):
    """Plain converter for raw frames supplied directly as numpy arrays."""
    def __init__(self) -> None:
        super().__init__()
class CAM2DNG(DNGBASE):
    """Converter bound to a camera model; DNG tags come from the model."""
    def __init__(self, model : BaseCameraModel) -> None:
        super().__init__()
        self.model = model
    def options(self, path : str, compress=False) -> None:
        """Configure output path and compression; tags are the model's tags."""
        tags = self.model.tags
        self.__tags_condition__(tags)
        self.tags = tags
        self.compress = compress
        self.path = path
class RPICAM2DNG(CAM2DNG):
    """Converter for Raspberry Pi camera raw captures: unpacks the packed
    CSI2P 10/12-bit byte layouts into uint16 pixels before DNG encoding."""
    def __data_condition__(self, data : np.ndarray) -> None:
        # Pi raw dumps arrive as packed uint8; anything else is probably
        # already unpacked, so only warn instead of raising.
        if data.dtype != np.uint8:
            warnings.warn("RAW Data is not in correct format. Already unpacked? ")
    def __unpack_pixels__(self, data : np.ndarray) -> np.ndarray:
        """Unpack a packed uint8 sensor frame into a uint16 pixel array.

        The packed layout (size/stride/bpp/format) comes from the camera
        model's `fmt` dict. Non-uint8 input is returned unchanged.
        """
        if data.dtype != np.uint8:
            return data
        width, height = self.model.fmt.get("size", (0,0))
        stride = self.model.fmt.get("stride", 0)  # NOTE(review): read but unused here
        bpp = self.model.fmt.get("bpp", 8)
        # check to see if stored packed or unpacked format
        if "CSI2P" in self.model.fmt.get("format", ""):
            s_bpp = bpp # stored_bitperpixel
        else:
            s_bpp = 16
        # Trim row padding beyond the active image area.
        bytes_per_row = int(width * (s_bpp / 8))
        data = data[:height, :bytes_per_row]
        if s_bpp == 10:
            # 10-bit packing: each group of 5 bytes holds 4 pixels; the 5th
            # byte carries the two low bits of each of the preceding four.
            data = data.astype(np.uint16) << 2
            for byte in range(4):
                data[:, byte::5] |= ((data[:, 4::5] >> ((byte+1) * 2)) & 0b11)
            data = np.delete(data, np.s_[4::5], 1)
        elif s_bpp == 12:
            # 12-bit packing: each group of 3 bytes holds 2 pixels; the 3rd
            # byte carries the low nibbles of the two preceding bytes.
            data = data.astype(np.uint16)
            shape = data.shape
            unpacked_data = np.zeros((shape[0], int(shape[1] / 3 * 2)), dtype=np.uint16)
            unpacked_data[:, ::2] = (data[:, ::3] << 4) + (data[:, 2::3] & 0x0F)
            unpacked_data[:, 1::2] = (data[:, 1::3] << 4) + ((data[:, 2::3] >> 4) & 0x0F)
            data = unpacked_data
        elif s_bpp == 16:
            # Already two bytes per pixel: reinterpret the buffer in place.
            data = np.ascontiguousarray(data).view(np.uint16)
        return data
class PICAM2DNG(RPICAM2DNG):
    """For use within picamera2 library"""
    def options(self, compress=False) -> None:
        """Configure conversion from the model's tags; no output path is used."""
        tags = self.model.tags
        self.__tags_condition__(tags)
        self.tags = tags
        self.compress = compress
        self.path = ""
| schoolpost/PiDNG | src/pidng/core.py | core.py | py | 7,645 | python | en | code | 172 | github-code | 50 |
3768099499 | import random
import time
# Helper functions
def hit(who, times, source=None):
    """Deal `times` random cards into the hand `who`.

    Cards are drawn from `source` when given, otherwise from the module-level
    `deck` (the previous hidden dependency, kept for backward compatibility).
    The drawn card is removed from the source list.
    """
    cards = deck if source is None else source
    for _ in range(times):
        card = random.choice(cards)
        cards.remove(card)
        who.append(card)
def total(who):
    """Score a blackjack hand: pips at face value, J/Q/K as 10,
    an ace as 11 while the running score is below 11, else as 1."""
    score = 0
    for card in who:
        if isinstance(card, int):
            score += card
        elif card in "JQK":
            score += 10
        else:
            score += 11 if score < 11 else 1
    return score
def show_hand(dealer=False, hide=False):
    """Print the player's or dealer's hand; with hide=True only the
    dealer's first card is shown."""
    if not dealer:
        print(f'Your Hand:{player_hand} Total:{total(player_hand)}')
        return
    if hide:
        print(f'Dealer Hand:{dealer_hand[0]}, ?')
    else:
        print(f'Dealer Hand:{dealer_hand} Total:{total(dealer_hand)}')
def generate_deck():
    """Return a fresh 52-card deck: four copies each of 2-10 followed by
    four copies each of A, J, Q, K."""
    ranks = list(range(2, 11)) + ['A', 'J', 'Q', 'K']
    return [rank for rank in ranks for _ in range(4)]
player_credits = 100
bet = 0  # NOTE(review): never read -- play() uses its own local `bet`
# Game loop
while player_credits > 0:
    # Fresh deck and empty hands each round; play() closes over these.
    deck = generate_deck()
    player_hand = []
    dealer_hand = []
    def play():
        # Play one round, mutating the global credit balance.
        global player_credits
        print(f'You have {player_credits} points')
        # Deal
        hit(player_hand, 2)
        hit(dealer_hand, 2)
        # Bet
        while True:
            try:
                bet = float(input("Place your bet\n"))
                if bet <= 0:
                    print('You can\'t bet nothing.🙃')
                elif bet > player_credits:
                    print('You don\'t have enough credits.😐')
                else:
                    break
            except ValueError:
                print('Invalid input.🚫')
        show_hand(dealer=True, hide=True)
        time.sleep(2)
        show_hand()
        # BlackJack: a natural 21 pays 1.5x unless the dealer also has one.
        if total(player_hand) == 21:
            if total(dealer_hand) == 21:
                print(f'Dealer Hand:{dealer_hand}')
                print('BlackJack Tie!🤯')
                return
            print('---- ♠BlackJack!, You Win!🍀 ----')
            print(f'You earned: {bet * 1.5}')
            player_credits += bet * 1.5
            return
        # Double Down -- only offered on a starting total of 9, 10 or 11.
        if total(player_hand) in range(9, 12):
            while True:
                print('Double Down?🤑')
                double = input('1:yes \n2:no\n')
                if double == '1':
                    if player_credits < bet:
                        print('You dont have enough credits.😮')
                        break
                    else:
                        # NOTE(review): the extra bet is deducted here AND the
                        # outcomes below still adjust by 2x bet, so net win is
                        # +bet and net loss is -3x bet -- confirm intended.
                        player_credits -= bet
                        print('You doubled your bet.🤪')
                        hit(player_hand, 1)
                        time.sleep(2)
                        show_hand()
                        # Final Check
                        if total(player_hand) > 21:
                            print('----You Busted on a 2x bet!, Dealer wins.😭 ----')
                            player_credits -= bet * 2
                            return
                        elif total(player_hand) > total(dealer_hand):
                            show_hand(dealer=True)
                            print('----You won a 2x bet!😎 ----')
                            player_credits += bet * 2
                            return
                        elif total(player_hand) == total(dealer_hand):
                            print('----Tie!😐----')
                            return
                        else:
                            show_hand(dealer=True)
                            print('----Dealer won a 2x bet.😰 ----')
                            player_credits -= bet * 2
                            return
                elif double == '2':
                    break
                else:
                    print('Invalid input.🚫')
        # Player hit cycle
        while True:
            next_move = input('🤔\n1: Hit\n2: Stay\n')
            if next_move == '1':
                hit(player_hand, 1)
                show_hand()
                # Bust Check
                if total(player_hand) > 21:
                    print('----You Busted!, Dealer wins.😒 ----')
                    player_credits -= bet
                    return
            elif next_move == '2':
                break
            else:
                print('Invalid Input.🚫')
        show_hand(dealer=True)
        # Dealer Hit Cycle -- dealer draws until reaching at least 17.
        while total(dealer_hand) < 17:
            hit(dealer_hand, 1)
            print('Dealer Hit!')
            show_hand(dealer=True)
            time.sleep(2)
            # Bust Check
            if total(dealer_hand) > 21:
                print('----Dealer Busted, You Win!😂 ----')
                print(f'You earned: {bet}')
                player_credits += bet
                return
        # Final Check -- compare standing hands.
        if total(player_hand) > total(dealer_hand):
            time.sleep(2)
            print('----You have the best hand!, You Win.😁 ----')
            print(f'You earned: {bet}')
            player_credits += bet
            return
        elif total(player_hand) == total(dealer_hand):
            time.sleep(2)
            print('----It\'s a Tie!🤷----')
            return
        else:
            time.sleep(2)
            print('----Dealer have the best hand, Dealer Wins.😥 ----')
            player_credits -= bet
            return
    play()
| Roberto-Yudi/Blackjack | blackjack.py | blackjack.py | py | 5,458 | python | en | code | 0 | github-code | 50 |
23351154201 | from django.shortcuts import render
from django.contrib.auth.decorators import login_required, permission_required
from .models import EmployeeInstance, Employee
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
import datetime
from .forms import AddEmployeeInstanceForm, SearchEmployeeInstanceForm
from django.utils import timezone
@permission_required('PeopleOnBoard.can_add_instance')
@login_required
def addEmployeeInstance(request):
    """Create a new EmployeeInstance from the submitted form, mark the
    employee's status as 'r', and redirect to the search page.

    On GET (or an invalid POST) the form is re-rendered on the search page.
    """
    now = timezone.now()
    if request.method == 'POST':
        # Bind the form to the submitted data.
        form = AddEmployeeInstanceForm(request.POST)
        if form.is_valid():
            emp_inst = EmployeeInstance()
            emp_inst.assigned_work_place_object = form.cleaned_data['assigned_work_place']
            emp_inst.employee_object = form.cleaned_data['employee']
            # Fall back to "now" when the user supplied no start time.
            emp_inst.start_time = form.cleaned_data['start_time'] or now
            emp_inst.note_name = form.cleaned_data['note']
            emp_inst.is_terminated = False
            emp_inst.assigner_object = request.user
            # Use the timezone-aware timestamp; the previous
            # datetime.datetime.now() was naive and inconsistent with
            # start_time above.
            emp_inst.created_time = now
            emp_inst.save()
            emp = get_object_or_404(Employee, pk=form.cleaned_data['employee'].pk)
            emp.status = 'r'  # assumes 'r' == assigned -- TODO confirm Employee status choices
            emp.save()
            return HttpResponseRedirect(reverse('search'))
    # If this is a GET (or any other method) present an unbound form.
    else:
        form = AddEmployeeInstanceForm()
    context = {
        'form': form,
    }
    return render(request,
                  'pobapp/search.html',
                  context=context,)
@login_required()
def pob(request):
    """Search page: on a valid POST, render the filtered employee-instance
    table; otherwise render the search page with (possibly bound) forms and
    the count of available employees (status 'a')."""
    emp = Employee  # class alias used for the availability count below
    if request.method == 'POST':
        form = SearchEmployeeInstanceForm(request.POST)
        formadd = AddEmployeeInstanceForm(request.POST)
        if form.is_valid():
            # Filter all instances by every search criterion at once;
            # select_related avoids per-row queries for the joined objects.
            emp_inst = EmployeeInstance.objects.all()
            query_set = emp_inst.select_related('assigner_object', 'employee_object', 'assigned_work_place_object').filter(
                assigned_work_place_object_id__exact=form.cleaned_data['assigned_work_place'],
                employee_object__company_object__company_name__icontains=form.cleaned_data['company'],
                start_time__gte=form.cleaned_data['start_time'],
                end_time__lte=form.cleaned_data['end_time'],
                employee_object__profession_object_id__exact=form.cleaned_data['profession'],
                employee_object__work_id_card_name__icontains=form.cleaned_data['work_id'],
                employee_object__id_card_name__icontains=form.cleaned_data['id_card'],
                employee_object__transportation_object_id__exact=form.cleaned_data['transport']
            )
            context = {
                'query_set': query_set
            }
            return render(request,
                          'pobapp/table.html',
                          context)
        # NOTE(review): an invalid POST falls through and re-renders the
        # search page with the bound forms (showing validation errors).
    else:
        form = SearchEmployeeInstanceForm()
        formadd = AddEmployeeInstanceForm()
    emp_count_a = emp.objects.filter(status__exact='a')
    context = {
        'form': form,
        'formadd': formadd,
        'emp_count_a': emp_count_a
    }
    return render(request,
                  'pobapp/search.html',
                  context=context)
@login_required
def view_update(request):
    """Render the update page listing all instances that are still open
    (no end time recorded)."""
    open_instances = EmployeeInstance.objects.filter(end_time=None)
    return render(request, 'pobapp/update.html', context={'query_set': open_instances})
@login_required
def view_table(request):
    """Render the results-table page without any query set."""
    return render(request, 'pobapp/table.html')
@permission_required('PeopleOnBoard.can_update_instance')
@login_required
def update_employee_instance(request, pk):
    """Terminate the employee instance `pk` (stamp end_time, set
    is_terminated) and mark the employee available ('a') again.

    NOTE(review): a non-POST request falls off the end and returns None,
    which Django rejects -- confirm this view is only reached via POST.
    """
    if request.method == 'POST':
        now = timezone.now()
        emp_inst = get_object_or_404(EmployeeInstance, pk=pk)
        emp = get_object_or_404(Employee, pk=emp_inst.employee_object.pk)
        emp_inst.end_time = now
        emp_inst.is_terminated = True
        emp_inst.save()
        emp.status = 'a'  # assumes 'a' == available -- TODO confirm Employee status choices
        emp.save()
        return HttpResponseRedirect(reverse('update'))
20613847786 | import numpy as np
import cv2
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
Image_Height=28
Image_Width=28
def get_image(name, size=(Image_Height, Image_Width), scale=(56, 56)):
    """Load a raw 8-bit grayscale image dump and return it upscaled.

    Arguments:
        name:  path to the raw file (flat uint8 pixel dump).
        size:  (height, width) of the stored image; defaults to the
               module-level 28x28 digit size.
        scale: (width, height) target passed to cv2.resize.

    Returns the resized image as a numpy array.
    """
    image = np.fromfile(str(name), dtype='uint8', sep="")
    image = image.reshape(list(size))
    return cv2.resize(image, scale)
# Load five samples each of the digits "zero" and "one".
zero1=get_image('zero_1.raw')
zero2=get_image('zero_2.raw')
zero3=get_image('zero_3.raw')
zero4=get_image('zero_4.raw')
zero5=get_image('zero_5.raw')
one1=get_image('one_1.raw')
one2=get_image('one_2.raw')
one3=get_image('one_3.raw')
one4=get_image('one_4.raw')
one5=get_image('one_5.raw')
img_array=[zero1,zero2,zero3,zero4,zero5,one1,one2,one3,one4,one5]
# Extract SIFT descriptors from every training image.
sift = cv2.xfeatures2d.SIFT_create()
des=[]
for i in img_array:
    kp,d=sift.detectAndCompute(i,None)
    des.append(d)
print(len(des[1]))
# Flatten the per-image descriptor lists into one (N, 128) array.
new_des=[]
count=0
for i in des:
    for j in range(0,len(i)):
        new_des.append(des[count][j])
    count+=1
new_des=np.array(new_des)
print(new_des.shape)
# Build a 2-word visual vocabulary by clustering all descriptors.
kmeans=KMeans(n_clusters=2)
kmeans.fit(new_des)
labels=kmeans.predict(new_des)
centroids=kmeans.cluster_centers_
# Describe the test image by assigning each of its descriptors to the
# nearest centroid. NOTE(review): the 'zero'/'one' names are arbitrary --
# k-means cluster order does not correspond to the digit classes.
eight=get_image('eight.raw')
kp8,d8=sift.detectAndCompute(eight,None)
vocab_list=[]
#print(len(centroids[1]))
for i in d8:
    if np.linalg.norm(i-centroids[1]) > np.linalg.norm(i-centroids[0]):
        vocab_list.append('zero')
    else:
        vocab_list.append('one')
print(vocab_list)
# Count word frequencies (also shown by the histogram below).
frequency_0=0
frequency_1=0
for i in vocab_list:
    if i=='zero':
        frequency_0+=1
    else:
        frequency_1+=1
#img=cv2.drawKeypoints(one1,kp8,eight,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
#img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
#cv2.imwrite('one_KP.jpg',img)
plt.hist(vocab_list)
plt.xlabel('codebook (centroid)')
plt.ylabel('frequency')
plt.title('bag of words histogram')
plt.show()
| slr248/EE569 | SIFT and Bag of Words/bag_of_words.py | bag_of_words.py | py | 1,803 | python | en | code | 0 | github-code | 50 |
12907939397 | from jesse.services.db import database
from playhouse.migrate import *
from jesse.enums import migration_actions
import click
def run():
    """
    Run migrations for each table, adding new fields that have not been
    added yet.

    Accepted action types: add, drop, rename, modify_type, allow_null, deny_null
    If the action type is 'rename', the field must also carry an 'old_name' key.
    Before making a column NOT NULL, all NULL values in it must be cleaned up.
    """
    click.clear()
    print('Checking for new database migrations...')
    database.open_connection()
    # create migrator
    migrator = PostgresqlMigrator(database.db)
    # run migrations
    _candle(migrator)
    _completed_trade(migrator)
    _daily_balance(migrator)
    _log(migrator)
    _order(migrator)
    _orderbook(migrator)
    _ticker(migrator)
    _trade(migrator)
    # create initial tables
    # NOTE(review): Option is imported but not passed to create_tables --
    # confirm whether its table is created elsewhere.
    from jesse.models import Candle, ClosedTrade, Log, Order, Option
    database.db.create_tables([Candle, ClosedTrade, Log, Order])
    database.close_connection()
def _candle(migrator):
    """Migrate the 'candle' table: add a nullable 'timeframe' column and swap
    the (exchange, symbol, timestamp) index for a unique
    (exchange, symbol, timeframe, timestamp) one."""
    if 'candle' not in database.db.get_tables():
        return
    fields = [
        {'action': migration_actions.ADD, 'name': 'timeframe', 'type': CharField(index=False, null=True)},
        {'action': migration_actions.DROP_INDEX, 'indexes': ('exchange', 'symbol', 'timestamp')},
        {'action': migration_actions.ADD_INDEX, 'indexes': ('exchange', 'symbol', 'timeframe', 'timestamp'), 'is_unique': True},
    ]
    _migrate(migrator, fields, database.db.get_columns('candle'), 'candle')
def _completed_trade(migrator):
    """Migrate the 'completedtrade' table (no pending field changes)."""
    if 'completedtrade' not in database.db.get_tables():
        return
    _migrate(migrator, [], database.db.get_columns('completedtrade'), 'completedtrade')
def _daily_balance(migrator):
    """Migrate the 'dailybalance' table (no pending field changes)."""
    if 'dailybalance' not in database.db.get_tables():
        return
    _migrate(migrator, [], database.db.get_columns('dailybalance'), 'dailybalance')
def _log(migrator):
    """Migrate the 'log' table (no pending field changes)."""
    if 'log' not in database.db.get_tables():
        return
    _migrate(migrator, [], database.db.get_columns('log'), 'log')
def _order(migrator):
    """Migrate the 'order' table (no pending field changes).

    Past/candidate migrations kept for reference:
    session_id (ADD), trade_id (ALLOW_NULL), exchange_id (ALLOW_NULL),
    price (ALLOW_NULL), flag (DROP), role (DROP), filled_qty (ADD),
    reduce_only (ADD).
    """
    if 'order' not in database.db.get_tables():
        return
    _migrate(migrator, [], database.db.get_columns('order'), 'order')
def _orderbook(migrator):
    """Migrate the 'orderbook' table (no pending field changes)."""
    if 'orderbook' not in database.db.get_tables():
        return
    _migrate(migrator, [], database.db.get_columns('orderbook'), 'orderbook')
def _ticker(migrator):
    """Migrate the 'ticker' table (no pending field changes)."""
    if 'ticker' not in database.db.get_tables():
        return
    _migrate(migrator, [], database.db.get_columns('ticker'), 'ticker')
def _trade(migrator):
    """Migrate the 'trade' table (no pending field changes)."""
    if 'trade' not in database.db.get_tables():
        return
    _migrate(migrator, [], database.db.get_columns('trade'), 'trade')
def _migrate(migrator, fields, columns, table):
for field in fields:
if field['action'] in [migration_actions.ADD_INDEX, migration_actions.DROP_INDEX]:
indexes: list = database.db.get_indexes(table)
to_migrate_indexes: list = field['indexes']
to_migrate_indexes_str = f'{table}_'
for t in to_migrate_indexes:
to_migrate_indexes_str += f'{t}_'
to_migrate_indexes_str = to_migrate_indexes_str[:-1]
already_exists = False
for index in indexes:
existing_indexes_str: list = index.name
if to_migrate_indexes_str == existing_indexes_str:
already_exists = True
break
if field['action'] == migration_actions.ADD_INDEX:
if not already_exists:
migrate(
migrator.add_index(table, field['indexes'], field['is_unique'])
)
print(f'Added index {field["indexes"]} to {table}')
if field['action'] == migration_actions.DROP_INDEX:
if already_exists:
migrate(
migrator.drop_index(table, to_migrate_indexes_str)
)
print(f'Dropped index {field["indexes"]} from the "{table}" table')
else: # else, fist check if the field exists
column_name_exist = any(field['name'] == item.name for item in columns)
if column_name_exist:
if field['action'] == migration_actions.ADD:
pass
elif field['action'] == migration_actions.DROP:
migrate(
migrator.drop_column(table, field['name'])
)
print(f"Successfully dropped '{field['name']}' column from the "'{table}'" table.")
elif field['action'] == migration_actions.RENAME:
migrate(
migrator.rename_column(table, field['name'], field['new_name'])
)
print(f"'{field['name']}' column successfully changed to {field['new_name']} in the '{table}' table.")
elif field['action'] == migration_actions.MODIFY_TYPE:
migrate(
migrator.alter_column_type(table, field['name'], field['type'])
)
print(
f"'{field['name']}' field's type was successfully changed to {field['type']} in the '{table}' table.")
elif field['action'] == migration_actions.ALLOW_NULL:
migrate(
migrator.drop_not_null(table, field['name'])
)
print(f"'{field['name']}' column successfully updated to accept nullable values in the '{table}' table.")
elif field['action'] == migration_actions.DENY_NULL:
migrate(
migrator.add_not_null(table, field['name'])
)
print(
f"'{field['name']}' column successfully updated to accept to reject nullable values in the '{table}' table.")
# if column name doesn't not already exist
else:
if field['action'] == migration_actions.ADD:
migrate(
migrator.add_column(table, field['name'], field['type'])
)
print(f"'{field['name']}' column successfully added to '{table}' table.")
else:
print(f"'{field['name']}' field does not exist in '{table}' table.")
| jesse-ai/jesse | jesse/services/migrator.py | migrator.py | py | 7,706 | python | en | code | 4,933 | github-code | 50 |
16360419858 | import os
import shutil
import datetime
import torch as t
from typing import Any
from tqdm.auto import tqdm
from dataclasses import dataclass
from torch.utils.tensorboard import SummaryWriter
from Trainer import MetricsManager
@dataclass
class BaseTrainer:
    # Training loop driver: runs epochs over train_iter, periodically logs
    # to TensorBoard, evaluates on dev_iter, and checkpoints model+optimizer
    # under ckpt_root/exp_name.
    optimizer: Any      # must provide .save/.load and be usable by model.iterate
    model: Any          # must provide .iterate/.train/.eval/.save/.load/.config
    train_iter: Any
    dev_iter: Any
    test_iter: Any
    ckpt_root: str = 'ckpt/'
    exp_name: str = 'base_exp'
    log_every_iter: int = 100
    eval_every_iter: int = 1000
    save_every_iter: int = 5000
    drop_exp: bool = True
    # Metric used for "best" tracking: '-' means lower is better, '+' higher.
    # (Deliberately unannotated so it stays a class attribute, not a dataclass field.)
    reference = '-loss'
    def __post_init__(self):
        # Build (or rebuild) the experiment directory and the summary writer.
        if self.exp_name is None:
            self.exp_name = self.get_time()
        self.exp_root = os.path.join(self.ckpt_root, self.exp_name)
        #print(f'exps: {os.listdir(self.ckpt_root)})')
        self.global_step = 0
        self.global_epoch = 0
        if self.drop_exp and os.path.exists(self.exp_root): #TODO delete if use
            print(f'droped {self.exp_root}')
            shutil.rmtree(self.exp_root)
        os.mkdir(self.exp_root)
        self.summary_writer = SummaryWriter(self.exp_root)
        self.config = self.model.config
        assert self.reference[0] in ['-', '+']
    def train(self, from_ckpt=None):
        # NOTE(review): from_ckpt and self.best are currently unused (the
        # resume/copy-best paths are commented out below).
        self.best = 1e10 if self.reference[0] == '-' else 0
        # if from_ckpt is not None:
        #     exp_name =
        #     self.load_from_ckpt(exp_name=, epoch=, step=)
        for i in range(self.config.num_epoch):
            self.train_epoch()
            self.global_epoch += 1
    def train_epoch(self):
        """Run one epoch: iterate batches, log/eval/save on the configured
        step intervals, then checkpoint and evaluate on the test set."""
        self.model.train()
        train_bar = tqdm(iterable=self.train_iter, leave=True, total=len(self.train_iter))
        for data in train_bar:
            metrics, _ = self.model.iterate(data, optimizer=self.optimizer, is_train=True)
            if self.global_step % self.log_every_iter == 0 and self.global_step != 0:
                self.summarize(metrics, 'train/')
            self.global_step += 1
            if self.global_step % self.eval_every_iter == 0 and self.global_step != 0:
                self.evaluate(self.dev_iter, 'dev/')
            if self.global_step % self.save_every_iter == 0 and self.global_step != 0:
                self.save_ckpt(metrics[self.reference[1:]])
            desc = f'Train-epoch: {self.global_epoch}, loss: {metrics.loss.item()}, cer: {metrics.cer.item()}'
            train_bar.set_description(desc)
        print(f'in train epoch:{self.global_epoch}, average_loss{1} average_score{1}')#TODO use true value
        self.save_ckpt(metrics[self.reference[1:]])
        self.evaluate(self.test_iter, 'test/')
    def load_from_ckpt(self, exp_name, epoch, step):
        """Restore model/optimizer state saved as 'e{epoch}_s{step}.*'."""
        prefix = f'e{epoch}_s{step}'
        self.global_step = step
        self.global_epoch = epoch
        model_file = os.path.join(self.ckpt_root, exp_name, prefix+'.model')
        opt_file = os.path.join(self.ckpt_root, exp_name, prefix+'.opt')
        self.model.load(model_file)
        self.optimizer.load(opt_file)
        self.config = self.model.config
        print(f'train state loaded from {os.path.join(self.ckpt_root, exp_name)}_epoch:{epoch} step:{step}\n')
    def save_ckpt(self, reference_score):
        """Save model/optimizer state as 'e{epoch}_s{step}.*' in exp_root.
        NOTE(review): reference_score is currently unused (best-copy logic
        is commented out)."""
        prefix = f'e{self.global_epoch}_s{self.global_step}'
        model_file = os.path.join(self.exp_root, prefix+'.model')
        opt_file = os.path.join(self.exp_root, prefix+'.opt')
        self.model.save(model_file)
        self.optimizer.save(opt_file)
        print(f'train state saved to {self.exp_root}_epoch:{self.global_epoch} step:{self.global_step}\n')
        # if (self.reference[0] == '-' and reference_score < self.best) or\
        #    (self.reference[0] == '+' and reference_score > self.best):
        #     self.copy_best()
        #     self.best = reference_score
        #
        # def copy_best(self):
        #     shutil
    def summarize(self, pack, prefix='train/'):
        # Write each scalar in `pack` to TensorBoard under prefix+key.
        # print(f'\nsummarizing in {self.global_step}')
        for i in pack:
            tmp_prefix = prefix + i
            self.summary_writer.add_scalar(tmp_prefix, pack[i].detach().cpu().numpy(), self.global_step)
    def evaluate(self, dev_iter, prefix='dev/'):
        """Evaluate on dev_iter without gradients and log cumulative metrics.
        NOTE(review): `prefix` is ignored (summarize is called with 'dev/'),
        and dev_bar is never iterated, so the bar never advances."""
        print(f'\nEvaluating')
        self.model.eval()
        dev_metric_manager = MetricsManager()
        dev_bar = tqdm(dev_iter, leave=True, total=len(dev_iter), disable=True)
        with t.no_grad():
            for data in dev_iter:
                metrics, _ = self.model.iterate(data, is_train=False)
                dev_metric_manager.update(metrics)
                desc = f'Valid-loss: {metrics.loss.item()}, cer: {metrics.cer.item()}'
                dev_bar.set_description(desc)
        print(f'\nValid, average_loss: {1}, average_score: {1}')#TODO use true value
        report = dev_metric_manager.report_cum()
        report = dev_metric_manager.extract(report)
        self.summarize(report, 'dev/')
        self.model.train()
    def get_time(self):
        # Timestamp string in UTC+8 used as a default experiment name.
        return (datetime.datetime.now() + datetime.timedelta(hours=8)).strftime("%Y%m%d_%H%M")
| zqs01/ASR_chinese_e2e | Trainer/base_trainer.py | base_trainer.py | py | 5,028 | python | en | code | 0 | github-code | 50 |
14510063048 | import random
import emoji
# def level(level , attempt):
def check_guess(user_guess, random_number, attempts, level):
    """Run the guessing loop until the player wins or runs out of attempts.

    The `attempts` argument is overridden by the difficulty: 6 guesses for
    'easy', 3 for 'hard'. Returns "Wrong input level" for any other level,
    otherwise None.

    Fixes over the previous version: the two near-identical easy/hard
    branches are unified, and "You have lost the game" is now actually
    printed when the guesses run out (the old loss branches were
    unreachable, so the game ended silently).
    """
    if level == "easy":
        attempts = 6
    elif level == "hard":
        attempts = 3
    else:
        return "Wrong input level"
    while True:
        if user_guess == random_number:
            print("You have won the game")
            return
        attempts -= 1
        if attempts == 0:
            print("You have lost the game")
            return
        user_guess = int(input(f"Wrong input ****\n** you have {attempts} remaining**\n try once more guess the number : "))
# Game setup: check_guess overrides `attempts` based on the chosen level.
attempts = 0
print("Welcome to Guessing game !")
print("I'm thinking of a number between 1 and 100")
level = input("Choose a difficulty . type 'easy' or 'hard' ").lower()
user_guess = int(input("Guess the number : "))
# NOTE(review): the prompt says 1-100 but the number is drawn from 1-5 --
# possibly a leftover testing range; confirm the intended bounds.
random_number = random.randint(1,5)
check_guess(user_guess, random_number, attempts, level )
# guess(chances=chance , level=level , user_guess=user_guess)
# print(emoji.emojize(":winking_face_with_tongue:"))
# print("\N{grinning face}")
# print(emoji.emojize('Python is :thumbsup:', language='alias')) | tonylloyd2/coding-room | python_workspace/guessgame/guessgame_advanced.py | guessgame_advanced.py | py | 2,076 | python | en | code | 4 | github-code | 50 |
33399347917 | from PyQt4 import QtGui, uic
from windows.controllers.organization_interface.orders_adder import OrderAdder
from windows.widgets.path import ORGANiZATION_ORDERS
from models.organizations import Organizations
class OrdersView(QtGui.QWidget):
    # Widget (loaded from the ORGANiZATION_ORDERS .ui file) that lists an
    # organization's orders in a table and links to the order-adding view.
    # NOTE: Python 2 code (uses dict.iteritems and old-style super call).
    _path = ORGANiZATION_ORDERS
    def __init__(self, stacked_widget, *args, **kwargs):
        # args[0] is expected to be the parent/ancestor view carrying the
        # current organization -- confirm against callers.
        super(OrdersView, self).__init__(*args, **kwargs)
        uic.loadUi(self._path, self)
        self.stacked_widget = stacked_widget
        self.stacked_widget.addWidget(self)
        self.ancestor = args[0]
        self.organization = self.ancestor.organization
        self.organization_id = self.ancestor.organization_id
        self.orders_adder = OrderAdder(stacked_widget, self)
        self.column_index = {}  # NOTE(review): never used in this class
        self.model = QtGui.QStandardItemModel()
        self.update_info()
        # Widgets `orders_view`, `add`, `back` come from the loaded .ui file.
        self.orders_view.setModel(self.model)
        self.add.clicked.connect(self.add_order)
        self.back.clicked.connect(self.on_back)
    def on_cancel(self):
        # Refresh the table and return to this view (e.g. after cancelling add).
        self.update_info()
        self.stacked_widget.setCurrentWidget(self)
    def add_order(self):
        # Switch to the order-adding sub-view.
        self.stacked_widget.setCurrentWidget(self.orders_adder)
    def on_back(self):
        # Delegate navigation back to the ancestor view.
        self.ancestor.on_back()
    def update_info(self):
        # Rebuild the table model from the organization's current orders.
        orders = Organizations().get_orders(self.organization_id)
        # orders = filter(
        #     lambda x: x['producer_id'] == self.organization_id,
        #     orders
        # )
        # for order in orders:
        #     order.pop('producer_id')
        self.model.clear()
        # Header comes from the first order's keys; 'id' when there are none.
        header_labels = [key for key in orders[0]] if len(orders) > 0 else\
            ['id']
        self.model.setHorizontalHeaderLabels(header_labels)
        row_count = 0
        for order in orders:
            column_count = 0
            # NOTE(review): relies on every order dict iterating its keys in
            # the same order as the header row -- confirm get_orders()
            # returns uniformly-keyed dicts.
            for key, value in order.iteritems():
                self.model.setItem(row_count, column_count,
                                   QtGui.QStandardItem(str(value)))
                column_count += 1
            row_count += 1
| Belyashi/LogisticTask | windows/controllers/organization_interface/orders_view.py | orders_view.py | py | 2,018 | python | en | code | 0 | github-code | 50 |
7016078014 | import time
import psycopg2
import requests
from src.utils import config
class Parser:
    """Fetches employer and vacancy data from an HH-style vacancies API."""
    def __init__(self, url: str, employer: str):
        self.employer_url = None  # set by get_employers() when a match is found
        self.url = url
        self.employer = employer
    def get_employers(self):
        """Search the platform for the configured employer name.

        Scans up to 20 pages of results; on the first exact name match with
        more than 10 open vacancies, stores its vacancies URL and returns
        [id, name, open_vacancies, vacancies_url].
        NOTE(review): implicitly returns None when no employer matches.
        """
        employers_lst = []
        for page in range(20):
            params = {'per_page': 100,
                      'page': page,
                      'text': self.employer,
                      'search_field': 'name',
                      'order_by': "publication_time",
                      'archived': False,
                      }
            vacancies = requests.get(self.url, params=params).json()
            employers_lst.extend(vacancies['items'])
        for el in employers_lst:
            if el['name'] == self.employer and el['open_vacancies'] > 10:
                self.employer_url = el['vacancies_url']
                return [el['id'], el['name'], el['open_vacancies'], el['vacancies_url']]
    def get_vacancies(self):
        """Fetch the matched employer's vacancies for Moscow (area=1).

        Requires get_employers() to have been called first (it sets
        self.employer_url). Stops early once the last results page is
        reached; sleeps briefly between requests to be polite to the API.
        """
        vacancies_lst = []
        for page in range(20):
            params = {'per_page': 100,
                      'page': page,
                      'search_field': 'name',
                      'area': 1,
                      'order_by': "publication_time",
                      'archived': False,
                      }
            vacancies = requests.get(self.employer_url, params=params).json()
            vacancies_lst.extend(vacancies['items'])
            if (vacancies['pages'] - page) <= 1:
                break
            time.sleep(0.5)
        return vacancies_lst
class DBCreator:
    """Creates (recreating from scratch) the target database and its tables,
    and inserts rows into them."""
    def __init__(self, db_name: str):
        # Connect to the maintenance 'postgres' DB first so the target DB
        # can be dropped and recreated, then reconnect to the new DB.
        # NOTE(review): db_name is interpolated into SQL unescaped -- only
        # pass trusted, code-controlled database names.
        self.__params = config()
        self.conn = psycopg2.connect(dbname='postgres', **self.__params)
        self.conn.autocommit = True
        self.cur = self.conn.cursor()
        self.cur.execute(f"DROP DATABASE IF EXISTS {db_name}")
        self.cur.execute(f"CREATE DATABASE {db_name}")
        self.conn.close()
        self.__params.update({'dbname': db_name})
        self.conn = psycopg2.connect(**self.__params)
        self.conn.autocommit = True
        self.cur = self.conn.cursor()
    def create_employers_table(self):
        # Companies: id, name, and the count of open vacancies.
        self.cur.execute('''CREATE TABLE employers
                        (
                            company_id int PRIMARY KEY,
                            company_name varchar(100) NOT NULL,
                            open_vacancies int
                        )'''
                         )
    def create_vacancies_table(self):
        # Vacancies reference employers via a foreign key on company_id.
        self.cur.execute('''CREATE TABLE vacancies (
                            vacancy_id int NOT NULL,
                            company_id int NOT NULL,
                            title varchar(100),
                            salary_from int,
                            salary_to int,
                            vacancy_url varchar(100),
                            description text
                            )''')
        self.cur.execute("""ALTER TABLE vacancies ADD CONSTRAINT fk_company_id
                            FOREIGN KEY(company_id) REFERENCES employers(company_id)""")
    def into_table_employers(self, *args, name: str):
        # Insert one employer row into table `name`.
        # NOTE(review): both the table name and the values are interpolated
        # into the SQL string -- unsafe for untrusted input; prefer
        # parameterized values as in into_table_vacancies below.
        self.cur.execute(f"INSERT INTO {name} VALUES {args}")
    def into_table_vacancies(self, vac: str):
        # Insert one vacancy row; `vac` is a 7-item sequence of column values.
        self.cur.execute(f"INSERT INTO vacancies VALUES(%s, %s, %s, %s, %s, %s, %s)", vac)
    def cur_close(self):
        # Close the cursor.
        return self.cur.close()
    def conn_close(self):
        # Close the connection.
        return self.conn.close()
class DBManager:
    """Read-side helper: queries the employers/vacancies database and prints
    human-readable reports (output text is in Russian, as before)."""
    def __init__(self, db_name: str):
        self.__params = config()
        self.__params.update({'dbname': db_name})
        self.conn = psycopg2.connect(**self.__params)
        self.conn.autocommit = True
        self.cur = self.conn.cursor()
    def cur_close(self):
        """Close the cursor."""
        return self.cur.close()
    def conn_close(self):
        """Close the connection."""
        return self.conn.close()
    @staticmethod
    def _format_salary(salary_from, salary_to):
        """Format a (from, to) salary pair exactly as the reports print it.

        Extracted from the two report methods below, which previously
        duplicated this branch chain.
        """
        if salary_from is None and salary_to is None:
            return 'Не указана'
        if salary_from is None:
            return f'до {salary_to}'
        if salary_to is None:
            return f'от {salary_from}'
        if salary_from == salary_to:
            return salary_from
        return f'{salary_from} - {salary_to}'
    def get_companies_and_vacancies_count(self):
        """Print every company and its number of open vacancies."""
        self.cur.execute("SELECT * FROM employers")
        result = self.cur.fetchall()
        for row in result:
            print(f'Компания "{row[1]}", открыто вакансий: {row[2]}')
        print('')
    def get_all_vacancies(self):
        """Print every vacancy with company name, title, salary and URL."""
        self.cur.execute("""SELECT company_name, title, salary_from, salary_to, vacancy_url FROM employers
                FULL JOIN vacancies USING(company_id)""")
        result = self.cur.fetchall()
        for row in result:
            salary = self._format_salary(row[2], row[3])
            print(f'Компания "{row[0]}", Вакансия: "{row[1]}", зарплата: {salary}, ссылка на вакансию: "{row[4]}"')
        print('')
    def get_avg_salary(self):
        """Print the average salary across all vacancies."""
        self.cur.execute("SELECT CEIL((AVG(salary_from) + AVG(salary_to))/2) AS average_salary FROM vacancies")
        row = self.cur.fetchone()
        # Read the numeric result directly instead of scraping digit
        # characters out of str(fetchall()), which silently produced an
        # empty string for NULL results.
        avg_salary = '' if row is None or row[0] is None else int(row[0])
        print(f'Средняя зарплата: {avg_salary} рублей')
        print('')
    def get_vacancies_with_higher_salary(self):
        """Print vacancies whose salary is above the overall average."""
        self.cur.execute("""SELECT company_name, title, salary_from, salary_to, description, vacancy_url FROM employers
                FULL JOIN vacancies USING(company_id)
                WHERE salary_to > (SELECT CEIL((AVG(salary_from) + AVG(salary_to))/2) FROM vacancies)
                OR salary_from > (SELECT CEIL((AVG(salary_from) + AVG(salary_to))/2) FROM vacancies)""")
        result = self.cur.fetchall()
        for row in result:
            salary = self._format_salary(row[2], row[3])
            print(f'Компания "{row[0]}", Вакансия: "{row[1]}", зарплата: {salary}, описание: "{row[4]}",'
                  f'ссылка на вакансию: "{row[5]}"')
        print('')
    def get_vacancies_with_keyword(self, keyword: str):
        """Print vacancies whose title contains `keyword` (e.g. 'python')."""
        # Parameterized LIKE pattern -- the previous version interpolated the
        # keyword straight into the SQL string (injection-prone).
        self.cur.execute("""SELECT company_name, title, vacancy_url FROM employers
                FULL JOIN vacancies USING(company_id) WHERE title LIKE %s""", (f'%{keyword}%',))
        result = self.cur.fetchall()
        if not result or result == []:
            print('Нет вакансий, удовлетворяющих условиям поиска.')
        for row in result:
            print(f'Компания "{row[0]}", вакансия: {row[1]}, ссылка на вакансию: {row[2]}')
        print('')
    def user_query(self, query: str):
        """Execute an arbitrary user-supplied query and print the rows.

        WARNING: runs the SQL verbatim by design -- only expose to trusted
        users.
        """
        self.cur.execute(query)
        result = self.cur.fetchall()
        for row in result:
            print(row)
        print('')
| MaksimPakhomov22/Parse_Vacancies_HH.ru__SQL | src/classes.py | classes.py | py | 8,953 | python | ru | code | 1 | github-code | 50 |
23308369789 | #!/usr/bin/env python
# coding: utf-8
# In[36]:
class Queue:
    """A FIFO queue built on a Python list.

    ``inner_list`` holds the items (index 0 is the front); ``top`` is the
    index at which the next item is inserted (the back of the queue).

    Bug fix: both attributes used to be *class* attributes, so every Queue
    instance shared one list (the demo code had to call
    ``inner_list.clear()`` manually).  They are now per-instance state.
    """

    def __init__(self):
        self.inner_list = []  # backing storage; the front of the queue is index 0
        self.top = 0          # insertion index for the next enqueue

    def enqueue(self, value):
        """Append *value* at the back of the queue."""
        # list.insert clamps an out-of-range index to the end, so this keeps
        # acting like append even after dequeues have shrunk the list.
        self.inner_list.insert(self.top, value)
        self.top = self.top + 1

    def dequeue(self):
        """Remove and return the item at the front of the queue."""
        value = self.inner_list.pop(0)
        return value

    def delete(self, value):
        """Delete the first occurrence of *value*, keeping the order of the
        remaining items (rotates the queue once through)."""
        length = 0
        delete_index = None
        for x in self.inner_list:              # locate the FIRST match
            if (x == value) and (delete_index is None):
                delete_index = length
            length += 1
        for i in range(length):                # rotate every item front->back
            if (i != delete_index):
                self.enqueue(self.dequeue())
            else:
                self.dequeue()                 # drop the matched item
# Demo: build a queue, delete the first 7, then pop the front.
obj = Queue()
obj.enqueue(5)
obj.enqueue(7)
obj.enqueue(13)
obj.enqueue(4)
obj.enqueue(7)
obj.delete(7)
print(obj.dequeue()) # Should return 5
# In[35]:
ob1 = Queue()
ob1.inner_list.clear() # reset the backing list before reuse
ob1.enqueue(2)
ob1.enqueue(9)
ob1.enqueue(7)
ob1.enqueue(9)
ob1.enqueue(11)
print(ob1.inner_list)
ob1.delete(9)  # removes only the FIRST 9
print(ob1.inner_list)
| ZachOhl/HW-6 | 2/hw6_02.py | hw6_02.py | py | 2,001 | python | en | code | 0 | github-code | 50 |
9118724663 | from kivy.app import App
from kivy.clock import Clock
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from functools import partial
class LongPress(App):
    """Kivy demo: holding a touch for 2 seconds opens a small popup menu."""

    def create_clock(self, widget, touch, *args):
        # Arm a 2-second timer; it fires self.menu unless the touch ends first.
        long_press_cb = partial(self.menu, touch)
        touch.ud['event'] = long_press_cb
        Clock.schedule_once(long_press_cb, 2)

    def delete_clock(self, widget, touch, *args):
        # Touch released: cancel the pending long-press timer.
        Clock.unschedule(touch.ud['event'])

    def menu(self, touch, *args):
        # Build a small vertical menu centred on the touch position.
        popup = BoxLayout(
            size_hint=(None, None),
            orientation='vertical',
            center=touch.pos)
        for caption in ('a', 'b'):
            popup.add_widget(Button(text=caption))
        close_btn = Button(text='close')
        close_btn.bind(on_release=partial(self.close_menu, popup))
        popup.add_widget(close_btn)
        self.root.add_widget(popup)

    def close_menu(self, widget, *args):
        # Remove the popup menu from the root layout.
        self.root.remove_widget(widget)

    def build(self):
        self.root = FloatLayout()
        self.root.bind(
            on_touch_down=self.create_clock,
            on_touch_up=self.delete_clock)
if __name__ == '__main__':
LongPress().run() | InfinityCliff/F-150_Console | Samples/longbutpress.py | longbutpress.py | py | 1,207 | python | en | code | 7 | github-code | 50 |
69848990874 | from PIL import Image
import os
# Folder that holds the PNG images and where the JPG copies will be written.
ruta_carpeta = ""
# List every file in the folder.
archivos = os.listdir(ruta_carpeta)
# Convert each PNG found into a JPG next to it.
for archivo in archivos:
    # Only process PNG files.
    if archivo.endswith(".png"):
        # Full path of the source PNG.
        ruta_imagen_png = os.path.join(ruta_carpeta, archivo)
        # Open the PNG image.
        imagen = Image.open(ruta_imagen_png)
        # Build the destination path with a .jpg extension.
        nombre_archivo = os.path.splitext(archivo)[0]
        ruta_imagen_jpg = os.path.join(ruta_carpeta, nombre_archivo + ".jpg")
        # RGB conversion drops any alpha channel (JPEG has none), then save.
        imagen.convert("RGB").save(ruta_imagen_jpg, "JPEG")
        # Release the file handle.
        imagen.close()
| sitocristobal/granito | Changeextension.py | Changeextension.py | py | 920 | python | es | code | 0 | github-code | 50 |
18777169664 | from Functions import Q_table, StateRep, Reward, Qvalue, maxQ
import pandas
# Simulation/RL hyper-parameters for the Q-table initialization.
maxBuffer = 4
StationsNumber = 2
UtilizationDisc = 100 // 25  # number of discrete utilization bins
gamma = 0.9  # discount factor (defined here but not used below)
alpha = 0.3  # learning rate (defined here but not used below)
#OrderTypes = 3 # fixing the number of OrderTypes
Qdf = Q_table(maxBuffer, UtilizationDisc, StationsNumber) # initializing Q-table to our parameters and filling it with all 0s
Qdf.to_csv(r'E:\pfe\Qtable.csv', index=False)
| neprev/DTSimioRL | Step0.py | Step0.py | py | 405 | python | en | code | 2 | github-code | 50 |
26264978378 | import os, logging
from langchain.vectorstores import SupabaseVectorStore
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
#https://python.langchain.com/en/latest/modules/chains/index_examples/chat_vector_db.html
from langchain.chains import ConversationalRetrievalChain
import openai.error as errors
from supabase import Client, create_client
from dotenv import load_dotenv
load_dotenv()
def qna(question: str, vector_name: str, chat_history=None):
    """Answer *question* against a Supabase vector store.

    Builds an OpenAI-embeddings SupabaseVectorStore named *vector_name*
    (with its matching ``match_documents_<vector_name>`` RPC), wires it into
    a ConversationalRetrievalChain and runs the question.

    :param question: user question to answer
    :param vector_name: Supabase table / query-function suffix
    :param chat_history: optional prior (question, answer) turns
    :return: chain result dict (includes 'answer' and source documents)
    """
    logging.info(f"Initiating Supabase store: {vector_name}")
    # init embedding and vector store
    supabase_url = os.getenv('SUPABASE_URL')
    supabase_key = os.getenv('SUPABASE_KEY')
    logging.info(f"Supabase URL: {supabase_url} vector_name: {vector_name}")
    embeddings = OpenAIEmbeddings()
    supabase: Client = create_client(supabase_url, supabase_key)
    vectorstore = SupabaseVectorStore(supabase,
                                      embeddings,
                                      table_name=vector_name,
                                      query_name=f'match_documents_{vector_name}')
    logging.info(f"vectorstore.table_name {vectorstore.table_name}")
    # k=4 nearest chunks per query; temperature 0 for deterministic answers.
    retriever = vectorstore.as_retriever(search_kwargs=dict(k=4))
    llm = OpenAI(temperature=0)
    qa = ConversationalRetrievalChain.from_llm(llm,
                                               retriever=retriever,
                                               return_source_documents=True,
                                               verbose=True,
                                               output_key='answer',
                                               max_tokens_limit=3500)
    result = qa({"question": question, "chat_history": chat_history})
    return result
37350414540 | # CP template Version 1.006
import os
import sys
#import string
#from functools import cmp_to_key, reduce, partial
#import itertools
#from itertools import product
#import collections
#from collections import deque
#from collections import Counter, defaultdict as dd
#import math
#from math import log, log2, ceil, floor, gcd, sqrt
#from heapq import heappush, heappop
#import bisect
#from bisect import bisect_left as bl, bisect_right as br
DEBUG = False
def main(f=None):
    """Solve BOJ 10866-style deque commands read from stdin.

    A dict ``d`` emulates a deque: ``h`` is the front index (grows upward),
    ``t`` is the back index (grows downward); empty ops print -1.
    """
    init(f)
    # sys.setrecursionlimit(10**9)
    # ######## INPUT AREA BEGIN ##########
    n = int(input())
    cmd = [sys.stdin.readline().strip() for _ in range(n)]
    d, h, t= dict(), 0, 1
    for i in cmd:
        if 'push_front' in i:
            h += 1
            d[h] = int(i[11:])
        elif 'push_back' in i:
            t -= 1
            d[t] = int(i[10:])
        elif i == 'pop_front':
            try:
                print(d[h])
                del d[h]
                h -= 1
            except: print(-1)  # KeyError: deque is empty
        elif i == 'pop_back':
            try:
                print(d[t])
                del d[t]
                t += 1
            except: print(-1)
        elif i == 'size': print(len(d))
        elif i == 'empty': print(1 if len(d) == 0 else 0)
        elif i == 'front':
            try: print(d[h])
            except: print(-1)
        elif i == 'back':
            try: print(d[t])
            except: print(-1)
    # ######## INPUT AREA END ############
# TEMPLATE ###############################
enu = enumerate  # short alias used in competitive-programming solutions
def For(*args):
    """Iterate the Cartesian product of ``range(a)`` for each dimension.

    ``For(2, 3)`` yields (0,0), (0,1), ... (1,2) — a nested-loop shorthand.

    Bug fix: ``import itertools`` is commented out at the top of this
    template, so calling For() raised NameError; import locally instead.
    """
    import itertools
    return itertools.product(*map(range, args))
def Mat(h, w, default=None):
    """Return an h x w matrix (list of row lists) filled with *default*."""
    return [[default] * w for _ in range(h)]
def nDim(*args, default=None):
    """Build a nested list with the given dimensions, innermost cells set
    to *default* (e.g. nDim(2, 3) -> 2x3 matrix of None)."""
    size, *inner = args
    if not inner:
        return [default for _ in range(size)]
    return [nDim(*inner, default=default) for _ in range(size)]
def setStdin(f):
    """Redirect stdin (and the global ``input``) to read from file *f*,
    and switch the template into DEBUG mode."""
    global DEBUG, input
    DEBUG = True
    sys.stdin = open(f)
    input = sys.stdin.readline
def init(f=None):
    """Wire up I/O for the template.

    Output goes to file "o" when it exists; input is taken (in priority
    order) from *f*, sys.argv[1], "in/i", "i", or real stdin.
    """
    global input
    input = sys.stdin.readline # by default
    if os.path.exists("o"):
        sys.stdout = open("o", "w")
    if f is not None:
        setStdin(f)
    else:
        if len(sys.argv) == 1:
            if os.path.isfile("in/i"):
                setStdin("in/i")
            elif os.path.isfile("i"):
                setStdin("i")
        elif len(sys.argv) == 2:
            setStdin(sys.argv[1])
        else:
            assert False, "Too many sys.argv: %d" % len(sys.argv)
def pr(*args):
    """Debug print: forwards to print() only while DEBUG mode is on."""
    if not DEBUG:
        return
    print(*args)
def pfast(*args, end="\n", sep=' '):
    """Fast output: a single sys.stdout.write call instead of print()."""
    text = sep.join(str(arg) for arg in args)
    sys.stdout.write(text + end)
def parr(arr):
    """Print each element of *arr* on its own line (nothing for empty)."""
    sys.stdout.write("".join(f"{item}\n" for item in arr))
if __name__ == "__main__":
main() | TaemHam/Baekjoon_Submission | 10866/main.py | main.py | py | 2,628 | python | en | code | 0 | github-code | 50 |
9542843433 |
import smtplib
from email.utils import formataddr
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import json
def send_mail(message):
    """Send *message* as a plain-text email via Yahoo SMTP-over-SSL.

    Credentials and addresses come from vars.json (SMTPSERVER, SMTPPORT,
    YAHOO, YAHOOPSWD, GMAIL).
    """
    with open('vars.json') as cfg_file:
        cfg = json.load(cfg_file)
    mail = MIMEMultipart()
    mail['Subject'] = 'Update from iHerb'
    mail['from'] = formataddr(('Ravid Bondy scraper', cfg['YAHOO']))
    mail['To'] = cfg['GMAIL']
    mail.attach(MIMEText(message, 'plain'))
    conn = smtplib.SMTP_SSL(cfg['SMTPSERVER'], cfg['SMTPPORT'])
    conn.login(cfg['YAHOO'], cfg['YAHOOPSWD'])
    conn.send_message(mail)
    conn.quit()
| bondyr135/legendary-octo-spork | mail_sender.py | mail_sender.py | py | 658 | python | en | code | 0 | github-code | 50 |
8958728841 | from django.shortcuts import render, redirect
from shopping.signupform import CustomUserCreationForm
#
# def signup(request):
# if request.method == 'POST':
# form = CustomUserCreationForm(request.POST)
#
# if form.is_valid():
# form.save()
# return redirect('/login/')
# else:
# error_message = form.errors
# return render(request, 'register.html', {'form': form, 'error_message': error_message})
# else:
# form = CustomUserCreationForm()
# return render(request, 'register.html', {'form': form})
from django.db import IntegrityError
def signup(request):
    """Handle the registration form.

    POST with a valid form saves the user and redirects to /login/; a
    duplicate username (IntegrityError) or invalid form re-renders the page
    with an error message.  GET renders an empty form.

    :param request: the HttpRequest object
    :return: an HttpResponse (render or redirect)
    """
    if request.method == 'POST':
        form = CustomUserCreationForm(request.POST)
        if form.is_valid():
            try:
                form.save()
            except IntegrityError:
                # Unique constraint on the username was violated.
                error_message = 'Username already taken. Please choose a different one.'
                return render(request, 'register.html', {'form': form, 'error_message': error_message})
            else:
                return redirect('/login/')
        else:
            error_message = form.errors
            return render(request, 'register.html', {'form': form, 'error_message': error_message})
    else:
        form = CustomUserCreationForm()
        return render(request, 'register.html', {'form': form})
| locallhosts/Retailshopping | shopping/signup_view.py | signup_view.py | py | 1,565 | python | en | code | 0 | github-code | 50 |
71467994716 | # -*- coding: utf-8 -*-
import re
if __name__ == "__main__":
    # Print "Yes" iff the input line matches: "meth", one-or-more 'o', "d".
    input_str = input()
    ans_str = "No"
    reg_str = r"^methoo*d$"  # "metho" + zero-or-more extra 'o' + "d"
    searched_result = re.search(reg_str, input_str)
    if searched_result:
        ans_str = "Yes"
    print(ans_str)
| ksato-dev/algo_method | 6_re/re1_2.py | re1_2.py | py | 251 | python | en | code | 0 | github-code | 50 |
27063410608 | import pandas as pd
import os
def dataframe_concat(n):
    """Stack './label 1.csv' ... './label n.csv' into './final.csv'.

    Each per-label CSV is read with its first column as the index, the
    frames are concatenated in order with a fresh RangeIndex, and the
    combined frame is written to ./final.csv.
    """
    frames = [pd.read_csv(f"./label {i}.csv", index_col=0)
              for i in range(1, n + 1)]
    combined = pd.concat(frames, ignore_index=True)
    combined.to_csv("./final.csv")
if __name__ == "__main__":
n = len(os.listdir('./')) - 1
dataframe_concat(n)
| mmmmmcree/Project | 手势识别/Hand Landmarks data/dataframe_concat.py | dataframe_concat.py | py | 367 | python | en | code | 0 | github-code | 50 |
35713145962 | import os
import subprocess
import sys
from distutils.core import setup
from typing import List
_minimum_version = (3, 7)
if sys.version_info < _minimum_version:
raise RuntimeError('Required Python {}'.format(
'.'.join([str(i) for i in _minimum_version])
))
version = '0.1.0'  # package release version passed to setup()
proto_pkgs = ['keymaster_pb2', 'keymaster_pb2_grpc']  # NOTE(review): generated gRPC modules; not referenced below — confirm intent
def list_packages(path: str = None) -> List[str]:
    """Recursively collect package directories under *path* (default
    'keymaster') and return them in dotted form ('/' -> '.').

    Entries whose name starts with '.' or '_' and plain files are skipped.

    Bug fix: the recursive call already returns ``sub_path`` itself as its
    first element, so appending ``sub_path`` here as well produced duplicate
    entries in the package list; only the recursion's result is kept now.
    """
    actual_path = path
    package_list = []
    if not actual_path:
        actual_path = 'keymaster'
    package_list.append(actual_path)
    for node_name in os.listdir(actual_path):
        if node_name[0] in ['.', '_']:
            continue
        sub_path = os.path.join(actual_path, node_name)
        print(sub_path)
        if not os.path.isdir(sub_path):
            print(' - skipped (not dir)')
            continue
        print(' - included')
        # The recursion's result already contains sub_path (dotted).
        package_list.extend(list_packages(sub_path))
    return [
        pkg_path.replace(r'/', '.')
        for pkg_path in package_list
    ]
# Discover packages on the filesystem, then register the distribution.
package_list = list_packages()
setup(
    name='keymaster',
    version=version,
    description='Key/Secret Management System',
    license='Apache 2.0',
    author='Juti Noppornpitak',
    author_email='juti_n@yahoo.co.jp',
    url='https://github.com/shiroyuki/gallium',
    packages=package_list,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        # NOTE(review): the standard trove classifier is
        # 'License :: OSI Approved :: Apache Software License' — confirm.
        'License :: OSI Approved :: Apache License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries'
    ],
    install_requires=[
        'gallium>=1.5.1',
        'imagination>=3.1.0',
        'kotoba',
        'cryptography',
        'keyring',
        'pyperclip',
        'xmode>=0.5',
        'grpcio-tools',
        'grpcio',
        'qrcode[pil]',
        'pyjwt',
        'sqlalchemy',
        'pymysql',
    ],
    python_requires='>=3.7',
    entry_points = {
        'console_scripts': [
            # Two aliases for the same CLI entry point.
            'keymaster=keymaster.starter:activate',
            'km=keymaster.starter:activate',
        ],
    }
)
| shiroyuki/keymaster | setup.py | setup.py | py | 2,226 | python | en | code | 0 | github-code | 50 |
19723215684 | import numpy as np #version 1.9.2
import pandas as pd
#import seaborn as sns
pd.options.display.width = 0
def read_data():
    """Load the admission dataset and split it into features and target.

    Reads data/admission/Admission_Predict.csv, normalizes column names
    (strip whitespace, spaces -> underscores), prints head/describe/NA
    diagnostics, drops rows with missing values, and returns
    ``(x, y, data_stats)`` where x is the feature matrix, y the
    Chance_of_Admit column as an (n, 1) array, and data_stats the
    pre-dropna describe() frame.
    """
    data = pd.read_csv("data/admission/Admission_Predict.csv")
    data.columns = data.columns.str.strip()
    data.columns = data.columns.str.replace(" ", "_")
    print("\n\n---\nHead \n", data.head())
    data_stats = data.describe()
    print("\n\n---\nStats \n", data_stats)
    print("\n\n---\nNa values \n")
    print(data.isna().sum())
    data = data.dropna()
    x = np.array(data.drop(["Chance_of_Admit"], axis=1))
    y = np.array(data["Chance_of_Admit"]).reshape(-1, 1)
    return (x, y, data_stats)
2415260724 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# draw_figure.py
# author: Kentaro Wada <www.kentaro.wada@gmail.com>
import sys
import pygame
# Simple pygame demo: draw a few shapes every frame until the window closes.
screen_size = (640, 480)
pygame.init()
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption("図形の描画")
while True:
    screen.fill((0,0,0))
    # Draw the shapes.
    # Yellow rectangle
    pygame.draw.rect(screen, (255,255,0), pygame.Rect(10,10,300,200))
    # Red circle
    pygame.draw.circle(screen, (255,0,0), (320,240), 100)
    # Purple ellipse (outline only, width=1)
    pygame.draw.ellipse(screen, (255,0,255), (400,300,200,100), 1)
    # White line
    pygame.draw.line(screen, (255,255,255), (0,0), (640,480))
    pygame.display.update()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
| wkentaro/inbox-arhive | python/game/code/draw_figure.py | draw_figure.py | py | 790 | python | en | code | 1 | github-code | 50 |
13878773848 | import traceback
from challenges.models import ChallengePhase
# One-off migration: copy max_submissions into max_submissions_per_month
# for every challenge phase.
challenge_phases = ChallengePhase.objects.all()
try:
    for phase in challenge_phases:
        phase.max_submissions_per_month = phase.max_submissions
        phase.save()
except Exception as e:
    print(e)
    # NOTE(review): print_exc() already prints and returns None, so this
    # also prints a stray "None" line — confirm whether intended.
    print(traceback.print_exc())
| Cloud-CV/EvalAI | scripts/migration/set_monthly_submission_limit.py | set_monthly_submission_limit.py | py | 307 | python | en | code | 1,583 | github-code | 50 |
22931008479 | #_*_ coding: utf-8 _*_
#https://sshuhei.com
import json
import logging
import logging.handlers
import time
import itertools
from src import channel
from hyperopt import fmin, tpe, hp
def describe(params):
    """Hyperopt objective: backtest one sampled parameter set.

    *params* unpacks into four (value, value) tuples, the candle period in
    minutes, and the constants cost / mlMode / fileName.  Returns the value
    hyperopt should MINIMIZE, so profit-like metrics are negated.
    """
    i, j, k, l, m, cost, mlMode, fileName = params
    channelBreakOut = channel.ChannelBreakOut()
    channelBreakOut.entryTerm = i[0]
    channelBreakOut.closeTerm = i[1]
    channelBreakOut.rangeTh = j[0]
    channelBreakOut.rangeTerm = j[1]
    channelBreakOut.waitTerm = k[0]
    channelBreakOut.waitTh = k[1]
    channelBreakOut.rangePercent = l[0]
    channelBreakOut.rangePercentTerm = l[1]
    channelBreakOut.candleTerm = str(m) + "T"  # pandas offset alias, minutes
    channelBreakOut.cost = cost
    channelBreakOut.fileName = fileName
    logging.info("===========Test pattern===========")
    logging.info('candleTerm:%s',channelBreakOut.candleTerm)
    logging.info('entryTerm:%s closeTerm:%s',channelBreakOut.entryTerm,channelBreakOut.closeTerm)
    logging.info('rangePercent:%s rangePercentTerm:%s',channelBreakOut.rangePercent,channelBreakOut.rangePercentTerm)
    logging.info('rangeTerm:%s rangeTh:%s',channelBreakOut.rangeTerm,channelBreakOut.rangeTh)
    logging.info('waitTerm:%s waitTh:%s',channelBreakOut.waitTerm,channelBreakOut.waitTh)
    logging.info("===========Backtest===========")
    pl, profitFactor, maxLoss, winPer, ev = channelBreakOut.describeResult()
    # Choose the optimization target; hyperopt minimizes, hence the minus signs.
    # NOTE(review): the PFDD branch is NOT negated — confirm the intended direction.
    if "PFDD" in mlMode:
        result = profitFactor/maxLoss
    elif "PL" in mlMode:
        result = -pl
    elif "PF" in mlMode:
        result = -profitFactor
    elif "DD" in mlMode:
        result = -maxLoss
    elif "WIN" in mlMode:
        result = -winPer
    elif "EV" in mlMode:
        result = -ev
    logging.info("===========Assessment===========")
    logging.info('Result:%s',result)
    return result
def optimization(cost, fileName, hyperopt, mlMode, showTradeDetail):
    """Search the channel-breakout parameter space with hyperopt (TPE),
    then re-run and report a backtest with the best parameters found.

    :param cost: per-trade cost passed through to the backtest
    :param fileName: price-data file for the backtest
    :param hyperopt: number of TPE evaluations (max_evals)
    :param mlMode: objective selector, see describe()
    :param showTradeDetail: forwarded to the final backtest report
    """
    # Load the search-space definition from optimizeList.json.
    f = open('config/optimizeList.json', 'r', encoding="utf-8")
    config = json.load(f)
    entryAndCloseTerm = config["entryAndCloseTerm"]
    rangeThAndrangeTerm = config["rangeThAndrangeTerm"]
    waitTermAndwaitTh = config["waitTermAndwaitTh"]
    rangePercentList = config["rangePercentList"]
    linePattern = config["linePattern"]
    termUpper = config["termUpper"]
    candleTerm = config["candleTerm"]
    if "COMB" in linePattern:
        # Replace the explicit list with every (entry, close) combination.
        entryAndCloseTerm = list(itertools.product(range(2,termUpper), range(2,termUpper)))
    total = len(entryAndCloseTerm) * len(rangeThAndrangeTerm) * len(waitTermAndwaitTh) * len(rangePercentList) * len(candleTerm)
    logging.info('Total pattern:%s Searches:%s',total,hyperopt)
    logging.info("======Optimization start======")
    # Run the TPE search; cost/mlMode/fileName ride along as constants.
    space = [hp.choice('i',entryAndCloseTerm), hp.choice('j',rangeThAndrangeTerm), hp.choice('k',waitTermAndwaitTh), hp.choice('l',rangePercentList), hp.choice('m',candleTerm), cost, mlMode, fileName]
    result = fmin(describe,space,algo=tpe.suggest,max_evals=hyperopt)
    logging.info("======Optimization finished======")
    channelBreakOut = channel.ChannelBreakOut()
    channelBreakOut.entryTerm = entryAndCloseTerm[result['i']][0]
    channelBreakOut.closeTerm = entryAndCloseTerm[result['i']][1]
    channelBreakOut.rangeTh = rangeThAndrangeTerm[result['j']][0]
    channelBreakOut.rangeTerm = rangeThAndrangeTerm[result['j']][1]
    channelBreakOut.waitTerm = waitTermAndwaitTh[result['k']][0]
    channelBreakOut.waitTh = waitTermAndwaitTh[result['k']][1]
    channelBreakOut.rangePercent = rangePercentList[result['l']][0]
    channelBreakOut.rangePercentTerm = rangePercentList[result['l']][1]
    channelBreakOut.candleTerm = str(candleTerm[result['m']]) + "T"
    channelBreakOut.cost = cost
    channelBreakOut.fileName = fileName
    channelBreakOut.showTradeDetail = showTradeDetail
    logging.info("======Best pattern======")
    logging.info('candleTerm:%s mlMode:%s',channelBreakOut.candleTerm,mlMode)
    logging.info('entryTerm:%s closeTerm:%s',channelBreakOut.entryTerm,channelBreakOut.closeTerm)
    logging.info('rangePercent:%s rangePercentTerm:%s',channelBreakOut.rangePercent,channelBreakOut.rangePercentTerm)
    logging.info('rangeTerm:%s rangeTh:%s',channelBreakOut.rangeTerm,channelBreakOut.rangeTh)
    logging.info('waitTerm:%s waitTh:%s',channelBreakOut.waitTerm,channelBreakOut.waitTh)
    logging.info("======Backtest======")
    channelBreakOut.describeResult()
    # Emit a ready-to-paste config.json fragment with the best parameters.
    print("======config======")
    print(" \"entryTerm\" : ", channelBreakOut.entryTerm, ",", sep="")
    print(" \"closeTerm\" : ", channelBreakOut.closeTerm, ",", sep="")
    if channelBreakOut.rangePercent is None:
        print(" \"rangePercent\" : ", "null,", sep="")
    else:
        print(" \"rangePercent\" : ", channelBreakOut.rangePercent, ",", sep="")
    if channelBreakOut.rangePercentTerm is None:
        print(" \"rangePercentTerm\" : ", "null,", sep="")
    else:
        print(" \"rangePercentTerm\" : ", channelBreakOut.rangePercentTerm, ",", sep="")
    if channelBreakOut.rangeTerm is None:
        print(" \"rangeTerm\" : ", "null,", sep="")
    else:
        print(" \"rangeTerm\" : ", channelBreakOut.rangeTerm, ",", sep="")
    if channelBreakOut.rangeTh is None:
        print(" \"rangeTh\" : ", "null,", sep="")
    else:
        print(" \"rangeTh\" : ", channelBreakOut.rangeTh, ",", sep="")
    print(" \"waitTerm\" : ", channelBreakOut.waitTerm, ",", sep="")
    print(" \"waitTh\" : ", channelBreakOut.waitTh, ",", sep="")
    print(" \"candleTerm\" : \"", channelBreakOut.candleTerm, "\",", sep="")
    print("==================")
if __name__ == '__main__':
    # Logging setup: console plus a midnight-rotated file handler.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(levelname)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    logfile=logging.handlers.TimedRotatingFileHandler(
        filename = 'log/optimization.log',
        when = 'midnight'
    )
    logfile.setLevel(logging.INFO)
    logfile.setFormatter(logging.Formatter(
        fmt='%(asctime)s %(levelname)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'))
    logging.getLogger('').addHandler(logfile)
    logging.info('Wait...')
    # Load run settings from config.json.
    f = open('config/config.json', 'r', encoding="utf-8")
    config = json.load(f)
    logging.info('cost:%s mlMode:%s fileName:%s',config["cost"],config["mlMode"],config["fileName"])
    # Run the optimization and time it.
    start = time.time()
    optimization(cost=config["cost"], fileName=config["fileName"], hyperopt=config["hyperopt"], mlMode=config["mlMode"], showTradeDetail=config["showTradeDetail"])
| Connie-Wild/ChannelBreakoutBot | machineLearning.py | machineLearning.py | py | 6,723 | python | en | code | 199 | github-code | 50 |
def main():
    """Advent of Code day 1 part 2: print the total calories carried by the
    three elves with the most calories.

    The input file groups one integer per line, with blank lines separating
    elves.  Bug fix: when the file does not end with a blank line, the last
    elf's running total was never appended to the list; it is flushed after
    the loop now.
    """
    elf_calories = []
    current_sum = 0
    with open("elf_calorie_list.txt", "r") as file:
        for line in file:
            if line != "\n":
                current_sum += int(line)
            else:
                elf_calories.append(current_sum)
                current_sum = 0
    if current_sum:
        # Flush the final group (no trailing blank line in the input).
        elf_calories.append(current_sum)
    top3_calories = sorted(elf_calories, reverse=True)[:3]
    top3_total = sum(top3_calories)
    print("Total of top 3 elf calories: ", top3_total)
if __name__ == "__main__":
main() | EwanWilliams/advent-of-code-2022 | day1/day1_puzzle2.py | day1_puzzle2.py | py | 517 | python | en | code | 0 | github-code | 50 |
def double_char(s):
    """Return *s* with every character doubled ("The" -> "TThhee")."""
    return "".join(ch * 2 for ch in s)
def count_hi(s):
    """Count occurrences of the substring "hi" in *s*.

    "hi" cannot overlap itself, so str.count matches a sliding-window scan.
    """
    return s.count("hi")
def cat_dog(s):
    """Return True when "cat" and "dog" appear the same number of times.

    Neither word can overlap itself, so str.count matches the original
    character-by-character scan exactly.
    """
    return s.count("cat") == s.count("dog")
def count_code(s):
    """Count occurrences of "co_e" in *s*, where '_' is any character
    (so "code", "cope", "coze" all count).

    Bug fix: the original ``while`` loop never incremented ``i``, so any
    string long enough to enter the loop spun forever.
    """
    value = 0
    for i in range(len(s) - 3):
        if s[i:i + 2] == "co" and s[i + 3] == "e":
            value += 1
    return value
def end_other(a, b):
    """Return True when, ignoring case, one string ends with the other."""
    lower_a = a.lower()
    lower_b = b.lower()
    return lower_a.endswith(lower_b) or lower_b.endswith(lower_a)
def xyz_there(s):
    """Return True when "xyz" appears in *s* NOT directly preceded by '.'.

    Bug fix: for a match at index 0 the original tested ``s[i - 1]`` with
    i == 0, i.e. ``s[-1]`` — the LAST character — so strings like "xyz."
    were wrongly rejected.  A match at the start can have no preceding dot.
    """
    for i in range(len(s) - 2):
        if s[i:i + 3] == "xyz" and (i == 0 or s[i - 1] != "."):
            return True
    return False
| CreativePenguin/stuy-cs | intro-comp-sci2/python/Homework#11String-2.py | Homework#11String-2.py | py | 1,117 | python | en | code | 0 | github-code | 50 |
17575619324 | """ MTFB-CNN model from Hongli Li et al 2023.
See details at https://doi.org/10.1016/j.bspc.2022.104066
Notes
-----
The initial values in this model are based on the values identified by the authors
References
----------
Li H, Chen H, Jia Z, et al.
A parallel multi-scale time-frequency block convolutional neural network based on channel attention module for motor imagery classification[J].
Biomedical Signal Processing and Control, 2023, 79: 104066.
"""
import os
import sys
current_path = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(current_path)[0]
sys.path.append(current_path)
sys.path.append(rootPath)
import numpy as np
import torch
import torch.nn as nn
from torchinfo import summary
from torchstat import stat
import yaml
class TFB(nn.Module):
    """Time-frequency block: four parallel paths over the time axis.

    Three 1-D temporal convolutions with different kernel sizes
    (kerSize_1/2/3, shared stride kerStr) plus a max-pool + 1x1-conv path;
    each path ends with BatchNorm and SELU, and the four outputs are
    concatenated along the channel dimension (so the block emits
    4 * out_chan feature maps).
    """
    def __init__(self, kerSize_1, kerSize_2, kerSize_3, kerStr, out_chan, pool_ker, pool_str):
        super(TFB, self).__init__()
        self.kerSize_1 = kerSize_1
        # Path 1: the only path with parity-dependent padding.
        # NOTE(review): padding formula differs for even/odd kerSize_1 —
        # presumably to keep the four paths' output lengths equal; confirm.
        self.path_1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=out_chan,
                kernel_size=(1,kerSize_1),
                stride=(1,kerStr),
                padding=(0, round(kerSize_1/2)-1 if kerSize_1%2==0 else round(kerSize_1/2)-2)
            ),
            nn.BatchNorm2d(num_features=out_chan),
            nn.SELU()
        )
        # Path 2: medium temporal kernel.
        self.path_2 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=out_chan,
                kernel_size=(1,kerSize_2),
                stride=(1,kerStr),
                padding=(0,round(kerSize_2/2)-1)
            ),
            nn.BatchNorm2d(num_features=out_chan),
            nn.SELU()
        )
        # Path 3: large temporal kernel.
        self.path_3 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=out_chan,
                kernel_size=(1,kerSize_3),
                stride=(1,kerStr),
                padding=(0,round(kerSize_3/2)-1)
            ),
            nn.BatchNorm2d(num_features=out_chan),
            nn.SELU()
        )
        # Path 4: temporal max-pool followed by a pointwise (1x1) conv.
        self.path_4 = nn.Sequential(
            nn.MaxPool2d(
                kernel_size=(1,pool_ker),
                stride=(1,pool_str),
                padding=(0,round(pool_ker/2)-1)
            ),
            nn.Conv2d(
                in_channels=1,
                out_channels=out_chan,
                kernel_size=1,
                stride=1
            ),
            nn.BatchNorm2d(num_features=out_chan),
            nn.SELU()
        )
    def forward(self, x):
        """Run all four paths on x and concatenate along channels (dim 1)."""
        p1 = self.path_1(x)
        p2 = self.path_2(x)
        p3 = self.path_3(x)
        p4 = self.path_4(x)
        out = torch.cat((p1,p2,p3,p4), dim=1)
        return out
class ResneXt(nn.Module):
    """ResNeXt-style residual block with three parallel conv paths.

    Each path is conv-BN-ELU-dropout-conv-BN with identical hyper-parameters
    (but independent weights); their concatenation (3 * out_chan channels)
    is added to a 1x1-conv shortcut of the input and passed through ELU.
    The residual addition requires ``in_chan == 3 * out_chan``.
    """
    def __init__(self, in_chan, kerSize, out_chan, dropoutRate):
        super(ResneXt, self).__init__()
        self.path_1 = nn.Sequential(
            nn.Conv2d(
                in_channels=in_chan,
                out_channels=out_chan,
                kernel_size=(1,kerSize),
                stride=1,
                padding='same'
            ),
            nn.BatchNorm2d(num_features=out_chan),
            nn.ELU(),
            nn.Dropout(p=dropoutRate),
            nn.Conv2d(
                in_channels=out_chan,
                out_channels=out_chan,
                kernel_size=(1,kerSize),
                stride=1,
                padding='same'
            ),
            nn.BatchNorm2d(num_features=out_chan)
        )
        self.path_2 = nn.Sequential(
            nn.Conv2d(
                in_channels=in_chan,
                out_channels=out_chan,
                kernel_size=(1,kerSize),
                stride=1,
                padding='same'
            ),
            nn.BatchNorm2d(num_features=out_chan),
            nn.ELU(),
            nn.Dropout(p=dropoutRate),
            nn.Conv2d(
                in_channels=out_chan,
                out_channels=out_chan,
                kernel_size=(1,kerSize),
                stride=1,
                padding='same'
            ),
            nn.BatchNorm2d(num_features=out_chan)
        )
        self.path_3 = nn.Sequential(
            nn.Conv2d(
                in_channels=in_chan,
                out_channels=out_chan,
                kernel_size=(1,kerSize),
                stride=1,
                padding='same'
            ),
            nn.BatchNorm2d(num_features=out_chan),
            nn.ELU(),
            nn.Dropout(p=dropoutRate),
            nn.Conv2d(
                in_channels=out_chan,
                out_channels=out_chan,
                kernel_size=(1,kerSize),
                stride=1,
                padding='same'
            ),
            nn.BatchNorm2d(num_features=out_chan)
        )
        # Shortcut: pointwise conv keeping the channel count at in_chan.
        self.path_4 = nn.Conv2d(
            in_channels=in_chan,
            out_channels=in_chan,
            kernel_size=1,
            stride=1
        )
        self.activate = nn.ELU()
    def forward(self, x):
        """Concatenate the three paths, add the shortcut, apply ELU."""
        p1 = self.path_1(x)
        p2 = self.path_2(x)
        p3 = self.path_3(x)
        p123 = torch.cat((p1,p2,p3), dim=1)
        p4 = self.path_4(x)
        x = p123+p4
        out = self.activate(x)
        return out
class CAM(nn.Module):
    """Channel attention module.

    Pools the input down to (chanSize, 1) with both adaptive max- and
    average-pooling, runs each through its own two-layer bottleneck MLP
    (reduction factor *reduRatio*), sums the two branches, applies a
    sigmoid to get attention weights, and returns
    ``x * attention + x`` (residual re-weighting).
    """

    def __init__(self, chanSize, reduRatio):
        super(CAM, self).__init__()
        hidden = chanSize // reduRatio
        # Max-pooling branch and its bottleneck MLP.
        self.maxPool = nn.AdaptiveMaxPool2d(output_size=(chanSize, 1))
        self.maxFc_1 = nn.Linear(in_features=chanSize, out_features=hidden)
        self.maxFc_2 = nn.Linear(in_features=hidden, out_features=chanSize)
        # Average-pooling branch with its own (unshared) MLP.
        self.avgPool = nn.AdaptiveAvgPool2d(output_size=(chanSize, 1))
        self.avgFc_1 = nn.Linear(in_features=chanSize, out_features=hidden)
        self.avgFc_2 = nn.Linear(in_features=hidden, out_features=chanSize)
        self.activate = nn.Sigmoid()

    def forward(self, x):
        identity = x
        # Each branch: pool -> drop trailing singleton dim -> two-layer MLP.
        branch_max = self.maxFc_2(self.maxFc_1(torch.squeeze(self.maxPool(x), dim=-1)))
        branch_avg = self.avgFc_2(self.avgFc_1(torch.squeeze(self.avgPool(x), dim=-1)))
        # Sigmoid attention weights, broadcast back over the last axis.
        attention = self.activate(branch_max + branch_avg)
        attention = torch.unsqueeze(attention, dim=3)
        return attention * identity + identity
class MTFB_CNN(nn.Module):
    """Parallel multi-scale time-frequency block CNN for motor-imagery EEG
    classification (Li et al. 2023, MTFB-CNN).

    Three parallel branches (a/b/c) process the input at short / medium /
    long temporal scales; each is TFB -> max-pool -> ResneXt -> channel
    attention (CAM) -> max-pool -> flatten.  The concatenated branch
    features feed one fully connected softmax classifier.

    Bug fix: ``forward`` compared ``len(x.shape) is not 4`` — an identity
    test against an int literal that only works by CPython's small-int
    caching (and raises SyntaxWarning on 3.8+); replaced with ``!= 4``.
    """
    def __init__(self, chanSize=22, n_classes=4):
        super(MTFB_CNN, self).__init__()
        # Branch a: smallest kernels, finest temporal scale.
        self.branch_a = nn.Sequential(
            TFB(
                kerSize_1=4,
                kerSize_2=6,
                kerSize_3=10,
                kerStr=2,
                out_chan=6,
                pool_ker=3,
                pool_str=2
            ),
            nn.MaxPool2d(
                kernel_size=(1,4),
                stride=(1,4)
            ),
            nn.BatchNorm2d(num_features=24),
            nn.Dropout(p=0.1),
            ResneXt(
                in_chan=24,
                kerSize=8,
                out_chan=8,
                dropoutRate=0.1
            ),
            nn.Dropout(p=0.1),
            CAM(
                chanSize=chanSize,
                reduRatio=2
            ),
            nn.MaxPool2d(
                kernel_size=(1,6),
                stride=(1,6)
            ),
            nn.Flatten()
        )
        # Branch b: medium kernels / strides.
        self.branch_b = nn.Sequential(
            TFB(
                kerSize_1=15,
                kerSize_2=30,
                kerSize_3=45,
                kerStr=3,
                out_chan=6,
                pool_ker=10,
                pool_str=3
            ),
            nn.MaxPool2d(
                kernel_size=(1,3),
                stride=(1,3)
            ),
            nn.BatchNorm2d(num_features=24),
            nn.Dropout(p=0.1),
            ResneXt(
                in_chan=24,
                kerSize=5,
                out_chan=8,
                dropoutRate=0.1
            ),
            nn.Dropout(p=0.1),
            CAM(
                chanSize=chanSize,
                reduRatio=2
            ),
            nn.MaxPool2d(
                kernel_size=(1,4),
                stride=(1,4)
            ),
            nn.Flatten()
        )
        # Branch c: largest kernels, coarsest temporal scale.
        self.branch_c = nn.Sequential(
            TFB(
                kerSize_1=50,
                kerSize_2=70,
                kerSize_3=120,
                kerStr=4,
                out_chan=6,
                pool_ker=20,
                pool_str=4
            ),
            nn.MaxPool2d(
                kernel_size=(1,3),
                stride=(1,3)
            ),
            nn.BatchNorm2d(num_features=24),
            nn.Dropout(p=0.1),
            ResneXt(
                in_chan=24,
                kerSize=5,
                out_chan=8,
                dropoutRate=0.1
            ),
            nn.Dropout(p=0.1),
            CAM(
                chanSize=chanSize,
                reduRatio=2
            ),
            nn.MaxPool2d(
                kernel_size=(1,4),
                stride=(1,4)
            ),
            nn.Flatten()
        )
        # NOTE(review): 20/27/20 are the per-branch time lengths after
        # pooling for a 1000-sample input — confirm if samples changes.
        self.fc = nn.Linear(
            in_features=(20+27+20)*chanSize*24,
            out_features=n_classes
        )
        self.activate = nn.Softmax(dim=-1)
    def forward(self, x):
        """Classify a batch of EEG epochs.

        Accepts (batch, C, T) or (batch, 1, C, T); returns softmax class
        probabilities of shape (batch, n_classes).
        """
        # input shape (batch_size, C, T)
        if len(x.shape) != 4:  # bug fix: was `is not 4`
            x = torch.unsqueeze(x, 1)
        # input shape (batch_size, 1, C, T)
        bra_a = self.branch_a(x) # (batch, out_chan, channels, 20)
        bra_b = self.branch_b(x) # (batch, out_chan, channels, 27)
        bra_c = self.branch_c(x) # (batch, out_chan, channels, 20)
        x = torch.cat((bra_a, bra_b, bra_c), dim=-1)
        x = self.fc(x)
        out = self.activate(x)
        return out
###============================ Initialization parameters ============================###
channels = 22   # EEG channel count (presumably BCI Competition IV-2a — confirm)
samples = 1000  # time samples per epoch
n_classes = 4   # motor-imagery classes
###============================ main function ============================###
def main():
    """Smoke-test the model on random data and print summaries.

    Requires the third-party torchinfo (summary) and torchstat (stat)
    packages imported at the top of the file.
    """
    input = torch.randn(32, channels, samples)  # NOTE(review): shadows builtin input()
    model = MTFB_CNN(channels, n_classes)
    out = model(input)
    print('===============================================================')
    print('out', out.shape)
    print('model', model)
    summary(model=model, input_size=(1,1,channels,samples), device="cpu")
    stat(model, (1, channels, samples))
main() | LiangXiaohan506/EISATC-Fusion | models/MTFB_CNN.py | MTFB_CNN.py | py | 10,547 | python | en | code | 2 | github-code | 50 |
21393235048 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Load strategies/payoffs from the varying-M imitation run (Z=100 agents, N=12, 1000 steps).
data = np.load("MUG-varyM_AllImitate_Z100_N12_n1000_beta10.npz")
P_pq_t = data["strategies"]
pay_pq_t = data["payoffs"]  # loaded but unused here; kept for interactive inspection
M_arr = data["GroupCutoff_arr"]
print(np.shape(P_pq_t))
# Average the last 250 time steps over trials, time, and agents, leaving (M, 2)
# where column 0 is the offer p and column 1 the acceptance threshold q.
Pmean_pq_t = np.mean(np.mean(np.mean(
    P_pq_t[:,:,-250:,:,:], axis=1), axis=1), axis=1)
print(np.shape(Pmean_pq_t))
plt.plot(M_arr, Pmean_pq_t[:,0], 'bo-',
         label=r'$<<<p>_{i=1}^{Z}>_{t=750}^{1000}>_{trial=1}^{25}$')
plt.plot(M_arr, Pmean_pq_t[:,1], 'go-',
         label=r'$<<<q>_{i=1}^{Z}>_{t=750}^{1000}>_{trial=1}^{25}$')
plt.legend(framealpha=0)
plt.xlabel("M - acceptance cutoff")
plt.title("N=12 Z=100")
# Save before show(): show() may clear the current figure on some backends.
plt.savefig("Z100_N12_qpVSM.png")
plt.show()
| anuanupapa/MUG | Z100_N12/data_plot.py | data_plot.py | py | 721 | python | en | code | 0 | github-code | 50 |
27806385996 | #!/usr/bin/env python3
import patt
import logging
import os
import tempfile
from pathlib import Path
import shutil
from string import Template
import time
logger = logging.getLogger('patt_postgres')
def log_results(result, hide_stdout=False):
    """Forward *result* to patt's shared result logger under this module's logger name."""
    patt.log_results(result=result, logger='patt_postgres', hide_stdout=hide_stdout)
"""
install postgres packages and dep on each nodes
"""
def postgres_init(postgres_version, nodes):
    """Install the requested postgres version on every node (idempotent)."""
    logger.info("processing %s", [n.hostname for n in nodes])
    patt.host_id(nodes)
    patt.check_dup_id(nodes)
    result = patt.exec_script(nodes=nodes,
                              src="./dscripts/d20.postgres.sh",
                              args=['init', postgres_version],
                              sudo=True)
    log_results(result)
def postgres_ssl_cert_init(nodes):
    """Prepare the per-node SSL certificate scaffolding for postgres."""
    logger.info("processing %s", [n.hostname for n in nodes])
    patt.host_id(nodes)
    patt.check_dup_id(nodes)
    result = patt.exec_script(nodes=nodes,
                              src="dscripts/ssl_cert_postgres.sh",
                              args=['init'],
                              sudo=True)
    log_results(result)
def postgres_get_cert(q, postgres_user='postgres', nodes=None):
    """Fetch the cluster CA certificate or key from the first node that answers.

    Parameters:
        q: either 'root.crt' (CA certificate) or 'root.key' (CA private key).
        postgres_user: unix user owning the certificate files on the nodes.
        nodes: candidate nodes, tried in order (best effort).

    Returns:
        The certificate/key text from the first node that provides it, or
        None when no node could.

    Raises:
        ValueError: for an unknown *q*.
    """
    # BUGFIX: mutable default argument replaced with a None sentinel.
    if nodes is None:
        nodes = []
    if q == 'root.crt':
        e = '--get_ca_crt'
    elif q == 'root.key':
        e = '--get_ca_key'
    else:
        raise ValueError("unknown query {}".format(q))
    for n in nodes:
        # BUGFIX: `result` is reset per node; previously a failure on the very
        # first node made the `finally` clause raise NameError on an unbound name.
        result = None
        try:
            result = patt.exec_script(nodes=[n], src="dscripts/ssl_cert_postgres.py",
                                      args=['-u', postgres_user, e], sudo=True)
        except Exception:  # best effort: try the next node (was a bare except)
            continue
        else:
            for r in result:
                if r.out:
                    return r.out
        finally:
            if result is not None:
                log_results(result, hide_stdout=True)
def postgres_ssl_cert(cluster_name,
                      postgres_user='postgres',
                      nodes=[],
                      keep_ca=True):
    """Ensure a cluster CA exists, distribute it to every node, and issue node certs.

    The CA key/cert are fetched from a peer (or generated on the first node if
    absent); when not running from a peer and keep_ca is True, a local copy is
    kept under ~/.patt/ca/<cluster_name>-root.{crt,key}.
    """
    ssl_script="misc/self_signed_certificate.py"
    source = patt.Source()
    logger.info ("processing {}".format ([n.hostname for n in nodes]))
    patt.host_id(nodes)
    patt.check_dup_id (nodes)
    # if run via puppet it will install the cert on the running agent but
    # not the other nodes before running the installer.
    # Retrieve and distribute the cert to all nodes
    running_node = source.whoami(nodes)
    ca_provider=nodes
    if running_node:
        #ca_provider=[running_node]
        self_ca_dir=None # installing from peer
    else:
        # not installing from peer
        # NOTE(review): if keep_ca is False, self_ca_dir is never assigned and the
        # later `if self_ca_dir:` raises NameError — confirm keep_ca=False is unused.
        if keep_ca:
            self_home = os.path.expanduser("~")
            self_ca_dir = self_home + '/' + '.patt/ca'
            Path(self_ca_dir).mkdir(parents=True, exist_ok=True, mode=0o700)
    for i in ['root.key', 'root.crt']:
        tmp = None
        # up to 5 attempts: fetch the CA material; on failure, (re)generate the CA
        # on the first node and retry.
        for k in range (5):
            try:
                tmp = postgres_get_cert (q=i, postgres_user=postgres_user, nodes=ca_provider)
                assert isinstance(tmp, (str, bytes))
            except:
                # generate CA on first node and retry
                result = patt.exec_script (nodes=[sorted(nodes, key=lambda n: n.hostname)[0]],
                                           src="dscripts/ssl_cert_postgres.py",
                                           payload=ssl_script,
                                           args=['-c'] + [cluster_name] +
                                           ['-s'] + [os.path.basename (ssl_script)] +
                                           ['-u'] + [postgres_user] +
                                           ['--ca_country_name', "'UK'"] +
                                           ['--ca_state_or_province_name', "'United Kingdom'"] +
                                           ['--ca_locality_name', "'Cambridge'"] +
                                           ['--ca_organization_name', "'Patroni Postgres Cluster'"] +
                                           ['--ca_common_name', "'CA {}'".format (cluster_name)] +
                                           ['--ca_not_valid_after', "'3650'"] +
                                           ['-p'] + [p.hostname for p in nodes] +
                                           list ([" ".join(p.ip_aliases) for p in nodes]),
                                           sudo=True)
                log_results (result)
                tmp = postgres_get_cert (q=i, postgres_user=postgres_user,
                                         nodes=[sorted(nodes, key=lambda n: n.hostname)[0]])
                if isinstance(tmp, (str, bytes)):
                    break
                else:
                    time.sleep (3)
                    continue
            else:
                break
        assert isinstance(tmp, (str, bytes))
        # Stage the fetched material in a temp file, optionally keep a local copy,
        # then push it to every node.
        with tempfile.TemporaryDirectory() as tmp_dir:
            with open (tmp_dir + '/' + i, "w") as cf:
                cf.write(tmp)
                cf.write('\n')
                cf.flush()
                cf.close()
                os.chmod(cf.name, 0o640)
                if self_ca_dir:
                    if os.path.isdir(self_ca_dir):
                        t = self_ca_dir + '/' + cluster_name + '-' + os.path.basename (cf.name)
                        if not os.path.isfile (t): shutil.copy2(cf.name, t)
                result = patt.exec_script (nodes=nodes, src="dscripts/ssl_cert_postgres.sh",
                                           payload=tmp_dir + '/' + i,
                                           args=['copy_ca', os.path.basename (tmp_dir + '/' + i), i],
                                           sudo=True)
                log_results (result, hide_stdout=True)
    # Finally issue the per-node server certificates signed by the distributed CA.
    result = patt.exec_script (nodes=nodes,
                               src="dscripts/ssl_cert_postgres.py",
                               payload=ssl_script,
                               args=['-c'] + [cluster_name] +
                               ['-s'] + [os.path.basename (ssl_script)] +
                               ['-u'] + [postgres_user] +
                               ['-p'] + [p.hostname for p in nodes] +
                               list ([" ".join(p.ip_aliases) for p in nodes]),
                               sudo=True)
    log_results (result)
"""
lookup ~/.patt/ca/ for cluster_name-root.crt and cluster_name-root.key
if found generate a user cert ~/.patt/ca/cluster_name-user_name.crt/key
"""
def postgres_ssl_user_cert(cluster_name, user_names=None):
    """Issue client certificates for *user_names*, signed by the locally kept cluster CA.

    Looks in ~/.patt/ca/ for <cluster_name>-root.{crt,key}; silently does
    nothing when the CA material is not present locally.
    """
    # BUGFIX: mutable default argument replaced with a None sentinel.
    user_names = user_names if user_names is not None else []
    self_home = os.path.expanduser("~")
    self_ca_dir = self_home + '/' + '.patt/ca'
    ca_path_crt = self_ca_dir + '/' + cluster_name + '-' + 'root.crt'
    ca_path_key = self_ca_dir + '/' + cluster_name + '-' + 'root.key'
    # Guard clause: without the local CA pair there is nothing to sign with.
    if not (os.path.isfile(ca_path_crt) and os.path.isfile(ca_path_key)):
        return
    # Imported lazily so the CA-less path has no dependency on the generator.
    import misc.self_signed_certificate as ssl_gen
    ca_key = ssl_gen.private_key(key_path=ca_path_key)
    for user in user_names:
        usr_path_crt = self_ca_dir + '/' + cluster_name + '-' + user + '.crt'
        usr_path_key = self_ca_dir + '/' + cluster_name + '-' + user + '.key'
        usr_key = ssl_gen.private_key(key_path=usr_path_key)
        usr_crt = ssl_gen.mk_certificate_thin(country_name="UK",
                                              state_or_province_name="United Kingdom",
                                              locality_name="Cambridge",
                                              organization_name="Patroni Postgres Cluster",
                                              common_name=user,
                                              private_key=ca_key,
                                              public_key=usr_key.public_key(),
                                              certificate_path=usr_path_crt,
                                              ca_path=ca_path_crt,
                                              not_valid_after_days=365,
                                              dns=[],
                                              ip=[]
                                              )
"""
exec the script file as user postgres
if the script file is local, it will be first copyed on the nodes
if script file is executable it will be run as it is otherwise it will run via bash
script_file must be idempotent
"""
def postgres_exec(postgres_peers, script_file):
    """Run *script_file* as the postgres user on every peer and return the results.

    A local file is shipped as payload and referenced by basename; anything
    else is passed through verbatim for the remote side to resolve.
    """
    if os.path.isfile(script_file):
        payload, script_arg = script_file, os.path.basename(script_file)
    else:
        payload, script_arg = None, script_file
    result = patt.exec_script(nodes=postgres_peers,
                              src="./dscripts/postgres_exec.sh",
                              payload=payload,
                              args=[script_arg],
                              sudo=True,
                              log_call=True)
    log_results(result)
    return result
def postgres_db_role(postgres_peers,
                     role_name, database_name, role_options=None,
                     template_file=''):
    """Render the role/database SQL template and execute it on the peers.

    Parameters:
        postgres_peers: nodes to run the generated script on.
        role_name / database_name: values substituted into the template.
        role_options: optional list of role options, rendered as "WITH ...".
        template_file: path of the string.Template SQL template.
    """
    # BUGFIX: mutable default argument replaced with a None sentinel.
    dictionary = {
        'role_name': role_name,
        'database_name': database_name,
        'role_options': "WITH {}".format(" ".join(role_options)) if role_options else '',
    }
    with open(template_file, 'r') as t:
        # BUGFIX: the rendered Template no longer shadows the builtin `str`
        tmpl = Template(t.read())
    with tempfile.TemporaryDirectory() as tmp_dir:
        script_path = tmp_dir + '/' + 'pg_db_role.script'
        with open(script_path, "w") as cf:
            cf.write(tmpl.substitute(dictionary))
        postgres_exec(postgres_peers, script_path)
def postgres_create_role(postgres_peers, role_name, role_options=[]):
    """Idempotently create a postgres role on the cluster."""
    postgres_db_role(postgres_peers=postgres_peers,
                     role_name=role_name,
                     database_name='',
                     role_options=role_options,
                     template_file="./config/pg_create_role.tmpl")
def postgres_create_database(postgres_peers, database_name, owner):
    """Idempotently create *database_name* owned by *owner*."""
    postgres_db_role(postgres_peers=postgres_peers,
                     database_name=database_name,
                     role_name=owner,
                     template_file="./config/pg_create_database.tmpl")
"""
postgres tablespace
"""
def postgres_create_tablespace(postgres_peers,
                               tablespace_name, location_path,
                               role_name='PUBLIC',
                               template_file="./config/pg_create_tablespace.tmpl"):
    """Create a tablespace at *location_path* and grant CREATE on it to *role_name*.

    An empty `role_acl` substitution signals the template to grant to PUBLIC.
    """
    role_acl = "" if role_name.strip().lower() == "public" else role_name.strip()
    dictionary = {
        'role_name': role_name,
        'role_acl': role_acl,
        'tablespace_name': tablespace_name,
        'tablespace_location': location_path,
    }
    with open(template_file, 'r') as t:
        # BUGFIX: the rendered Template no longer shadows the builtin `str`
        tmpl = Template(t.read())
    with tempfile.TemporaryDirectory() as tmp_dir:
        script_path = tmp_dir + '/' + 'pg_tablespace.script'
        with open(script_path, "w") as cf:
            cf.write(tmpl.substitute(dictionary))
        postgres_exec(postgres_peers, script_path)
"""
install postgres GC cron script
"""
def postgres_gc_cron(nodes, vaccum_full_df_percent, target, postgres_version):
    """Render and install the postgres GC (vacuum) cron script at *target* on every node."""
    logger.info("processing %s", [n.hostname for n in nodes])
    patt.host_id(nodes)
    patt.check_dup_id(nodes)
    tmpl = "./config/postgres-gc.sh.tmpl"
    # --skip-locked is only understood by vacuumdb from postgres 12 onwards
    vacuumdb_option = "--skip-locked" if postgres_version >= 12 else ""
    script_args = ['-t', os.path.basename(tmpl),
                   '-o', target,
                   '--chmod', '755',
                   '--dictionary_key_val', "pc={}".format(vaccum_full_df_percent),
                   '--dictionary_key_val', "vacuumdb_option={}".format(vacuumdb_option),
                   '--dictionary_key_val', "postgres_version={}".format(postgres_version)]
    result = patt.exec_script(nodes=nodes,
                              src="./dscripts/tmpl2file.py",
                              payload=tmpl,
                              args=script_args,
                              sudo=True)
    log_results(result)
"""
return when a connection check to a PostgreSQL database can be done or when timeout is reached
"""
def postgres_wait_ready(postgres_peers, postgres_version, timeout=120):
    """Block until a PostgreSQL connection check succeeds or *timeout* is reached.

    Returns:
        True if at least one peer reported ready (produced output), else False.
    """
    logger.info("processing %s", [n.hostname for n in postgres_peers])
    patt.host_id(postgres_peers)
    patt.check_dup_id(postgres_peers)
    result = patt.exec_script(nodes=postgres_peers, src="./dscripts/pg_wait_ready.sh",
                              args=['wait_pg_isready', postgres_version, timeout], sudo=True)
    log_results(result)
    # idiomatic equivalent of the old `not all(x == False ...)` (E712): any truthy output
    return any(bool(n.out) for n in result)
| unipartdigital/puppet-patt | files/patt/patt_postgres.py | patt_postgres.py | py | 12,660 | python | en | code | 1 | github-code | 50 |
30213462563 | import copy
from typing import Dict, Any, List, Tuple
import collections
from tqdm import tqdm
import torch
import torch.nn
import torch.utils.data.dataloader
from tabluence.deep_learning.data.tensorizer.single_slice.base import SingleSliceTensorizerBase
class CustomTensorizer(SingleSliceTensorizerBase):
    """
    The :cls:`CustomTensorizer` class is used to make sequence of tensors ready to be fed
    into PyTorch models, obtained from the single slice dataset.
    The tensorization configuration. Example:
    ```
    {
        'timestamp_column': 'utc_timestamp',
        'value_config': {
            'daily': {
                'embed': {
                    'columns': ['heart_rate_tsvalue'],
                    'embedding_dim': 10,
                },
                'bring': ['heart_rate_tsvalue']
            }
        }
    }
    ```
    """
    def __init__(self, *args, **kwargs):
        """constructor"""
        super(CustomTensorizer, self).__init__(*args, **kwargs)
        # Mapping "<data_source>___<column>" -> list of observed category values.
        # Stays None until `learn` has been run; embeddings cannot be used before then.
        self.embedding_layouts = None
    def tensorize_single(self, slice_data, meta_data) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor], Dict[str, Any]]:
        """
        tensorization of a single item within the batch of slices.
        Parameters
        ----------
        slice_data: `Dict[str, pandas.DataFrame]`, required
            The data of a single slice.
        meta_data: `Dict[str, Any]`, required
            The meta data of a single slice.
        Returns
        ----------
        `Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor], Dict[str, Any]]`: The batch item after tensorization (in almost all cases, the list item is a torch.Tensor).
        """
        new_timestamp_data = dict()
        new_slice_data = dict()
        new_meta_data = meta_data
        for data_source_name in slice_data.keys():
            # only data sources mentioned in value_config are tensorized
            if data_source_name not in self.config['value_config']:
                continue
            # sort rows chronologically in place before extracting sequences
            slice_data[data_source_name].sort_values(by=self.config['timestamp_column'], inplace=True)
            data_source_config = self.config['value_config'][data_source_name]
            sequence_reps = []
            timestamps = torch.from_numpy(slice_data[data_source_name][self.config['timestamp_column']].to_numpy()).to(self.device)
            # 'bring' columns are carried over as raw float features, one (T, 1) tensor each
            sequence_reps += [torch.from_numpy(slice_data[data_source_name][column].to_numpy().astype('float')).unsqueeze(-1).to(self.device) for column in data_source_config['bring']]
            if 'embed' in data_source_config:
                for column_to_be_embedded in data_source_config['embed']['columns']:
                    # map each categorical value to its index in the learned layout
                    categorizer = lambda x: self.embedding_layouts[f"{data_source_name}___{column_to_be_embedded}"].index(x)
                    category_indices = torch.from_numpy(slice_data[data_source_name].loc[:, column_to_be_embedded].apply(categorizer).to_numpy().astype('int')).long().to(self.device)
                    sequence_reps += [getattr(self, f"embedding_{data_source_name}___{column_to_be_embedded}")(category_indices)]
            # concatenate raw + embedded features along the feature dimension
            if len(sequence_reps) > 1:
                sequence_reps = torch.cat(sequence_reps, dim=1)
            elif len(sequence_reps) == 1:
                sequence_reps = sequence_reps[0]
            else:
                sequence_reps = None
            new_slice_data[data_source_name] = sequence_reps
            new_timestamp_data[data_source_name] = timestamps
        return new_slice_data, new_timestamp_data, new_meta_data
    def tensorize(self, batch) -> Dict[str, List[Any]]:
        """
        Parameters
        ----------
        batch: `Dict[str, List[Dict[str, pandas.DataFrame]]]`, required
            The collated batch of a single-slice dataloader.
        Returns
        ----------
        `Dict[str, List[Any]]`: The batch after tensorization (in almost all cases, the list item is a torch.Tensor).
        """
        # deep-copied so the caller's collated batch is never mutated
        batch = copy.deepcopy(batch)
        modified_batch = {
            'slice': [],
            'meta': [],
            'timestamp': []
        }
        for meta_data, slice_data in zip(batch['meta'], batch['slice']):
            new_slice_data, new_timestamp_data, new_meta_data = self.tensorize_single(slice_data, meta_data)
            modified_batch['slice'] += [{x: new_slice_data[x].to(self.device) for x in new_slice_data.keys()}]
            modified_batch['meta'] += [new_meta_data]
            modified_batch['timestamp'] += [{x: new_timestamp_data[x].to(self.device) for x in new_timestamp_data.keys()}]
        return modified_batch
    def sanity_check(self) -> None:
        """Validate the tensorization configuration (asserts on malformed config)."""
        # - for configuration:
        assert 'timestamp_column' in self.config, '"timestamp_column" is missing in the configuration'
        assert 'value_config' in self.config, '"value_config" is missing in the configuration'
        assert isinstance(self.config['value_config'], dict), '"value_config" must be a dictionary'
        for ds_name, ds_config in self.config['value_config'].items():
            if 'embed' in ds_config:
                # NOTE(review): this compares the embed column count against the
                # 'bring' list length, not against 'embedding_dim' — confirm intent.
                assert len(ds_config['embed']['columns']) == len(ds_config['bring']), "the embedding dims are not provided for all columns"
                for i, k in enumerate(ds_config['embed']['embedding_dim']):
                    assert k > 1, f"the embedding dims must be greater than 1. It is not for the column {ds_config['embed']['columns'][i]}"
    def build_embeddings(self) -> None:
        """
        Builds the embedding modules for the columns that are to be embedded.
        """
        assert self.embedding_layouts is not None, "the embeddings layouts are not built, please run `learn` first"
        for key in self.embedding_layouts:
            data_source_name, feature_name = key.split('___')
            ds_config = self.config['value_config'][data_source_name]
            embedding_dim = ds_config['embed']['embedding_dim'][ds_config['embed']['columns'].index(feature_name)]
            # registered as a submodule so it moves with the tensorizer's device/dtype
            self.add_module(f"embedding_{data_source_name}___{feature_name}", torch.nn.Embedding(len(self.embedding_layouts[key]), embedding_dim))
    def get_embedding_layout(self, data_source_name: str, feature_name: str) -> List[str]:
        """
        getting the embedding layout for a specific feature.
        Parameters
        ----------
        data_source_name: `str`, required
            The name of the data source.
        feature_name: `str`, required
            The name of the feature.
        Returns
        ----------
        `List[str]`: The embedding layout for the feature associated with the given data source.
        """
        assert self.embedding_layouts is not None, "run `learn` first"
        return self.embedding_layouts[f"{data_source_name}___{feature_name}"]
    def learn(self, dataloader: torch.utils.data.dataloader.DataLoader) -> None:
        """
        Parameters
        ----------
        dataloader: `torch.utils.data.dataloader.DataLoader`, required
            A single slice dataset's dataloader. Please note that this is the dataloader that will be used
            to learn all possible values for those columns to be embedded (`None`, corresponding to `pandas.nan`, is included as well).
            When this process is done, the layouts themselves can also be accessed by calling :meth:`get_embedding_layout`.
        """
        # nothing to do when no data source asks for embeddings
        there_is_embedding_to_learn = False
        for ds in self.config['value_config'].keys():
            if 'embed' in self.config['value_config'][ds]:
                there_is_embedding_to_learn = True
        if not there_is_embedding_to_learn:
            return
        embedding_layouts = collections.defaultdict(lambda: set())
        # one full pass over the dataloader to collect every distinct category value
        for batch in tqdm(dataloader):
            batch = copy.deepcopy(batch)
            for slice_data in batch['slice']:
                for ds in slice_data.keys():
                    if ds in self.config['value_config']:
                        if 'embed' in self.config['value_config'][ds].keys():
                            for feature_name_to_be_embedded in self.config['value_config'][ds]['embed']['columns']:
                                embedding_layouts[ds +'___' + feature_name_to_be_embedded] = embedding_layouts[ds +'___' + feature_name_to_be_embedded].union(
                                    slice_data[ds][feature_name_to_be_embedded].unique().tolist())
        # freeze the collected sets into ordered lists (index == embedding id)
        self.embedding_layouts = dict()
        for key in embedding_layouts.keys():
            self.embedding_layouts[key] = list(embedding_layouts[key])
        self.build_embeddings()
| shayanfazeli/tabluence | tabluence/deep_learning/data/tensorizer/single_slice/custom_tensorizer.py | custom_tensorizer.py | py | 8,511 | python | en | code | 4 | github-code | 50 |
11488052446 | from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import dpset
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.app.wsgi import ControllerBase, WSGIApplication, route
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.topology.api import get_switch, get_link, get_host, get_all_host
from ryu.topology import event, switches
import networkx as nx
import json
import logging
import struct
from webob import Response
from ryu.lib.mac import haddr_to_bin
from ryu.lib.packet.packet import Packet
from ryu.lib.packet import arp
from ryu.lib.packet import ipv4
from ryu.lib.packet import tcp
from ryu.lib.packet import udp
from ryu.ofproto import ether
from ryu.app.ofctl.api import get_datapath
# Assumed port count of every switch in the topology — TODO confirm per deployment.
NUMBER_OF_SWITCH_PORTS = 3
class ZodiacSwitch(app_manager.RyuApp):
    """Ryu OpenFlow 1.3 app: MAC/IP learning plus shortest-path flow installation.

    Maintains a NetworkX digraph of the discovered topology, answers/floods ARP
    on behalf of learned hosts, and installs bidirectional IPv4 flows along the
    shortest path between source and destination switches.
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    _CONTEXTS = {'wsgi': WSGIApplication}
    def __init__(self, *args, **kwargs):
        """Initialise learning tables and the topology graph."""
        super(ZodiacSwitch, self).__init__(*args, **kwargs)
        wsgi = kwargs['wsgi']  # retrieved from the WSGI context; unused locally — TODO confirm needed
        self.topology_api_app = self
        self.net = nx.DiGraph()  # switch-level topology; edge attr 'port' = egress port
        self.nodes = {}
        self.links = {}
        self.mac_to_port = {}  # dpid -> {mac -> in_port}
        self.mac_to_dpid = {}  # mac -> dpid where it was learned
        self.port_to_mac = {}  # dpid -> {port -> mac}
        self.ip_to_mac = {}    # learned ARP bindings
        self.port_occupied = {}  # dpid -> {port -> 1 if inter-switch link, else 0}
        self.GLOBAL_VARIABLE = 0  # one-shot flag: port_occupied initialised yet?
    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """On handshake, install the table-miss entry that punts packets to us."""
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        # install table-miss flow entry
        #
        # We specify NO BUFFER to max_len of the output action due to
        # OVS bug. At this moment, if we specify a lesser number, e.g.,
        # 128, OVS will send Packet-In with invalid buffer_id and
        # truncated packet data. In that case, we cannot output packets
        # correctly. The bug has been fixed in OVS v2.1.0.
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions)
    def add_flow(self, datapath, priority, match, actions, buffer_id=None):
        """Install a flow-mod with APPLY_ACTIONS on *datapath* (optionally buffered)."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match,
                                    instructions=inst)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst)
        datapath.send_msg(mod)
    def send_arp(self, datapath, opcode, srcMac, srcIp, dstMac, dstIp, outPort):
        """Craft and emit an ARP packet (opcode 1 = request, 2 = reply) out *outPort*."""
        # If it is an ARP request
        if opcode == 1:
            targetMac = "00:00:00:00:00:00"
            targetIp = dstIp
        # If it is an ARP reply
        elif opcode == 2:
            targetMac = dstMac
            targetIp = dstIp
        e = ethernet.ethernet(dstMac, srcMac, ether.ETH_TYPE_ARP)
        a = arp.arp(1, 0x0800, 6, 4, opcode, srcMac, srcIp, targetMac, targetIp)
        p = Packet()
        p.add_protocol(e)
        p.add_protocol(a)
        p.serialize()
        actions = [datapath.ofproto_parser.OFPActionOutput(outPort, 0)]
        out = datapath.ofproto_parser.OFPPacketOut(
            datapath=datapath,
            buffer_id=0xffffffff,
            in_port=datapath.ofproto.OFPP_CONTROLLER,
            actions=actions,
            data=p.data)
        datapath.send_msg(out)
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Main packet-in pipeline: topology refresh, MAC learning, ARP proxy, IP routing."""
        # If you hit this you might want to increase
        # the "miss_send_length" of your switch
        if ev.msg.msg_len < ev.msg.total_len:
            self.logger.debug("packet truncated: only %s of %s bytes",
                              ev.msg.msg_len, ev.msg.total_len)
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        dst = eth.dst
        src = eth.src
        dpid_src = datapath.id
        # TOPOLOGY DISCOVERY------------------------------------------
        switch_list = get_switch(self.topology_api_app, None)
        switches=[switch.dp.id for switch in switch_list]
        if self.GLOBAL_VARIABLE == 0:
            # first packet-in ever: mark every port of every switch as free
            for s in switches:
                for switch_port in range(1, NUMBER_OF_SWITCH_PORTS+1):
                    self.port_occupied.setdefault(s, {})
                    self.port_occupied[s][switch_port] = 0
            self.GLOBAL_VARIABLE = 1
        self.net.add_nodes_from(switches)
        links_list = get_link(self.topology_api_app, None)
        # add both directions of every inter-switch link, tagged with the egress port
        links=[(link.src.dpid,link.dst.dpid,{'port':link.src.port_no}) for link in links_list]
        self.net.add_edges_from(links)
        links=[(link.dst.dpid,link.src.dpid,{'port':link.dst.port_no}) for link in links_list]
        self.net.add_edges_from(links)
        links_=[(link.dst.dpid,link.src.dpid,link.dst.port_no) for link in links_list]
        for l in links_:
            # ports carrying inter-switch links are "occupied" (not host-facing)
            self.port_occupied[l[0]][l[2]] = 1
        # MAC LEARNING-------------------------------------------------
        self.mac_to_port.setdefault(dpid_src, {})
        self.port_to_mac.setdefault(dpid_src, {})
        self.mac_to_port[dpid_src][src] = in_port
        self.mac_to_dpid[src] = dpid_src
        self.port_to_mac[dpid_src][in_port] = src
        # HANDLE ARP PACKETS--------------------------------------------
        if eth.ethertype == ether_types.ETH_TYPE_ARP:
            arp_packet = pkt.get_protocol(arp.arp)
            arp_dst_ip = arp_packet.dst_ip
            arp_src_ip = arp_packet.src_ip
            # self.logger.info("It is an ARP packet")
            # If it is an ARP request
            if arp_packet.opcode == 1:
                # self.logger.info("It is an ARP request")
                if arp_dst_ip in self.ip_to_mac:
                    # known binding: answer the request ourselves (ARP proxy)
                    # self.logger.info("The address is inside the IP TO MAC table")
                    srcIp = arp_dst_ip
                    dstIp = arp_src_ip
                    srcMac = self.ip_to_mac[arp_dst_ip]
                    dstMac = src
                    outPort = in_port
                    opcode = 2
                    self.send_arp(datapath, opcode, srcMac, srcIp, dstMac, dstIp, outPort)
                    # self.logger.info("packet in %s %s %s %s", srcMac, srcIp, dstMac, dstIp)
                else:
                    # unknown binding: learn the sender, then flood the request
                    # out of every host-facing (non-occupied) port
                    # self.logger.info("The address is NOT inside the IP TO MAC table")
                    srcIp = arp_src_ip
                    dstIp = arp_dst_ip
                    srcMac = src
                    dstMac = dst
                    # learn the new IP address
                    self.ip_to_mac.setdefault(srcIp, {})
                    self.ip_to_mac[srcIp] = srcMac
                    # Send and ARP request to all the switches
                    opcode = 1
                    for id_switch in switches:
                        #if id_switch != dpid_src:
                        datapath_dst = get_datapath(self, id_switch)
                        for po in range(1,len(self.port_occupied[id_switch])+1):
                            if self.port_occupied[id_switch][po] == 0:
                                outPort = po
                                if id_switch == dpid_src:
                                    if outPort != in_port:
                                        self.send_arp(datapath_dst, opcode, srcMac, srcIp, dstMac, dstIp, outPort)
                                else:
                                    self.send_arp(datapath_dst, opcode, srcMac, srcIp, dstMac, dstIp, outPort)
            else:
                # ARP reply: learn the binding and forward it to the requester's port
                srcIp = arp_src_ip
                dstIp = arp_dst_ip
                srcMac = src
                dstMac = dst
                if arp_dst_ip in self.ip_to_mac:
                    # learn the new IP address
                    self.ip_to_mac.setdefault(srcIp, {})
                    self.ip_to_mac[srcIp] = srcMac
                    # Send and ARP reply to the switch
                    opcode = 2
                    outPort = self.mac_to_port[self.mac_to_dpid[dstMac]][dstMac]
                    datapath_dst = get_datapath(self, self.mac_to_dpid[dstMac])
                    self.send_arp(datapath_dst, opcode, srcMac, srcIp, dstMac, dstIp, outPort)
        # HANDLE IP PACKETS-----------------------------------------------
        ip4_pkt = pkt.get_protocol(ipv4.ipv4)
        if ip4_pkt:
            src_ip = ip4_pkt.src
            dst_ip = ip4_pkt.dst
            src_MAC = src
            dst_MAC = dst
            proto = str(ip4_pkt.proto)
            sport = "0"
            dport = "0"
            if proto == "6":
                tcp_pkt = pkt.get_protocol(tcp.tcp)
                sport = str(tcp_pkt.src_port)
                dport = str(tcp_pkt.dst_port)
            if proto == "17":
                udp_pkt = pkt.get_protocol(udp.udp)
                sport = str(udp_pkt.src_port)
                dport = str(udp_pkt.dst_port)
            self.logger.info("Packet in switch: %s, source IP: %s, destination IP: %s, From the port: %s", dpid_src, src_ip, dst_ip, in_port)
            # self.logger.info("Packet in switch: %s, source MAC: %s, destination MAC: %s, From the port: %s", dpid_src, src, dst, in_port)
            # NOTE(review): raises KeyError if dst_MAC was never learned — confirm
            # ARP always precedes IP traffic in this deployment.
            datapath_dst = get_datapath(self, self.mac_to_dpid[dst_MAC])
            dpid_dst = datapath_dst.id
            self.logger.info(" --- Destination present on switch: %s", dpid_dst)
            # Shortest path computation
            path = nx.shortest_path(self.net,dpid_src,dpid_dst)
            self.logger.info(" --- Shortest path: %s", path)
            # Set the flows for different cases
            if len(path) == 1:
                # source and destination hosts share one switch: L2 flows suffice
                In_Port = self.mac_to_port[dpid_src][src]
                Out_Port = self.mac_to_port[dpid_dst][dst]
                actions_1 = [datapath.ofproto_parser.OFPActionOutput(Out_Port)]
                actions_2 = [datapath.ofproto_parser.OFPActionOutput(In_Port)]
                match_1 = parser.OFPMatch(in_port=In_Port, eth_dst=dst)
                match_2 = parser.OFPMatch(in_port=Out_Port, eth_dst=src)
                self.add_flow(datapath, 1, match_1, actions_1)
                self.add_flow(datapath, 1, match_2, actions_2)
                # re-emit the triggering packet so it is not lost
                actions = [datapath.ofproto_parser.OFPActionOutput(Out_Port)]
                data = msg.data
                pkt = packet.Packet(data)
                eth = pkt.get_protocols(ethernet.ethernet)[0]
                # self.logger.info(" --- Changing destination mac to %s" % (eth.dst))
                pkt.serialize()
                out = datapath.ofproto_parser.OFPPacketOut(
                    datapath=datapath, buffer_id=0xffffffff, in_port=datapath.ofproto.OFPP_CONTROLLER,
                    actions=actions, data=pkt.data)
                datapath.send_msg(out)
            elif len(path) >= 2:
                # multi-switch path: install IPv4 flows on both endpoints...
                datapath_src = get_datapath(self, path[0])
                datapath_dst = get_datapath(self, path[len(path)-1])
                dpid_src = datapath_src.id
                #self.logger.info("dpid_src %s", dpid_src)
                dpid_dst = datapath_dst.id
                #self.logger.info("dpid_dst %s", dpid_dst)
                In_Port_src = self.mac_to_port[dpid_src][src]
                #self.logger.info("In_Port_src %s", In_Port_src)
                In_Port_dst = self.mac_to_port[dpid_dst][dst]
                #self.logger.info("In_Port_dst %s", In_Port_dst)
                Out_Port_src = self.net[path[0]][path[1]]['port']
                #self.logger.info("Out_Port_src %s", Out_Port_src)
                Out_Port_dst = self.net[path[len(path)-1]][path[len(path)-2]]['port']
                #self.logger.info("Out_Port_dst %s", Out_Port_dst)
                actions_1_src = [datapath.ofproto_parser.OFPActionOutput(Out_Port_src)]
                match_1_src = parser.OFPMatch(in_port=In_Port_src, eth_type = 0x0800, ipv4_src=src_ip, ipv4_dst=dst_ip)
                self.add_flow(datapath_src, 1, match_1_src, actions_1_src)
                actions_2_src = [datapath.ofproto_parser.OFPActionOutput(In_Port_src)]
                match_2_src = parser.OFPMatch(in_port=Out_Port_src, eth_type = 0x0800, ipv4_src=dst_ip, ipv4_dst=src_ip)
                self.add_flow(datapath_src, 1, match_2_src, actions_2_src)
                self.logger.info("Install the flow on switch %s", path[0])
                actions_1_dst = [datapath.ofproto_parser.OFPActionOutput(Out_Port_dst)]
                match_1_dst = parser.OFPMatch(in_port=In_Port_dst, eth_type = 0x0800, ipv4_src=dst_ip, ipv4_dst=src_ip)
                self.add_flow(datapath_dst, 1, match_1_dst, actions_1_dst)
                actions_2_dst = [datapath.ofproto_parser.OFPActionOutput(In_Port_dst)]
                match_2_dst = parser.OFPMatch(in_port=Out_Port_dst, eth_type = 0x0800, ipv4_src=src_ip, ipv4_dst=dst_ip)
                self.add_flow(datapath_dst, 1, match_2_dst, actions_2_dst)
                self.logger.info("Install the flow on switch %s", path[len(path)-1])
                if len(path) > 2:
                    # ...and bidirectional flows on every transit switch in between
                    for i in range(1, len(path)-1):
                        self.logger.info("Install the flow on switch %s", path[i])
                        In_Port_temp = self.net[path[i]][path[i-1]]['port']
                        Out_Port_temp = self.net[path[i]][path[i+1]]['port']
                        dp = get_datapath(self, path[i])
                        actions_1 = [dp.ofproto_parser.OFPActionOutput(Out_Port_temp)]
                        actions_2 = [dp.ofproto_parser.OFPActionOutput(In_Port_temp)]
                        match_1 = parser.OFPMatch(in_port=In_Port_temp, eth_type = 0x0800, ipv4_src=src_ip, ipv4_dst=dst_ip)
                        match_2 = parser.OFPMatch(in_port=Out_Port_temp, eth_type = 0x0800, ipv4_src=dst_ip, ipv4_dst=src_ip)
                        self.add_flow(dp, 1, match_1, actions_1)
                        self.add_flow(dp, 1, match_2, actions_2)
                # Send the packet to the original switch
                path_port = self.net[path[0]][path[1]]['port']
                actions = [datapath.ofproto_parser.OFPActionOutput(path_port)]
                data = msg.data
                pkt = packet.Packet(data)
                eth = pkt.get_protocols(ethernet.ethernet)[0]
                # change the mac address of packet
                eth.src = self.ip_to_mac[src_ip]
                eth.dst = self.ip_to_mac[dst_ip]
                # self.logger.info(" --- Changing destination mac to %s" % (eth.dst))
                pkt.serialize()
                out = datapath.ofproto_parser.OFPPacketOut(
                    datapath=datapath, buffer_id=0xffffffff, in_port=datapath.ofproto.OFPP_CONTROLLER,
                    actions=actions, data=pkt.data)
                datapath.send_msg(out)
            # actions = [datapath.ofproto_parser.OFPActionOutput(Out_Port)]
            # data = msg.data
            # pkt = packet.Packet(data)
            # eth = pkt.get_protocols(ethernet.ethernet)[0]
            # # self.logger.info(" --- Changing destination mac to %s" % (eth.dst))
            # pkt.serialize()
            # out = datapath.ofproto_parser.OFPPacketOut(
            # datapath=datapath, buffer_id=0xffffffff, in_port=datapath.ofproto.OFPP_CONTROLLER,
            # actions=actions, data=pkt.data)
            # datapath.send_msg(out)
    @set_ev_cls(event.EventSwitchEnter)
    def get_topology_data(self, ev):
        """Refresh the topology graph whenever a switch joins."""
        switch_list = get_switch(self.topology_api_app, None)
        switches=[switch.dp.id for switch in switch_list]
        self.net.add_nodes_from(switches)
        links_list = get_link(self.topology_api_app, None)
        links=[(link.src.dpid,link.dst.dpid,{'port':link.src.port_no}) for link in links_list]
        self.net.add_edges_from(links)
        links=[(link.dst.dpid,link.src.dpid,{'port':link.dst.port_no}) for link in links_list]
        self.net.add_edges_from(links)
# Companion Ryu applications started alongside this one
# (topology websocket feed, REST ofctl API, topology GUI).
app_manager.require_app('ryu.app.ws_topology')
app_manager.require_app('ryu.app.ofctl_rest')
app_manager.require_app('ryu.app.gui_topology.gui_topology')
| ataeiamirhosein/SoftwareDefinedNet | sar_application_SDN.py | sar_application_SDN.py | py | 13,982 | python | en | code | 1 | github-code | 50 |
21852447518 | # -*- coding: utf-8 -*-
import pytest
from homology.abrams_y import the_complex
from homology.elementary_collapses import collapse_all
from homology.benchmarks.memoize import memoize
from sage.interfaces.chomp import have_chomp
# The "auto" homology benchmarks rely on CHomP being installed; fail fast if it is not.
assert have_chomp() is True
# Complex sizes exercised by every benchmark below.
NS = [2, 3]
@memoize
def the_complex_(n, collapsed):
    """Memoized wrapper: the Y-complex for *n*, optionally fully collapsed."""
    comp = the_complex(n)
    if collapsed:
        return collapse_all(comp)
    return comp
@pytest.mark.benchmark(group="construction")
@pytest.mark.parametrize("n", NS)
def test_construction(benchmark, n):
benchmark(the_complex, n)
@pytest.mark.benchmark(group="collapse")
@pytest.mark.parametrize("n", NS)
def test_collapse_n(benchmark, n):
benchmark(collapse_all, the_complex_(n, False))
@pytest.mark.benchmark(group="homology")
# @pytest.mark.parametrize("dim", [None, (0, 1)])
@pytest.mark.parametrize("algorithm", ["auto", "no_chomp"])
@pytest.mark.parametrize("n", NS)
def test_homology(benchmark, n, algorithm): #, dim):
if not (n == 4 and algorithm == "no_chomp"):
benchmark(the_complex(n, False).homology, dim=dim, algorithm=algorithm)
else:
benchmark(lambda x: x is None, None)
| langston-barrett/computational-homology | homology/benchmarks/test_elementary_collapses.py | test_elementary_collapses.py | py | 1,129 | python | en | code | 1 | github-code | 50 |
36225975709 | import os
import errno
import logging
import logging.config
import threading
import serial
import json
import time
import queue
from entities.entity import Session, engine, Base
from entities.btn import Btn
from entities.btnaction import BtnAction
from entities.action import Action
from entities.command import Command
from entities.arg import Arg
from entities.argvalue import ArgValue
from entities.devicetype import DeviceType
from entities.device import Device
from entities.group_d import Group_d
from lifxlan import LifxLAN
from lifxlan.errors import WorkflowException
from groupext.groupext import GroupExt
# Create all mapped tables up front so the controller runs against a fresh DB.
Base.metadata.create_all(engine)
# Interval between LIFX group refresh passes — presumably seconds; TODO confirm.
UPDATE_INTERVAL = 30
def setup_logging(
        default_path='logging.json',
        default_level=logging.INFO,
        env_key='LOG_CFG'
):
    """Setup logging configuration.

    Loads a dictConfig-style JSON file whose path may be overridden through
    the *env_key* environment variable; falls back to ``logging.basicConfig``
    at *default_level* when the file does not exist.
    """
    # Empty env value falls through to the default, matching the old
    # truthiness check on os.getenv(...).
    path = os.getenv(env_key) or default_path
    # The file handlers configured in logging.json write into ./logs;
    # exist_ok replaces the old errno.EEXIST dance.
    os.makedirs('logs', exist_ok=True)
    if os.path.exists(path):
        with open(path, 'rt') as f:
            config = json.load(f)
        logging.config.dictConfig(config)
    else:
        logging.basicConfig(
            # Bug fix: the fallback previously hard-coded logging.DEBUG,
            # silently ignoring the `default_level` parameter.
            level=default_level,
            format='[%(levelname)-8s] [%(asctime)s]'
                   ' [%(threadName)-12s] %(message)s',
        )
def read_serial(event_queue):
    """Forever read newline-terminated messages from the USB serial port and
    push each non-empty line (decoded UTF-8, stripped) onto *event_queue*.

    Runs as a daemon thread; never returns.
    """
    # 19200 baud, 8 data bits, no parity, 1 stop bit; 1s read timeout so the
    # loop wakes up periodically even when no data arrives.
    ser = serial.Serial('/dev/ttyUSB0', 19200, 8, 'N', 1, timeout=1)
    while True:
        output = ser.readline().decode("utf-8").strip()
        if output != '':
            event_queue.put(output)
def update_group(group_id, group_list):
    """Refresh the cached LIFX device group for one database group.

    Rediscovers the group's devices on the LAN when the group is not yet in
    *group_list* (the shared cache dict) or its device membership changed in
    the database, then stores the fresh GroupExt under the group's id.
    """
    logger = logging.getLogger(__name__)
    lifx = LifxLAN()
    db = Session()
    group = db.query(Group_d).filter_by(id=group_id).first()
    is_new = group.id not in group_list
    is_updated = group.devices_updated
    if is_new or is_updated:
        status = []
        if is_new:
            status.append("New")
        if is_updated:
            status.append("Updated")
        new_group = None
        try:
            # LAN discovery of every device name configured for this group.
            new_group = GroupExt(
                lifx.get_devices_by_name(
                    [device.name for device in group.devices]
                ).get_device_list()
            )
        except WorkflowException:
            # Discovery failed; keep the stale cache entry and retry later.
            logger.warning(
                "Group failed - WorkflowException",
                exc_info=True
            )
        if new_group is not None:
            status_str = ",".join(status)
            devices_str = ", ".join(
                [device.name for device in group.devices]
            )
            logger.info(
                "{} - Group: {}\nDevices: {}"
                .format(status_str, group.name, devices_str)
            )
            group_list[group.id] = new_group
            group.devices_updated = False
            if len(group.devices) != len(new_group.get_device_list()):
                # just using this as a flag to force it to try again
                group.devices_updated = True
            db.commit()
    db.close()
def update_groups_loop(groups_list):
    """Continuously refresh every group's LIFX cache in *groups_list*.

    Each pass re-reads the groups table, spawns one update_group thread per
    group so slow LAN discoveries run in parallel, waits for all of them,
    then sleeps a second. Runs as a daemon thread; never returns.
    """
    while True:
        db = Session()
        db_groups = db.query(Group_d).all()
        db.close()
        update_threads = [
            threading.Thread(
                name="UpdateGroup-{}".format(group.id),
                target=update_group,
                args=(group.id, groups_list),
                daemon=True
            )
            for group in db_groups
        ]
        for t in update_threads:
            t.start()
        for t in update_threads:
            t.join()
        time.sleep(1)
def main():
    """Dispatch serial button events to LIFX group commands.

    Starts two daemon threads (serial reader, group-cache updater), then
    loops forever draining the event queue: each valid, not-yet-seen button
    event is mapped through the database to a command that is invoked on the
    cached LIFX group.
    """
    logger = logging.getLogger(__name__)
    lifx_groups = {}
    event_queue = queue.Queue()
    serial_thread = threading.Thread(
        name="SerialRead",
        target=read_serial,
        args=(event_queue,),
        daemon=True
    )
    serial_thread.start()
    group_update_thread = threading.Thread(
        name="GroupUpdate",
        target=update_groups_loop,
        args=(lifx_groups,),
        daemon=True
    )
    group_update_thread.start()
    while True:
        if event_queue.qsize() > 0:
            message = json.loads(event_queue.get())
            if message["success"]:
                sender = message["sender"]
                action = message["action"]
                count = message["count"]
                logger.info(
                    "Sent by: {:2} Action: {:9} Count: {:5}"
                    .format(sender, action, count)
                )
                db = Session()
                # Resolve the action name to its id; keep the name in
                # `action` instead of rebinding it (old code reused the
                # same variable for both the string and the id).
                action_id = db.query(Action).filter_by(name=action).first().id
                btn_action = db.query(BtnAction).filter_by(
                    btn_id=sender, action_id=action_id
                ).first()
                # The per-button counter only ever increases; replayed or
                # duplicate serial messages carry a stale count and are
                # ignored.
                counter_valid = False
                btn = db.query(Btn).filter_by(id=sender).first()
                if btn is not None and btn.counter < count:
                    btn.counter = count
                    counter_valid = True
                    db.commit()
                if btn_action is not None and counter_valid:
                    try:
                        group = lifx_groups[btn_action.group_id]
                        db_group = db.query(Group_d).filter_by(
                            id=btn_action.group_id
                        ).first()
                        command = db.query(Command).filter_by(
                            id=btn_action.command_id
                        ).first()
                        cmd = getattr(group, command.name, None)
                        if callable(cmd):
                            args = btn_action.get_args()
                            result = cmd(**args)
                            if result is None:
                                result = ""
                            logger.info(
                                "Group: '{}' "
                                "Command: {}({}) "
                                "Result: {}"
                                .format(
                                    db_group.name,
                                    command.name,
                                    btn_action.get_args_str(),
                                    result
                                )
                            )
                        else:
                            logger.warning(
                                "{} could not be run or is not a function"
                                .format(command.name)
                            )
                    # Bug fix: `lifx_groups` is a dict, so a missing group
                    # raises KeyError - the old `except IndexError` never
                    # matched, and `e.message` does not exist in Python 3.
                    except KeyError as e:
                        logger.warning(
                            "Lifx Group doesn't exist - KeyError: {}"
                            .format(e),
                            exc_info=True
                        )
                db.close()
        time.sleep(0.01)
if __name__ == "__main__":
    # Configure logging first; main() never returns.
    setup_logging()
    main()
| thatcrazygame/tiny_btn_hub | backend/src/hub.py | hub.py | py | 7,107 | python | en | code | 0 | github-code | 50 |
26636133784 | #!/usr/bin/python3.5
def find_uniq(arr):
    """Return the first element of *arr* that occurs exactly once.

    If no element is unique, print a notice and return the mapping of
    element -> occurrence count instead (matching the original contract).
    Elements are checked in order of first appearance.
    """
    from collections import Counter  # local: the file has no import section
    # One O(n) counting pass replaces the original O(n^2) nested scan;
    # Counter preserves first-encounter order, like the old dict did.
    counts = Counter(arr)
    for value, occurrences in counts.items():
        if occurrences == 1:
            return value
    print("No unique element in the list!")
    return dict(counts)
if __name__ == "__main__":
    # Demo: 6 is the first value occurring exactly once in this list.
    print(find_uniq([ 3, 10, 3, 3, 3 ,5,12,5,6,7,7,7,32,12,10]))
26992725477 | # pip install pyttsx3 ; this works offline
import pyttsx3
def text_to_Speech(audio):
    """Speak *audio* on the module-level pyttsx3 engine; blocks until done."""
    engine.say(audio)
    engine.runAndWait()
engine = pyttsx3.init()  # single engine instance shared by text_to_Speech above
for voice in engine.getProperty("voices"):  # list every voice installed on this system
    print(voice)
voices = engine.getProperty("voices")
engine.setProperty("voice", voices[0].id)  # select the first available voice
text = input("Enter your text : ")
text_to_Speech(text)  # speak whatever the user typed
71649771356 | class Node:
    def __init__(self, val=None):
        # Payload and forward pointer of a singly linked list node.
        self.val = val
        self.next = None
class LinkedList:
    """Singly linked list with head insertion and adjacent-duplicate removal."""
    def __init__(self):
        self.head = None
    def push(self, data):
        """Insert *data* at the front of the list."""
        node = Node(data)
        node.next = self.head
        self.head = node
    def printList(self):
        """Print the values front to back, space separated, on one line."""
        node = self.head
        while node is not None:
            print(node.val, end=' ')
            node = node.next
    def Q6(self):
        """Collapse runs of equal adjacent values, keeping one per run.

        Returns the last remaining node (None for an empty list), as the
        original implementation did.
        """
        node = self.head
        if node is None:
            return
        while node.next is not None:
            if node.val == node.next.val:
                # Splice out the duplicate neighbour and re-check this node.
                node.next = node.next.next
            else:
                node = node.next
        return node
if __name__ == "__main__":
    # Demo: build 2 -> 2 -> 1, remove the adjacent duplicate, print "2 1".
    # Renamed the variable from `list`, which shadowed the builtin.
    demo = LinkedList()
    demo.push(1)
    demo.push(2)
    demo.push(2)
    demo.Q6()
    demo.printList()
| devlmhieu7521/Interview_Answering | code python/Q6.py | Q6.py | py | 893 | python | en | code | 0 | github-code | 50 |
24076609465 | import logging
from typing import List, Dict, Union
import pandas as pd
import openomics
from .clinical import (
ClinicalData,
HISTOLOGIC_SUBTYPE_COL,
PATHOLOGIC_STAGE_COL,
TUMOR_NORMAL_COL,
PREDICTED_SUBTYPE_COL,
)
from .genomics import SomaticMutation, CopyNumberVariation, DNAMethylation
from .imageomics import WholeSlideImage
from .proteomics import Protein
from .transcriptomics import MessengerRNA, MicroRNA, LncRNA, Expression
class MultiOmics:
    """A data object which holds multiple -omics data for a single clinical cohort."""
    def __init__(self, cohort_name, omics_data=None):
        """
        Args:
            cohort_name (str): the clinical cohort name
            omics_data (list): optional iterable of Expression objects that
                are added immediately via add_omic()
        """
        self._cohort_name = cohort_name
        self._omics = []
        # This is a data dictionary accessor to retrieve individual -omic data
        self.data = {}
        if omics_data:
            for omics in omics_data:
                self.add_omic(omics)
    def add_omic(self,
                 omic_data: Expression,
                 initialize_annotations: bool = True):
        """Adds an omic object to the Multiomics such that the samples in omic
        matches the samples existing in the other omics.
        Args:
            omic_data (Expression): The omic to add, e.g., MessengerRNA,
                MicroRNA, LncRNA, etc.
            initialize_annotations (bool): default True. If true, initializes
                the annotation dataframe in the omic object
        """
        # Expose the omic as an attribute, e.g. self.MessengerRNA.
        self.__dict__[omic_data.name()] = omic_data
        # NOTE(review): `omic_data.name` is the bound method object, not its
        # result, so it is never found in this list of name() strings and the
        # append runs on every call - re-adding the same omic duplicates its
        # entry in self._omics. Probably meant `omic_data.name()`.
        if omic_data.name not in self._omics:
            self._omics.append(omic_data.name())
        # dictionary as data accessor to the expression data
        self.data[omic_data.name()] = omic_data.expressions
        # Initialize annotation
        if initialize_annotations:
            omic_data.initialize_annotations(index=omic_data.gene_index,
                                             gene_list=None)
        # NOTE(review): logging.info() treats the first argument as the format
        # string and the rest as %-style arguments; these extra positional
        # values are not concatenated, so this call will not log what it
        # appears to intend.
        logging.info(
            omic_data.name(),
            self.data[omic_data.name()].shape if hasattr(
                self.data[omic_data.name()], "shape") else ": None",
            ", indexed by:",
            omic_data.annotations.index.name,
        )
    def add_clinical_data(self, clinical: openomics.clinical.ClinicalData, **kwargs):
        """ Add a ClinicalData instance to the MultiOmics instance.
        Args:
            clinical (openomics.clinical.ClinicalData): the cohort's clinical
                data; also triggers build_samples(**kwargs)
        """
        if not isinstance(clinical, ClinicalData):
            raise Exception("Must pass a ClinicalData in, not a file path.")
        self.clinical = clinical
        self.data["PATIENTS"] = self.clinical.patient
        if hasattr(self.clinical, "biospecimen"):
            self.data["BIOSPECIMENS"] = self.clinical.biospecimen
        if hasattr(self.clinical, "drugs"):
            self.data["DRUGS"] = self.clinical.drugs
        self.build_samples(**kwargs)
    def get_omics_list(self):
        # Names of the omics added so far, in insertion order.
        return self._omics
    def __getitem__(self, item:str):
        """This function allows the MultiOmicData class objects to access
        individual omics by a dictionary lookup, e.g. openomics["MicroRNA"]
        Args:
            item (str): a string of the class name (case-insensitive)
        """
        if item.lower() == MessengerRNA.name().lower():
            return self.__getattribute__(MessengerRNA.name())
        elif item.lower() == MicroRNA.name().lower():
            return self.__getattribute__(MicroRNA.name())
        elif item.lower() == LncRNA.name().lower():
            return self.__getattribute__(LncRNA.name())
        elif item.lower() == WholeSlideImage.name().lower():
            return self.__getattribute__(WholeSlideImage.name())
        elif item.lower() == SomaticMutation.name().lower():
            return self.__getattribute__(SomaticMutation.name())
        elif item.lower() == CopyNumberVariation.name().lower():
            return self.__getattribute__(CopyNumberVariation.name())
        elif item.lower() == DNAMethylation.name().lower():
            return self.__getattribute__(DNAMethylation.name())
        elif item.lower() == Protein.name().lower():
            return self.__getattribute__(Protein.name())
        elif item.lower() == "patients":
            return self.clinical.patient
        elif item.lower() == "samples":
            return self.clinical.samples
        elif item.lower() == "drugs":
            return self.clinical.drugs
        else:
            raise Exception(
                'String accessor must be one of {"MessengerRNA", "MicroRNA", "LncRNA", "Protein", etc.}'
            )
    def remove_duplicate_genes(self):
        """Removes duplicate genes between any omics such that the gene index
        across all omics has no duplicates.
        """
        # NOTE(review): each ordered pair (A, B) is visited, so genes shared
        # by two omics are dropped from BOTH - not kept in one of them.
        for omic_A in self._omics:
            for omic_B in self._omics:
                if omic_A != omic_B:
                    self.__getattribute__(omic_A).drop_genes(
                        set(self.__getattribute__(omic_A).get_genes_list())
                        & set(self.__getattribute__(omic_B).get_genes_list()))
    def build_samples(self, agg_by="union"):
        """Running this function will build a dataframe for all samples across
        the different omics (either by a union or intersection). Then,
        Args:
            agg_by (str): ["union", "intersection"]
        """
        # make sure at least one ExpressionData present
        if len(self._omics) < 1:
            logging.debug(
                "build_samples() does nothing. Must add at least one omic to this MultiOmics object."
            )
            return
        all_samples = pd.Index([])
        for omic in self._omics:
            if agg_by == "union":
                all_samples = all_samples.union(self.data[omic].index)
            elif agg_by == "intersection":
                all_samples = all_samples.intersection(self.data[omic].index)
        if hasattr(self, "clinical"):
            self.clinical.build_clinical_samples(all_samples)
            self.data["SAMPLES"] = self.clinical.samples.index
        else:
            self.data["SAMPLES"] = all_samples
    def __dir__(self):
        # Expose the data keys for tab-completion / introspection.
        return list(self.data.keys())
    def match_samples(self, omics) -> pd.Index:
        """Return the index of bcr_sample_barcodes of the intersection of
        samples from all modalities
        Args:
            omics: An array of modalities
        Returns:
            matched_sapmles: An pandas Index list
        """
        # TODO check that for single modalities, this fetch all patients
        matched_samples = self.data[omics[0]].index.copy()
        for omic in omics:
            matched_samples = matched_samples.join(self.data[omic].index,
                                                   how="inner")
        return matched_samples
    def load_data(
        self,
        omics,
        target=["pathologic_stage"],
        pathologic_stages=None,
        histological_subtypes=None,
        predicted_subtypes=None,
        tumor_normal=None,
        samples_barcode=None,
    ):
        # type: (Union[List[str], str], List[str], List[str], List[str], List[str], List[str], List[str]) -> (Dict[str, pd.DataFrame], pd.DataFrame)
        """
        Args:
            omics (list): A list of the data modalities to load. Default "all"
                to select all modalities
            target (list): The clinical data fields to include in the
                returned target dataframe ``y``
            pathologic_stages (list): Only fetch samples having certain stages
                in their corresponding patient's clinical data. For instance,
                ["Stage I", "Stage II"] will only fetch samples from Stage I and
                Stage II patients. Default is [] which fetches all pathologic
                stages.
            histological_subtypes: A list specifying the histological subtypes
                to fetch. Default is [] which fetches all histological sybtypes.
            predicted_subtypes: A list specifying the histological subtypes to
                fetch. Default is [] which fetches all histological sybtypes.
            tumor_normal: ["Tumor", "Normal"]. Default is [], which fetches all
                tumor or normal sample types.
            samples_barcode: A list of sample's barcode. If not None, only fetch
                data with matching samples provided in this list.
        Returns:
            (X, y): Returns X, a dictionary containing the multiomics data that
                have data
        """
        if omics == "all" or omics is None:
            omics = self._omics
        matched_samples = self.match_samples(omics)
        if samples_barcode is not None:
            matched_samples = samples_barcode
        if hasattr(self, "clinical") and isinstance(self.clinical,
                                                    ClinicalData):
            # Build targets clinical data
            y = self.get_sample_attributes(matched_samples)
            # Select only samples with certain cancer stage or subtype
            if pathologic_stages:
                y = y[y[PATHOLOGIC_STAGE_COL].isin(pathologic_stages)]
            if histological_subtypes:
                y = y[y[HISTOLOGIC_SUBTYPE_COL].isin(histological_subtypes)]
            if predicted_subtypes:
                y = y[y[PREDICTED_SUBTYPE_COL].isin(predicted_subtypes)]
            if tumor_normal:
                y = y[y[TUMOR_NORMAL_COL].isin(tumor_normal)]
            # Filter y target column labels
            y = y.filter(target)
            y.dropna(axis=0, inplace=True)
            matched_samples = y.index
        else:
            y = None
        # Build expression matrix for each omic, indexed by matched_samples
        X_multiomics = {}
        for omic in omics:
            X_multiomics[omic] = self.data[omic].loc[
                matched_samples, self[omic].get_genes_list()]
        return X_multiomics, y
    def get_sample_attributes(self, matched_samples):
        """Fetch patient's clinical data for each given samples barcodes in the
        matched_samples
        Returns
            samples_index: Index of samples
        Args:
            matched_samples: A list of sample barcodes
        """
        return self.data["SAMPLES"].reindex(matched_samples)
    def print_sample_sizes(self):
        # Console summary of each stored table's shape (or a note if absent).
        for omic in self.data:
            print(
                omic,
                self.data[omic].shape
                if hasattr(self.data[omic], "shape") else "Didn't import data",
            )
    def annotate_samples(self, dictionary):
        """This function adds a "predicted_subtype" field to the patients
        clinical data. For instance, patients were classified into subtypes
        based on their expression profile using k-means, then, to use this
        function, do:
        annotate_patients(dict(zip(patient index>, <list of corresponding patient's subtypes>)))
        Adding a field to the patients clinical data allows openomics to
        query the patients data through the .load_data(subtypes=[])
        parameter,
        Args:
            dictionary: A dictionary mapping patient's index to a subtype
        """
        self.data["PATIENTS"] = self.data["PATIENTS"].assign(
            subtypes=self.data["PATIENTS"][
                self.clinical.patient_column].map(dictionary))
| FernandoMarcon/bench | omics/open-omics/env/lib/python3.10/site-packages/openomics/multiomics.py | multiomics.py | py | 11,398 | python | en | code | 0 | github-code | 50 |
8496933853 | # web_app/routes/home_routes.py
from flask import Blueprint, render_template
from web_app.models import User
home_routes = Blueprint("home_routes", __name__)
@home_routes.route("/")
def index():
    """Render the prediction form, offering every distinct stored screen name.

    Leftover debug output (the per-name print loop and the commented-out
    breakpoint() calls) has been removed.
    """
    screen_names = User.query.with_entities(User.screen_name).distinct()
    return render_template("prediction_form.html", screen_names=screen_names)
@home_routes.route("/hello")
def hello():
    """Tiny sanity-check endpoint; always returns "About me 4"."""
    result = 2 + 2
    return f"About me {result}"
24333899889 | import time
from typing import List
class Solution:
    """Kadane's algorithm for LeetCode 53 (maximum subarray sum)."""
    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum over all non-empty contiguous subarrays."""
        best = nums[0]
        running = 0
        for value in nums:
            # A negative running prefix can only hurt; restart the window.
            running = max(running, 0) + value
            best = max(best, running)
        return best
n1 = [-2, 1, -3, 4, -1, 2, 1, -5, 4]  # expected answer: 6 ([4, -1, 2, 1])
n2 = [5, 4, -1, 7, 8]  # expected answer: 23 (the whole array)
n3 = [1]  # expected answer: 1
if __name__ == '__main__':
    # Run the three sample cases and report the elapsed wall-clock time.
    sol = Solution()
    start_time = time.time()
    print(sol.maxSubArray(n1))
    print(sol.maxSubArray(n2))
    print(sol.maxSubArray(n3))
    print("----%s seconds----" % (time.time() - start_time))
| zluo16/python-data-structures-and-algorithms | blind_75/arrays/max_sub_array.py | max_sub_array.py | py | 633 | python | en | code | 0 | github-code | 50 |
40185943580 | import FWCore.ParameterSet.Config as cms
# CMSSW job testing TriggerResultsFilter expressions against the TriggerResults
# stored in trigger.root (Path_1/Path_2/Path_3 produced by an upstream job).
process = cms.Process('TEST')
process.options.wantSummary = True
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = 100 # only report every 100th event start
process.MessageLogger.cerr.enableStatistics = False # enable "MessageLogger Summary" message
process.MessageLogger.cerr.threshold = 'INFO' # change to 'WARNING' not to show INFO-level messages
## enable reporting of INFO-level messages (default is limit=0, i.e. no messages reported)
#process.MessageLogger.cerr.INFO = cms.untracked.PSet(
#    reportEvery = cms.untracked.int32(1), # every event!
#    limit = cms.untracked.int32(-1) # no limit!
#)
# read back the trigger decisions
process.source = cms.Source('PoolSource',
    fileNames = cms.untracked.vstring('file:trigger.root')
)
# Base filter template: empty l1tResults so only HLT TriggerResults are read.
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as _trigResFilter
_triggerResultsFilter = _trigResFilter.clone( l1tResults = '' )
# accept if 'Path_1' succeeds
process.filter_1 = _triggerResultsFilter.clone(
    triggerConditions = ( 'Path_1', )
)
# accept if 'Path_2' succeeds
process.filter_2 = _triggerResultsFilter.clone(
    triggerConditions = ( 'Path_2', )
)
# accept if 'Path_3' succeeds
process.filter_3 = _triggerResultsFilter.clone(
    triggerConditions = ( 'Path_3', )
)
# accept if any path succeeds (explicit OR)
process.filter_any_or = _triggerResultsFilter.clone(
    triggerConditions = ( 'Path_1', 'Path_2', 'Path_3' )
)
# accept if 'Path_1' succeeds, prescaled by 15
process.filter_1_pre = _triggerResultsFilter.clone(
    triggerConditions = ( '(Path_1) / 15', )
)
# accept if 'Path_1' succeeds, prescaled by 15
# masking Path_2 (equivalent to filter_1_pre)
process.filter_1_pre_with_masks1 = _triggerResultsFilter.clone(
    triggerConditions = ( '(Path_1 / 15 OR Path_2) MASKING Path_2', )
)
# accept if 'Path_1' succeeds, prescaled by 15
# masking Path_2 and Path_3 (equivalent to filter_1_pre)
process.filter_1_pre_with_masks2 = _triggerResultsFilter.clone(
    triggerConditions = ( '(Path_? / 15) MASKING Path_2 MASKING Path_3', )
)
# accept if 'Path_1' prescaled by 15 does not succeed
process.filter_not_1_pre = _triggerResultsFilter.clone(
    triggerConditions = ( 'NOT (Path_1 / 15)', )
)
# accept if 'Path_2' succeeds, prescaled by 10
process.filter_2_pre = _triggerResultsFilter.clone(
    triggerConditions = ( '(Path_2 / 10)', )
)
# accept if any path succeeds, with different prescales (explicit OR, prescaled)
process.filter_any_pre = _triggerResultsFilter.clone(
    triggerConditions = ( 'Path_1 / 15', 'Path_2 / 10', 'Path_3 / 6', )
)
# equivalent of filter_any_pre using NOT operator twice
process.filter_any_pre_doubleNOT = _triggerResultsFilter.clone(
    triggerConditions = ( 'NOT NOT (Path_1 / 15 OR Path_2 / 10 OR Path_3 / 6)', )
)
# opposite of filter_any_pre without whitespaces where possible
process.filter_not_any_pre = _triggerResultsFilter.clone(
    triggerConditions = ( 'NOT(Path_1/15)AND(NOT Path_2/10)AND(NOT Path_3/6)', )
)
# accept if Path_1 and Path_2 have different results (XOR) without using XOR operator
process.filter_1xor2_withoutXOR = _triggerResultsFilter.clone(
    triggerConditions = ( 'Path_1 AND NOT Path_2', 'NOT Path_1 AND Path_2', )
)
# accept if Path_1 and Path_2 have different results (XOR) using XOR operator
process.filter_1xor2_withXOR = _triggerResultsFilter.clone(
    triggerConditions = ( 'Path_1 XOR Path_2', )
)
# accept if any path succeeds (wildcard, '*')
process.filter_any_star = _triggerResultsFilter.clone(
    triggerConditions = ( '*', )
)
# accept if any path succeeds (wildcard, twice '*')
process.filter_any_doublestar = _triggerResultsFilter.clone(
    triggerConditions = ( '*_*', )
)
# accept if any path succeeds (wildcard, '?')
process.filter_any_question = _triggerResultsFilter.clone(
    triggerConditions = ( 'Path_?', )
)
# accept if all path succeed (explicit AND)
process.filter_all_explicit = _triggerResultsFilter.clone(
    triggerConditions = ( 'Path_1 AND Path_2 AND Path_3', )
)
# wrong path name (explicit)
process.filter_wrong_name = _triggerResultsFilter.clone(
    triggerConditions = ( 'Wrong', ),
    throw = False
)
# wrong path name (wildcard)
process.filter_wrong_pattern = _triggerResultsFilter.clone(
    triggerConditions = ( '*_Wrong', ),
    throw = False
)
# empty path list
process.filter_empty_pattern = _triggerResultsFilter.clone(
    triggerConditions = ( )
)
# L1-like path name
process.filter_l1path_pattern = _triggerResultsFilter.clone(
    # this returns False for every event without throwing exceptions,
    # because here l1tResults is an empty InputTag
    # (patterns starting with "L1_" are used exclusively to check the L1-Trigger decisions)
    triggerConditions = ( 'L1_Path', )
)
# real L1 trigger
process.filter_l1singlemuopen_pattern = _triggerResultsFilter.clone(
    # this returns False for every event without throwing exceptions,
    # because here l1tResults is an empty InputTag
    # (patterns starting with "L1_" are used exclusively to check the L1-Trigger decisions)
    triggerConditions = ( 'L1_SingleMuOpen', )
)
# TRUE
process.filter_true_pattern = _triggerResultsFilter.clone(
    triggerConditions = ( 'TRUE', )
)
# FALSE
process.filter_false_pattern = _triggerResultsFilter.clone(
    triggerConditions = ( 'FALSE', )
)
# One Path per filter, so each expression's accept count shows up separately
# in the framework summary and in the HLTrigReport below.
process.path_1 = cms.Path( process.filter_1 )
process.path_2 = cms.Path( process.filter_2 )
process.path_3 = cms.Path( process.filter_3 )
process.path_all_explicit = cms.Path( process.filter_all_explicit )
process.path_any_or = cms.Path( process.filter_any_or )
process.path_any_star = cms.Path( process.filter_any_star )
process.path_1_pre = cms.Path( process.filter_1_pre )
process.path_1_pre_with_masks1 = cms.Path( process.filter_1_pre_with_masks1 )
process.path_1_pre_with_masks2 = cms.Path( process.filter_1_pre_with_masks2 )
process.path_not_1_pre = cms.Path( process.filter_not_1_pre )
process.path_2_pre = cms.Path( process.filter_2_pre )
process.path_any_pre = cms.Path( process.filter_any_pre )
process.path_any_pre_doubleNOT = cms.Path( process.filter_any_pre_doubleNOT )
process.path_not_any_pre = cms.Path( process.filter_not_any_pre )
process.Check_1xor2_withoutXOR = cms.Path( process.filter_1xor2_withoutXOR )
process.Check_1xor2_withXOR = cms.Path( process.filter_1xor2_withXOR )
process.path_any_doublestar = cms.Path( process.filter_any_doublestar )
process.path_any_question = cms.Path( process.filter_any_question )
process.path_wrong_name = cms.Path( process.filter_wrong_name )
process.path_wrong_pattern = cms.Path( process.filter_wrong_pattern )
process.path_not_wrong_pattern = cms.Path( ~ process.filter_wrong_pattern )
process.path_empty_pattern = cms.Path( process.filter_empty_pattern )
process.path_l1path_pattern = cms.Path( process.filter_l1path_pattern )
process.path_l1singlemuopen_pattern = cms.Path( process.filter_l1singlemuopen_pattern )
process.path_true_pattern = cms.Path( process.filter_true_pattern )
process.path_false_pattern = cms.Path( process.filter_false_pattern )
# define an EndPath to analyze all other path results
process.hltTrigReport = cms.EDAnalyzer( 'HLTrigReport',
    HLTriggerResults = cms.InputTag( 'TriggerResults', '', '@currentProcess' )
)
process.HLTAnalyzerEndpath = cms.EndPath( process.hltTrigReport )
| cms-sw/cmssw | HLTrigger/HLTfilters/test/testTriggerResultsFilter_by_TriggerResults_cfg.py | testTriggerResultsFilter_by_TriggerResults_cfg.py | py | 7,494 | python | en | code | 985 | github-code | 50 |
39260960064 | # Assorted functions
import pickle
import os.path
from googleapiclient.discovery import build
def initialize_sheets():
    """Build and return a Google Sheets spreadsheets() resource.

    Loads pickled OAuth credentials from token.pickle next to this module;
    raises FileNotFoundError if the token has not been created yet.
    """
    # Get Credentials (token.pickle is a locally generated, trusted file)
    token_path = os.path.join(os.path.dirname(
        os.path.relpath(__file__)), "token.pickle")
    with open(token_path, 'rb') as token:
        creds = pickle.load(token)
    service = build('sheets', 'v4', credentials=creds)
    sheets = service.spreadsheets()
    # Initialize
    return sheets
| jsowder/personal-django | sheets/funs.py | funs.py | py | 446 | python | en | code | 0 | github-code | 50 |
8876543996 | from flask_restx import fields, Namespace
from flask_restx.reqparse import RequestParser
from flask import request
from http import HTTPStatus
from marshmallow_sqlalchemy.fields import Nested
from datetime import datetime, timedelta
from dateutil import parser as dparser
from openapi_genclient.models import (
Operation as TinOperation,
OperationTrade as TinOperationTrade,
MoneyAmount
)
from app.logic.tinkoff_client import client, ACCOUNT_ID
from .common import Resource, abort
from app.models import ma
from .portfolio import MoneyAmountSchema
from app.logic.date import date_format
# Flask-RESTX namespace for everything under /operations; `ns` is a short alias.
operations_ns = ns = Namespace(
    name="operations", description="CRUD operations", path="/operations"
)
class TinOperationTradeSchema(ma.Schema):
    """Marshmallow schema serialising a single trade within an operation."""
    class Meta:
        model = TinOperationTrade
        fields = [
            'date',
            'price',
            'quantity',
            'trade_id'
        ]
class TinOperationSchema(ma.Schema):
    """Marshmallow schema serialising a Tinkoff operation, trades included."""
    commission = Nested(MoneyAmountSchema)
    trades = Nested(TinOperationTradeSchema, many=True)
    class Meta:
        model = TinOperation
        fields = [
            'date',
            'figi',
            'operation_type',
            'quantity',
            'price',
            'currency',
            'status',
            'trades',
            'instrument_type',
            'is_margin_call',
            'payment',
            'commission',
            'id',
        ]
        ordered = True
# Query-string arguments: an explicit [from, to] window, or `hours` to derive
# `from` backwards from `to` (which itself defaults to "now") in GET below.
req_parser = RequestParser(bundle_errors=True)
req_parser.add_argument(
    name='from', type=str, required=False, nullable=True
)
req_parser.add_argument(
    name='to', type=str, required=False, nullable=True
)
req_parser.add_argument(
    name='hours', type=int, required=False, nullable=True
)
@ns.route('', endpoint='operations')
class OperationsResource(Resource):
    """GET the account's broker operations inside a requested time window."""
    @ns.expect(req_parser)
    @ns.response(int(HTTPStatus.OK), description="Ok")
    @ns.response(int(HTTPStatus.BAD_REQUEST), description="Bad request")
    @ns.response(int(HTTPStatus.INTERNAL_SERVER_ERROR), description="Server error")
    def get(self):
        """Return serialised operations between `from` and `to`."""
        # NOTE(review): asserts are stripped under `python -O`; explicit
        # checks/raises for a missing ACCOUNT_ID would be more robust.
        assert type(ACCOUNT_ID) == str
        assert len(ACCOUNT_ID) > 0
        _from = request.args.get('from', default=None, type=str)
        to = request.args.get('to', default=date_format(datetime.now()), type=str)
        delta_hours = request.args.get('hours', default=16, type=int)
        # Round-trip `to` through parse/format so both bounds share one format.
        dt_to = dparser.parse(to)
        to = date_format(dt_to)
        if not _from:
            # No explicit start: look back `hours` (default 16) from `to`.
            dt_from = dt_to - timedelta(hours=delta_hours)
        else:
            dt_from = dparser.parse(_from)
        _from = date_format(dt_from)
        resp = client.operations.operations_get(_from=_from, to=to, broker_account_id=ACCOUNT_ID)
        assert resp.status == 'Ok'
        assert type(resp.payload.operations) == list
        # print('---', resp.payload.operations)
        operations = resp.payload.operations
        tin_operations_schema = TinOperationSchema()
        res = tin_operations_schema.dump(operations, many=True)
        return res, HTTPStatus.OK
| jackalissimo/pipkoff | app/routes/api_v0/operations.py | operations.py | py | 3,064 | python | en | code | 0 | github-code | 50 |
11401326887 | import socket
target_host = '144.202.120.116'
target_port = 6999
for i in range(0, 1000):
    # Create a socket: AF_INET = standard IPv4 address/host, SOCK_STREAM =
    # this will be a TCP client.
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Connect to the server.
    client.connect((target_host, target_port))
    # Send a request; Python 3 sockets only accept byte streams, hence encode().
    print('我已经发送你的信息{}'.format(i))
    client.send('GET / HTTP/1.1\r\nHost: 144.202.120.116\r\nConnection:keep-alive\r\n\r\n'.encode('utf-8'))
    # Print the response received from the server.
    response = client.recv(4096).decode()
    print('我已经收到服务端的返回的信息')
    print(response)
| Zealper/MyStudyMaterials | PythonBlackHatLearning/TCPclient.py | TCPclient.py | py | 728 | python | zh | code | 0 | github-code | 50 |
37953111765 | from typing import Dict, List, Tuple
import json
import logging
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, LabelField, SpanField
from allennlp.data.tokenizers.token import Token
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers.word_splitter import OpenAISplitter
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class SemEval2010Task8Reader(DatasetReader):
    """Reads SemEval 2010 Task 8 relation-classification examples from a
    JSONL file and converts them into AllenNLP Instances laid out for a
    GPT-style transformer (entity spans prepended with delimiter tokens).
    """
    def __init__(self,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 lazy: bool = False) -> None:
        super().__init__(lazy)
        self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
    @overrides
    def _read(self, file_path: str):
        # if `file_path` is a URL, redirect to the cache
        file_path = cached_path(file_path)
        with open(file_path, 'r') as semeval_file:
            logger.info("Reading SemEval 2010 Task 8 instances from jsonl dataset at: %s", file_path)
            for line in semeval_file:
                example = json.loads(line)
                tokens = example["tokens"]
                label = example["label"]
                entity_indices = example["entities"]
                # Dataset spans are [start, end); convert to inclusive ends.
                start_e1, end_e1 = entity_indices[0]
                start_e2, end_e2 = entity_indices[1]
                entity_1 = (start_e1, end_e1 - 1)
                entity_2 = (start_e2, end_e2 - 1)
                yield self.text_to_instance(tokens, entity_1, entity_2, label)
    @overrides
    def text_to_instance(self, # type: ignore
                         tokens: List[str],
                         entity_1: Tuple[int],
                         entity_2: Tuple[int],
                         label: str = None) -> Instance:
        # pylint: disable=arguments-differ
        fields: Dict[str, Field] = {}
        tokens = [OpenAISplitter._standardize(token) for token in tokens]
        # Input layout: __start__ <e1> __del1__ <e2> __del2__ <sentence> __clf__
        tokens = ['__start__'] + tokens[entity_1[0]:entity_1[1]+1] + ['__del1__'] + tokens[entity_2[0]:entity_2[1]+1] + ['__del2__'] + tokens + ['__clf__']
        sentence = TextField([Token(text=t) for t in tokens], self._token_indexers)
        fields['sentence'] = sentence
        #fields['entity1'] = SpanField(*entity_1, sequence_field=sentence)
        #fields['entity2'] = SpanField(*entity_2, sequence_field=sentence)
        if label:
            fields['label'] = LabelField(label)
        return Instance(fields)
| DFKI-NLP/DISTRE | tre/dataset_readers/semeval_2010_task_8_reader.py | semeval_2010_task_8_reader.py | py | 2,710 | python | en | code | 82 | github-code | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.