| seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
32506810174 | class Validation():
def __init__(self, filledList):
self.__filledBoard = filledList
    def EmptyCells(self):
        empty = 0
        for row in range(9):
            for col in range(9):
                if(self.__filledBoard[row][col][0].get() == ""):
                    empty += 1
        # More than 64 empty cells means fewer than 17 clues, the minimum
        # required for a uniquely solvable sudoku
        return empty > 64
def __validateRows(self):
for row in range(9):
for col1 in range(9):
for col2 in range(9):
if col1 == col2 or self.__filledBoard[row][col1][0].get() == "":
continue
if self.__filledBoard[row][col1][0].get() == self.__filledBoard[row][col2][0].get():
return (True, "Row", row+1)
return (False, "", 0)
def __validateColumns(self):
for col in range(9):
for row1 in range(9):
for row2 in range(9):
if row1 == row2 or self.__filledBoard[row1][col][0].get() == "":
continue
if self.__filledBoard[row1][col][0].get() == self.__filledBoard[row2][col][0].get():
return (True, "Column", col+1)
return (False, "", 0)
def __validateBoxs(self):
for col in range(0, 9, 3):
for row in range(0, 9, 3):
box_x = col // 3
box_y = row // 3
for i in range(box_y*3, box_y*3 + 3):
for j in range(box_x * 3, box_x*3 + 3):
for ii in range(box_y*3, box_y*3 + 3):
for jj in range(box_x * 3, box_x*3 + 3):
                                    # Skip only the cell itself and blanks; the original
                                    # `or` wrongly skipped any box-mate sharing a row or column
                                    if (i == ii and j == jj) or self.__filledBoard[i][j][0].get() == "":
continue
if self.__filledBoard[i][j][0].get() == self.__filledBoard[ii][jj][0].get():
return (True, "Box", ((box_y * 3) + box_x+1))
return (False, "", 0)
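    # Usage sketch (assumption: each board cell holds a tkinter StringVar-like
    # object exposing .get()):
    #   validator = Validation(filled_list)
    #   has_conflict, unit, index = validator.validateBoard()
    #   if has_conflict:
    #       print("Duplicate in", unit, index)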
def validateBoard(self):
checkRows = self.__validateRows()
if(checkRows[0]):
return checkRows
checkColumns = self.__validateColumns()
if(checkColumns[0]):
return checkColumns
checkBoxs = self.__validateBoxs()
if(checkBoxs[0]):
return checkBoxs
return (False, "", 0) | Acro146/Sudoku | validation.py | validation.py | py | 2,356 | python | en | code | 1 | github-code | 13 |
22073932904 |
import torch
import torch.nn as nn
import dgl.function as fn
import torch.nn.functional as F
from models.encoder.ogb_encoder import OGB_NodeEncoder, OGB_EdgeEncoder
from models.norm.gnn_norm import GNN_Norm
from models.pool.global_pool import GlobalPooling
from models.activation.local_activation import LocalActivation
class GCNConvLayer_SparseAdj(nn.Module):
def __init__(self, embed_dim, aggregator_type='mean', self_loops=True):
super(GCNConvLayer_SparseAdj, self).__init__()
self.aggregator_type = aggregator_type
self.self_loops = self_loops
self.update_feat = nn.Linear(embed_dim, embed_dim)
    def aggregate(self, graphs, nfeat, efeat, aggregator_type, self_loops):
        num_node = nfeat.shape[0]
        degrees = graphs.in_degrees()
        # Build the sparse neighbour adjacency matrix from the edge list
        adj_indx = torch.stack(graphs.edges(), 0)
        adj_elem = torch.ones(efeat.shape[0]).to(adj_indx.device)
        adj_neibor = torch.sparse.FloatTensor(adj_indx, adj_elem, torch.Size([num_node, num_node]))
        adj_matrix = adj_neibor
        if self_loops:
            # Add identity entries (i, i) so each node also aggregates itself
            idx = torch.arange(num_node, device=adj_indx.device)
            self_loop_edge = torch.stack([idx, idx], dim=0)
            self_elem = torch.ones(num_node).to(adj_indx.device)
            adj_self = torch.sparse.FloatTensor(self_loop_edge, self_elem, torch.Size([num_node, num_node]))
            adj_matrix = adj_matrix + adj_self
            degrees = degrees + 1
        # Aggregate neighbour features with a sparse-dense matmul
        rst = torch.spmm(adj_matrix, nfeat)
        if aggregator_type == 'mean':
            rst = rst / degrees.unsqueeze(1)
        return rst
def forward(self, graphs, nfeat, efeat):
graphs = graphs.local_var()
rst = self.aggregate(graphs, nfeat, efeat, self.aggregator_type, self.self_loops)
# node feature updating
rst = self.update_feat(rst)
return rst
class GCNConvLayer(nn.Module):
def __init__(self, dataset_name, embed_dim):
super(GCNConvLayer, self).__init__()
self.project_node_feat = nn.Linear(embed_dim, embed_dim)
self.project_edge_feat = OGB_EdgeEncoder(dataset_name, embed_dim)
def forward(self, graphs, nfeat, efeat):
graphs = graphs.local_var()
        # +1 accounts for the node's own feature added below (self-connection)
        degs = (graphs.in_degrees().float() + 1).to(graphs.device)
efeat = self.project_edge_feat(efeat)
graphs.ndata['h_n'] = nfeat
graphs.edata['h_e'] = efeat
graphs.update_all(fn.u_add_e('h_n', 'h_e', 'm'), fn.sum('m', 'neigh'))
rst = self.project_node_feat((nfeat + graphs.ndata['neigh']) / degs.view(-1, 1))
return rst
class GCN(nn.Module):
def __init__(self, embed_dim, output_dim, num_layer, args):
super(GCN, self).__init__()
self.num_layer = num_layer
self.norm_type = args.norm_type
# input layer
self.atom_encoder = OGB_NodeEncoder(args.dataset, embed_dim)
# middle layer. i.e., convolutional layer
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
for i in range(num_layer):
self.conv_layers.append(GCNConvLayer(args.dataset, embed_dim))
self.norm_layers.append(GNN_Norm(args.norm_type, embed_dim, affine=args.norm_affine))
# output layer
# self.predict = nn.Sequential(
# nn.Linear(embed_dim, embed_dim),
# nn.ReLU(),
# nn.Dropout(p=args.dropout),
# nn.Linear(embed_dim, output_dim)
# )
self.predict = nn.Linear(embed_dim, output_dim)
# other modules in GNN
self.activation = LocalActivation(args.activation)
self.dropout = nn.Dropout(args.dropout)
self.pooling = GlobalPooling(args.pool_type)
def forward(self, graphs, nfeat, efeat):
# initializing node features h_n
h_n = self.atom_encoder(nfeat)
self.conv_feature = []
self.norm_feature = []
self.norm_loss = torch.zeros(self.num_layer)
for layer in range(self.num_layer):
            x = h_n  # kept for the residual connection that is currently disabled below
            # conv layer & norm layer
h_n = self.conv_layers[layer](graphs, h_n, efeat)
self.conv_feature.append(h_n)
h_n = self.norm_layers[layer](graphs, h_n)
self.norm_feature.append(h_n)
# activation
h_n = self.activation(h_n)
# h_n = h_n + x
h_n = self.dropout(h_n)
# pooling & prediction
g_n = self.pooling(graphs, h_n)
pre = self.predict(g_n)
return pre
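if __name__ == '__main__':
    # Smoke-test sketch for GCNConvLayer_SparseAdj (the graph and shapes are
    # illustrative assumptions, not values used in training).
    import dgl
    g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))
    nfeat = torch.randn(3, 16)
    efeat = torch.randn(3, 16)  # one feature row per edge
    layer = GCNConvLayer_SparseAdj(embed_dim=16)
    print(layer(g, nfeat, efeat).shape)  # expected: torch.Size([3, 16])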
| chenchkx/graph_prediction | models/GCN.py | GCN.py | py | 4,565 | python | en | code | 0 | github-code | 13 |
29247022346 |
import os
import requests
import pprint
import json
limitParam = 10 #There are ~4200 matches of Platinum, ~2500 matches of Diamond, ~500 matches of Master
leagueParam = 4 #3 for Platinum, 4 for Diamond, 5 for Master
targetPath = "../../gggreplays/" #Target path for replay files. Make sure the folder exists!
pp = pprint.PrettyPrinter(indent=4)
url = "https://gggreplays.com/api/v1/matches?average_league=" + str(leagueParam) + "&game_type=1v1&replay=true&vs_race=protoss&limit=" + str(limitParam)
response = requests.get(url)
myJson = json.loads(response.text)
# pp.pprint(myJson["collection"][0]["id"])
listLength = len(myJson["collection"])
for x in range(0, listLength):
id = myJson["collection"][x]["id"]
if os.path.isfile(targetPath + str(id) + ".SC2Replay"):
print("Skipped replay ", str(x), "of", listLength, ", file already exists.", " :: ", round((x/listLength * 100), 2), "%")
continue
urlDl = "https://gggreplays.com/matches/" + str(id) + "/replay"
print("Downloading replay ", urlDl, ", ", str(x), "of", listLength, " :: ", round((x/listLength * 100), 2), "%")
    r = requests.get(urlDl, allow_redirects=True)
    # Use a context manager so the file handle is closed promptly
    with open(targetPath + str(id) + ".SC2Replay", "wb") as f:
        f.write(r.content)
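# Hardened variant sketch (assumptions: same endpoint; the 30 s timeout is an
# arbitrary choice). It raises on HTTP errors instead of writing a bad file.
def download_replay(match_id, target_path, timeout=30):
    url_dl = "https://gggreplays.com/matches/" + str(match_id) + "/replay"
    r = requests.get(url_dl, allow_redirects=True, timeout=timeout)
    r.raise_for_status()
    with open(target_path + str(match_id) + ".SC2Replay", "wb") as f:
        f.write(r.content)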
| JohnSegerstedt/DATX02-19-81 | gggreplays/getter.py | getter.py | py | 1,239 | python | en | code | 4 | github-code | 13 |
1430229989 | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [Extension('clpt_commons_bcn',
['clpt_commons_bcn.pyx'],
extra_compile_args=['/openmp',
'/O2', '/favor:INTEL64'],
)]
setup(
name = 'clpt_commons_bcn',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules
)
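# Build note (sketch): the flags above are MSVC-specific (/openmp, /O2,
# /favor:INTEL64); on gcc/clang the equivalents would be -fopenmp and -O2.
# Typical in-place build:
#   python setup_clpt_commons_bcn.py build_ext --inplace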
| albertoferna/compmech | compmech/conecyl/clpt/setup_clpt_commons_bcn.py | setup_clpt_commons_bcn.py | py | 448 | python | en | code | null | github-code | 13 |
5156068393 | import pymongo
import dns
from scrapy.conf import settings
from scrapy.exceptions import DropItem
# totalrank logic
class MiaTotalPipeline(object):
canada = ['/piu.countryImg/031.png']
up = ['fa fa-caret-up']
down = ['fa fa-caret-down']
Lvtext = ['-']
toint = ['test']
blank = [' ']
def process_item(self, itemtotal, spider):
for data in self.toint:
if data not in itemtotal['totalscore']:
itemtotal['totalscore'] = int(itemtotal['totalscore'])
if data not in itemtotal['totalrank']:
itemtotal['totalrank'] = int(itemtotal['totalrank'])
for data in self.canada:
if data not in itemtotal['region']:
raise DropItem("Not Canadian")
for data in self.Lvtext:
if data in itemtotal['totalchange']:
itemtotal['totalchange'] = None
for data in self.up:
if data in itemtotal['tranksymbol']:
symbolup = "+"
itemtotal['totalchange'] = symbolup+itemtotal['totalchange']
for data in self.down:
if data in itemtotal['tranksymbol']:
symboldown = "-"
itemtotal['totalchange'] = symboldown+itemtotal['totalchange']
        # This block was previously an `else` on the for-loop above; since that
        # loop never breaks, it always ran, so it is hoisted out for clarity.
        connection = pymongo.MongoClient("mongodb+srv://piucanada:prima123@piucanada-frwhn.azure.mongodb.net/test?retryWrites=true")
        self.collection = connection.piucanada.totalrank
        self.collection.insert(itemtotal)
        self.collection = connection.piucanada.totalrankhistory
        self.collection.insert(itemtotal)
return itemtotal
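# Refactoring sketch (assumption: behaviour kept identical): the pipelines below
# repeat the same +/- symbol logic with different field names; a shared helper
# and a single module-level MongoClient would remove most of the duplication:
#   def apply_rank_symbol(item, change_key, symbol_key, up, down):
#       if any(u in item[symbol_key] for u in up):
#           item[change_key] = "+" + item[change_key]
#       elif any(d in item[symbol_key] for d in down):
#           item[change_key] = "-" + item[change_key]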
# singlerank logic
class MiaSinglePipeline(object):
canada = ['/piu.countryImg/031.png']
up = ['fa fa-caret-up']
down = ['fa fa-caret-down']
Lvtext = ['-']
toint = ['test']
blank = [' ']
def process_item(self, itemsingle, spider):
for data in self.toint:
if data not in itemsingle['singlescore']:
itemsingle['singlescore'] = int(itemsingle['singlescore'])
if data not in itemsingle['singlerank']:
itemsingle['singlerank'] = int(itemsingle['singlerank'])
for data in self.canada:
if data not in itemsingle['region']:
raise DropItem("Not Canadian")
for data in self.Lvtext:
if data in itemsingle['singlechange']:
itemsingle['singlechange'] = None
for data in self.up:
if data in itemsingle['sranksymbol']:
symbolup = "+"
itemsingle['singlechange'] = symbolup+itemsingle['singlechange']
for data in self.down:
if data in itemsingle['sranksymbol']:
symboldown = "-"
itemsingle['singlechange'] = symboldown+itemsingle['singlechange']
        # Hoisted out of a no-break for/else (see the note in MiaTotalPipeline)
        connection = pymongo.MongoClient("mongodb+srv://piucanada:prima123@piucanada-frwhn.azure.mongodb.net/test?retryWrites=true")
        self.collection = connection.piucanada.singlerank
        self.collection.insert(itemsingle)
        self.collection = connection.piucanada.singlerankhistory
        self.collection.insert(itemsingle)
return itemsingle
# doublerank logic
class MiaDoublePipeline(object):
canada = ['/piu.countryImg/031.png']
up = ['fa fa-caret-up']
down = ['fa fa-caret-down']
Lvtext = ['-']
toint = ['test']
def process_item(self, itemdouble, spider):
for data in self.toint:
if data not in itemdouble['doublescore']:
itemdouble['doublescore'] = int(itemdouble['doublescore'])
if data not in itemdouble['doublerank']:
itemdouble['doublerank'] = int(itemdouble['doublerank'])
for data in self.canada:
if data not in itemdouble['region']:
raise DropItem("Not Canadian")
for data in self.Lvtext:
if data in itemdouble['doublechange']:
itemdouble['doublechange'] = None
for data in self.up:
if data in itemdouble['dranksymbol']:
symbolup = "+"
itemdouble['doublechange'] = symbolup+itemdouble['doublechange']
for data in self.down:
if data in itemdouble['dranksymbol']:
symboldown = "-"
itemdouble['doublechange'] = symboldown+itemdouble['doublechange']
        # Hoisted out of a no-break for/else (see the note in MiaTotalPipeline)
        connection = pymongo.MongoClient("mongodb+srv://piucanada:prima123@piucanada-frwhn.azure.mongodb.net/test?retryWrites=true")
        self.collection = connection.piucanada.doublerank
        self.collection.insert(itemdouble)
        self.collection = connection.piucanada.doublerankhistory
        self.collection.insert(itemdouble)
return itemdouble
# exprank logic
class MiaEXPPipeline(object):
canada = ['/piu.countryImg/031.png']
up = ['fa fa-caret-up']
down = ['fa fa-caret-down']
Lvtext = ['-']
toint = ['test']
def process_item(self, itemexp, spider):
for data in self.toint:
if data not in itemexp['expscore']:
itemexp['expscore'] = int(itemexp['expscore'])
if data not in itemexp['exprank']:
itemexp['exprank'] = int(itemexp['exprank'])
for data in self.canada:
if data not in itemexp['region']:
raise DropItem("Not Canadian")
for data in self.Lvtext:
if data in itemexp['expchange']:
itemexp['expchange'] = None
for data in self.up:
if data in itemexp['expranksymbol']:
symbolup = "+"
itemexp['expchange'] = symbolup+itemexp['expchange']
for data in self.down:
if data in itemexp['expranksymbol']:
symboldown = "-"
itemexp['expchange'] = symboldown+itemexp['expchange']
        # Hoisted out of a no-break for/else (see the note in MiaTotalPipeline)
        connection = pymongo.MongoClient("mongodb+srv://piucanada:prima123@piucanada-frwhn.azure.mongodb.net/test?retryWrites=true")
        self.collection = connection.piucanada.exprank
        self.collection.insert(itemexp)
        self.collection = connection.piucanada.exprankhistory
        self.collection.insert(itemexp)
return itemexp | Cmindo/mia | pipelines.py | pipelines.py | py | 6,366 | python | en | code | 0 | github-code | 13 |
71875407377 | import matplotlib.pyplot as plt
import seaborn as sns
FIGSIZE = (13, 4)
FONTSIZE_TEXT = 16
COLS_NUM = 2
def metrics_str(data, metrics):
met_str = ''
for met in metrics:
met_str += met.__name__+': '
met_outcome = met(data)
met_str += "{:.4f}".format(met_outcome) + '\n'
return met_str
def show_transform_plot(data, trans_fun, fit_dist, metrics):
data_after = trans_fun(data)
sub_titles = ['original', (trans_fun.__name__+'-transformed')]
annotations = [metrics_str(data, metrics), metrics_str(data_after, metrics)]
comparison_dist_plots([data, data_after], data.name, sub_titles, fit_dist, annotations)
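# Usage sketch (assumptions: a pandas Series as input, scipy's norm as the fit
# distribution, and a numpy-compatible metric such as scipy.stats.skew):
#   import numpy as np
#   import pandas as pd
#   from scipy.stats import norm, skew
#   data = pd.Series(np.random.lognormal(size=500), name="price")
#   show_transform_plot(data, np.log1p, norm, metrics=[skew])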
def comparison_dist_plots(data, main_title=None, sub_titles=None, fit_dist=None, annotations=None):
_, axes = plt.subplots(nrows=len(data)//COLS_NUM,
ncols=COLS_NUM, figsize=FIGSIZE)
plt.suptitle(main_title, fontsize=FONTSIZE_TEXT)
for ax, data_ax, title_ax, text_ax in zip(axes.flat, data, sub_titles, annotations):
ax.set_title(title_ax)
ax.text(.7, .85, text_ax, transform=ax.transAxes)
        sns.distplot(data_ax, ax=ax, fit=fit_dist)
plt.subplots_adjust(wspace=0.2)
plt.show() | binkjakub/house-prices | notebooks/plot_utils.py | plot_utils.py | py | 1,209 | python | en | code | 0 | github-code | 13 |
16309628843 | #!/usr/bin/env python3
## https://github.com/zylai/ical-fake-meetings-generator/blob/master/iCal_Fake_Events_Generator.py
from pathlib import Path
import os.path
from os import path
import sys
from calendar import monthrange
from datetime import datetime,timedelta
import uuid
import random
import re
##### User-definable options, start editing here #####
file_location = str(Path.home()) + "/calendar.ics"
break_length_options = [-90,-60,-30,0,0,0,15,15,15,30,30,60,90]
event_length_options = [15,15,30,30,30,30,30,45,45,60,60,60,60,60,60,60,60,90,90,90,120,120,120,240]
def getArgs():
import argparse
parser = argparse.ArgumentParser("Calendar Creation")
parser.add_argument("-y","--year", help="The year", nargs="*", default=["2023"])
parser.add_argument("-m","--month", help="The month", nargs="*", default=["10"])
args,unknown = parser.parse_known_args()
return args
argz = getArgs()
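# CLI sketch (uses the defaults above): generate a month of fake busy blocks, e.g.
#   python cal.py --year 2023 --month 10
# The resulting .ics is written to ~/calendar.ics (see file_location above).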
##### Start of script, stop editing here #####
if path.exists(file_location):
print("\033[91mAn existing iCalendar file is already at: " + file_location + "\033[0m")
print("\033[91mDelete this file to generate another one. Aborting...\033[0m")
sys.exit(1)
# argparse with nargs="*" yields lists, so unwrap the first value
# (int(argz.year) would otherwise raise TypeError below)
user_input_year = argz.year[0]
user_input_month = argz.month[0]
regex_time = "^(([0-9])|([0-1][0-9])|([2][0-3]))([0-5][0-9])$"  # unused leftover: interactive input was replaced by the fixed times below
user_input_begin_time = "0800"
user_input_end_time = "1700"
day_begin_hour = user_input_begin_time[0:2]
day_begin_minute = user_input_begin_time[2:4]
day_end_hour = user_input_end_time[0:2]
day_end_minute = user_input_end_time[2:4]
days_in_month = monthrange(int(user_input_year), int(user_input_month))[1]
# Slice the "+HH:MM" UTC offset out of the ISO timestamp (assumes microseconds
# are present, which datetime.now() normally includes)
utc_offset = str(datetime.now().astimezone().isoformat())[26:33]
calendar_template_begin = """BEGIN:VCALENDAR
PRODID:-//ZYLAI//iCal Fake Events Generator//EN
VERSION:2.0
METHOD:PUBLISH
"""
ics_file = open(file_location, "a")
ics_file.write(calendar_template_begin)
for day in range(1, days_in_month + 1):
day_date = datetime(int(user_input_year), int(user_input_month), day)
if ((day_date.strftime('%A') == "Saturday") or (day_date.strftime('%A') == "Sunday")):
continue
day_begin_utc = datetime.strptime("" + user_input_year + user_input_month.zfill(2) + str(day).zfill(2) + day_begin_hour + day_begin_minute + "00" + utc_offset, '%Y%m%d%H%M%S%z').utctimetuple()
day_end_utc = datetime.strptime("" + user_input_year + user_input_month.zfill(2) + str(day).zfill(2) + day_end_hour + day_end_minute + "00" + utc_offset, '%Y%m%d%H%M%S%z').utctimetuple()
day_begin = datetime(day_begin_utc.tm_year, day_begin_utc.tm_mon, day_begin_utc.tm_mday, day_begin_utc.tm_hour, day_begin_utc.tm_min, 0)
day_end = datetime(day_end_utc.tm_year, day_end_utc.tm_mon, day_end_utc.tm_mday, day_end_utc.tm_hour, day_end_utc.tm_min, 0)
event_end = day_begin
while (event_end < day_end):
break_length = timedelta(minutes = random.choice(break_length_options))
event_end = event_end + break_length
event_begin = event_end
event_length = timedelta(minutes = random.choice(event_length_options))
event_end = event_begin + event_length
event_template = """BEGIN:VEVENT
CLASS:PRIVATE
DESCRIPTION:
DTSTAMP:{}
DTSTART:{}
DTEND:{}
PRIORITY:9
SEQUENCE:0
TRANSP:OPAQUE
UID:{}
END:VEVENT
""".format(datetime.utcnow().strftime('%Y%m%dT%H%M%SZ'), event_begin.strftime('%Y%m%dT%H%M%SZ'), event_end.strftime('%Y%m%dT%H%M%SZ'), str(uuid.uuid4()))
ics_file.write(event_template)
calendar_template_end = "END:VCALENDAR"
ics_file.write(calendar_template_end)
ics_file.close()  # close() must be called; the bare `ics_file.close` was a no-op
print("\033[92miCalendar file generated successfully and has been saved to: " + file_location + "\033[0m")
sys.exit(0) | franceme/staticpy | cal.py | cal.py | py | 3,582 | python | en | code | 0 | github-code | 13 |
#!/usr/bin/env python
from os import listdir
from os.path import isdir, join
import sys
import a
import time
import re
from download import Download
from db import DB
counter = 1
path = "crawlers"
crawlers = [f for f in listdir(path) if isdir(join(path, f))]
for option in crawlers:
print("%d - %s" % (counter, option))
counter += 1
#option = int(input(""))
option = 1
a = a.A(path + "." + crawlers[option-1])
next_page = True
number = 0
counter = 91
# NOTE: `number` is only incremented inside the commented-out DB block below,
# so the `number <= 2500` bound is currently inert; the loop ends when
# next_page becomes None.
while next_page is not None and number <= 2500:
s = DB.select()
print("[+] Gathering links...")
links, next_page = a.get_download_links(counter+2)
print("Next page: " + str(next_page))
if links is not None:
print("[+] Downloading list of programs...")
for link in links:
flag = True
#print(link.link)
# print(link.version)
# print(link.downloads_total)
# print(link.downloads_last_week)
# print(link.info_link)
# if link.link is None:
#
# link.link = "None"
#
# f = open("log.txt", "a")
#
# f.write(str(link.version)+"\n")
# f.write(str(link.downloads_total)+"\n")
# f.write(str(link.downloads_last_week)+"\n")
# f.write(str(link.info_link)+"\n\n")
#
# f.close()
#
# flag = False
if flag:
h, name, path, save = Download.download(crawlers[option-1], link.link)
# if h is not False and save is not False:
#
# print("SUCESS")
#
# # TODO -> PROBABLE INFINIT LOOP... MUST CHECK WHY!
#
# data = []
# # name, download_link, download_date, origin_website, total_downloads, last_week_downloads, version, hash
# data = (name, link.link, time.strftime("%d/%m/%Y - %H:%M:%S"), crawlers[option-1], link.downloads_total,
# link.downloads_last_week, link.version, h, path)
#
# DB.insert(data)
#
# number += 1
#
# else:
#
# print("File already exists")
counter += 1
sys.exit("Done :)")
| LascaTorbot/crawler-surface | crawler.py | crawler.py | py | 2,452 | python | en | code | 0 | github-code | 13 |
363190571 | # Python_Intro
# Problem Set 3
#A series of exercises for CS50 hands-on projects
"""
This one's my approach to the "Grocery List" problem
"""
grocery = {} #A brand new dict
while True:
try:
item = input().upper()
if item in grocery:
grocery[item] += 1
else:
grocery[item] = 1
except EOFError:
for key in sorted(grocery.keys()):
print(grocery[key], key)
break
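# Equivalent sketch using the standard library (assumption: blank input lines
# are ignored, which the loop above would instead count as an item ""):
#   import sys
#   from collections import Counter
#   counts = Counter(line.strip().upper() for line in sys.stdin if line.strip())
#   for key in sorted(counts):
#       print(counts[key], key)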
| JeremyJerez/Python_Set-_3 | grocery.py | grocery.py | py | 443 | python | en | code | 0 | github-code | 13 |
29060344413 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project :Pytorch-MTCNN-68-FACE
@File :RNetDataGenerator.py
@Author :huangxj
@Date :2023/9/13 15:48
'''
import os
import pickle
import sys
import cv2
import numpy as np
import torch
from tqdm import tqdm
# sys.path.append("../")
from Dataset.DataFormatConverter import convert_data
from Utiles.nms import py_nms
from Dataset.Utils.CombineDataList import combine_data_list, delete_old_img
from Dataset.Utils.CropLandmarkImage import crop_landmark_image
from Utiles.GenerateBox import generate_bbox
from Dataset.Utils.mydataset import read_annotation
from Utiles.utile import processed_image
from Dataset.Utils.get_landmark_from_lfw_neg import get_landmark_from_lfw_neg
from Utiles.utile import save_hard_example
# Model path
model_path = '../models'
device = torch.device("cpu")
# Load the PNet model
pnet = torch.jit.load(os.path.join(model_path, 'PNet68.pth'))
pnet.to(device)
pnet.eval()
softmax_p = torch.nn.Softmax(dim=0)
# Run prediction with the PNet model
def predict(infer_data):
    # The image to run inference on
    infer_data = torch.tensor(infer_data, dtype=torch.float32)
    # Add a batch dimension: (c, w, h) -> (n, c, w, h)
    infer_data = torch.unsqueeze(infer_data, dim=0)
    infer_data = infer_data.to(device)
    # Forward pass
    cls_prob, bbox_pred, _ = pnet(infer_data)
    # Squeeze the batch dimension back out: (n, c, w, h) -> (c, w, h)
    cls_prob = torch.squeeze(cls_prob)
    bbox_pred = torch.squeeze(bbox_pred)
    # Softmax over the class scores
    cls_prob = softmax_p(cls_prob)
    return cls_prob.detach().cpu().numpy(), bbox_pred.detach().cpu().numpy()
def detect_pnet(im, min_face_size, scale_factor, thresh):
"""
使用图像金字塔进行人脸预测,预测结果通过nms确认
返回人脸检测框及对应的评分
:param im:
:param min_face_size:
:param scale_factor:
:param thresh:
:return:
"""
    net_size = 12
    # Ratio between the PNet input size and the minimum face size
    current_scale = float(net_size) / min_face_size
    im_resized = processed_image(im, current_scale)  # normalize the image data
    _, current_height, current_width = im_resized.shape
    all_boxes = list()
    # Image pyramid
    while min(current_height, current_width) > net_size:
        # Class map and box regression
        cls_cls_map, reg = predict(im_resized)
        boxes = generate_bbox(cls_cls_map[1, :, :], reg, current_scale, thresh)
        current_scale *= scale_factor  # keep shrinking for the next pyramid level
        im_resized = processed_image(im, current_scale)
        _, current_height, current_width = im_resized.shape
        if boxes.size == 0:
            continue
        # Non-maximum suppression keeps only low-overlap boxes
        keep = py_nms(boxes[:, :5], 0.7, mode='Union')
        boxes = boxes[keep]
        all_boxes.append(boxes)
    if len(all_boxes) == 0:
        return None
    all_boxes = np.vstack(all_boxes)
    # Run NMS once more across all pyramid levels
    keep = py_nms(all_boxes[:, 0:5], 0.7)
    all_boxes = all_boxes[keep]
    # Box widths and heights
    bbw = all_boxes[:, 2] - all_boxes[:, 0] + 1
    bbh = all_boxes[:, 3] - all_boxes[:, 1] + 1
    # Box coordinates and scores mapped back to the original image
    boxes_c = np.vstack([all_boxes[:, 0] + all_boxes[:, 5] * bbw,
                         all_boxes[:, 1] + all_boxes[:, 6] * bbh,
                         all_boxes[:, 2] + all_boxes[:, 7] * bbw,
                         all_boxes[:, 3] + all_boxes[:, 8] * bbh,
                         all_boxes[:, 4]])
    boxes_c = boxes_c.T
    return boxes_c
# Crop pos, neg and part patches and resize them to 24x24 as RNet input
def crop_24_box_image(data_path, filename, min_face_size, scale_factor, thresh):
    # Output directories for the pos, part and neg crops
    pos_save_dir = os.path.join(data_path, '24/positive')
    part_save_dir = os.path.join(data_path, '24/part')
    neg_save_dir = os.path.join(data_path, '24/negative')
    # RNet data directory
    save_dir = os.path.join(data_path, '24/')
    # Create the folders if needed
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    if not os.path.exists(pos_save_dir):
        os.mkdir(pos_save_dir)
    if not os.path.exists(part_save_dir):
        os.mkdir(part_save_dir)
    if not os.path.exists(neg_save_dir):
        os.mkdir(neg_save_dir)
    # Read the annotation data
    data = read_annotation(data_path, filename)
    all_boxes = []
    landmarks = []
    empty_array = np.array([])
    # Run the PNet model over every image
    for image_path in tqdm(data['images']):
        assert os.path.exists(image_path), 'image does not exist'
        im = cv2.imread(image_path)
        boxes_c = detect_pnet(im, min_face_size, scale_factor, thresh)
        if boxes_c is None:
            all_boxes.append(empty_array)
            landmarks.append(empty_array)
            continue
        all_boxes.append(boxes_c)
    # Persist the detection results
    save_file = os.path.join(save_dir, 'detections.pkl')
    with open(save_file, 'wb') as f:
        pickle.dump(all_boxes, f, 1)
    save_hard_example(data_path, 24)
if __name__ == '__main__':
    # NOTE: data_path is machine-specific; point it at your own dataset root
    data_path = "D:\\PYprogram\\Pytorch-MTCNN-master\\dataset\\"
    base_dir = os.path.join(data_path, 'WIDER_train')
    filename = os.path.join(data_path, 'wider_face_train.txt')
    min_face_size = 20
    scale_factor = 0.79
    thresh = 0.6
    # Generate the face bounding-box image data
    print('Generating bbox image data')
    crop_24_box_image(data_path, filename, min_face_size, scale_factor, thresh)
    # Generate the facial-landmark image data
    print('Generating landmark image data')
    # Get lfw neg boxes and landmarks
    lfw_neg_path = os.path.join(data_path, '300w_label.txt')
    data_list = get_landmark_from_lfw_neg(lfw_neg_path, data_path)
    crop_landmark_image(data_path, data_list, 24, argument=True)
    # Merge the data lists
    print('Merging data lists')
    combine_data_list(os.path.join(data_path, '24'))
    # Merge the image data
    print('Merging image files')
    convert_data(os.path.join(data_path, '24'), os.path.join(data_path, '24', 'all_data'))
    # Delete the old data
    print('Deleting old image files')
    delete_old_img(data_path, 24)
| huangxiaojun1996/Pytorch-MTCNN-68-FACE | Dataset/RNetDataGenerator.py | RNetDataGenerator.py | py | 6,350 | python | en | code | 2 | github-code | 13 |
72711103378 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
AUTHOR: Geert Oosterbroek
DESCRIPTION:
Expanded nuclei approach to mRNA clustering,
current default method
"""
from read_roi import read_roi_zip
import pandas as pd
import numpy as np
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import os
import sys
import pickle
sys.path.insert(1, r'C:\Users\geertoosterbroek\Documents\Thesis\Code\mRNA-2-nuclei\code')
from helper_functions import subset
class ExpandedNuclei:
"""
    Implementation of mRNA clustering based on expanding pre-determined nuclei ROIs
"""
def __init__(self):
self.count_data = None
self.cell_rois = None
self.labels = None
self.output_data = None
def fit(self, count_data, cell_rois):
"""
Perform clustering of mRNA molecules based on expanded nuclei ROIs
:parameters: self, instance of class
:return: self, fitted instance of class including cluster labels
"""
self.count_data = count_data
self.output_data = self.count_data.copy()
self.cell_rois = cell_rois
# By default points are classified as noise
labels = np.full(len(self.count_data), -1)
for ind, key in enumerate(self.cell_rois.keys()):
x, y = self.cell_rois[key]['x'], self.cell_rois[key]['y']
xy = np.column_stack((x, y))
poly = patches.Polygon(xy) # We first make a polygon to get a closed path
path = poly.get_path()
# Find which observations are in the closed path, i.e. inside the ROI region
inpath_mask = path.contains_points(count_data[['x', 'y']])
labels[inpath_mask] = ind
self.labels = labels
self.output_data['exp_nuc_labels'] = labels
return self
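# Usage sketch (assumptions: count_data is a DataFrame with 'x'/'y' columns,
# cell_rois comes from read_roi_zip):
#   model = ExpandedNuclei().fit(count_data, cell_rois)
#   model.output_data["exp_nuc_labels"].value_counts()  # -1 marks noise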
def main():
do_subset = True
data_dir = r"C:\Users\geertoosterbroek\Documents\Thesis\Data\507_s7"
input_directory = os.path.join(data_dir, "input")
count_data = pd.read_csv(os.path.join(input_directory, "507_s7 mRNA counts.csv"))
cell_rois = read_roi_zip(os.path.join(input_directory, "507_s7_all_cell_rois.zip"))
if do_subset:
count_data = subset(count_data)
expanded_nuclei = ExpandedNuclei().fit(count_data, cell_rois)
# Store labelled data in appropriate folders
output_dir = os.path.join(data_dir, "output")
if do_subset:
output_dir = os.path.join(output_dir, "subset")
# pickle.dump(clusterer, open(os.path.join(output_dir, "SEDEC_fitted.pkl"), "wb"))
expanded_nuclei.output_data.to_csv(os.path.join(output_dir, "exp_nuc_labelled_data.csv"))
if __name__ == '__main__':
main()
| GeertO97/mRNA_clustering | expanded_nuclei.py | expanded_nuclei.py | py | 2,677 | python | en | code | 0 | github-code | 13 |
37905801574 | from random import randint
def random_list():
s = [randint(a, b) for i in range(c)]
print(s)
a = int(input("Start the list: "))
b = int(input("Finish the list: "))
c = int(input("Number of items: "))
random_list() | PavloStakhyra/Pytonhomework | random_list.py | random_list.py | py | 224 | python | en | code | 0 | github-code | 13 |
37190882865 | from pyproj import CRS, Transformer
from owslib.wmts import TileMatrix, TileMatrixSet
def _convert_coordinates(longitude: float, latitude: float) -> tuple[float, float]:
"""Takes GPS coordinates (EPSG 4326) as input and converts them to the Mercator projection (EPSG 3857).
This function is used to easily find coordinates on a GPS-enabled system such as Google Maps or OpenStreetMap, and
to convert them to a format compatible with the WMTS format.
Args:
longitude (float): GPS longitude
latitude (float): GPS latitude
Returns:
x (float): Mercator longitude
y (float): Mercator latitude
"""
gps_crs = CRS("EPSG:4326")
mercator_crs = CRS("EPSG:3857")
coordinates_transformer = Transformer.from_crs(
gps_crs, mercator_crs, always_xy=True
)
x, y = coordinates_transformer.transform(longitude, latitude)
return x, y
def compute_tile_position(
matrix_set: TileMatrixSet, zoom_level: int, longitude: float, latitude: float
) -> tuple[int, int]:
    Locates the tile containing the targeted location. The tile lives on a matrix set, i.e. a grid of tiles
    addressed by row and column. Row and column are obtained by converting the scale denominator into a tile
    width in meters: multiplying by 0.00028 (meters represented by one pixel) and 256 (pixels along the width
    of a tile). Since width and height are equal in pixels (256), only the width is computed; in the future
    both may be computed to accommodate WMTS servers using rectangular tiles.
Args:
matrix_set (TileMatrixSet): the WMTS matrix set. There is one matrix set per zoom level. It contains rows and columns
zoom_level (int): zoom level to use on the WMTS server
longitude (float): the longitude of the point contained in the desired tile
latitude (float): the latitude of the point contained in the desired tile
Returns:
tile_row (int): the row of the desired tile in the matrix set
tile_column (int): the column of the desired tile in the matrix set
"""
    # TileMatrixSet.tilematrix is a dictionary containing the matrices
matrix_set_dict: dict[str, TileMatrix] = matrix_set.tilematrix
target_matrix = matrix_set_dict[str(zoom_level)]
tile_width_meters = target_matrix.scaledenominator * 0.00028 * 256
print(f"Tile width in meters : {tile_width_meters}")
x0, y0 = map(lambda x: float(x), target_matrix.topleftcorner)
x, y = _convert_coordinates(longitude=longitude, latitude=latitude)
print(f"X = {x}, Y = {y}, identifier = {target_matrix.identifier}")
tile_col, tile_row = (x - x0) / tile_width_meters, (y0 - y) / tile_width_meters
tile_col, tile_row = int(round(tile_col, 0)), int(round(tile_row, 0))
return tile_row, tile_col
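# Usage sketch (assumptions: the server URL and the "PM" matrix-set id are
# illustrative; any owslib WMTS endpoint works):
#   from owslib.wmts import WebMapTileService
#   wmts = WebMapTileService("https://wxs.ign.fr/essentiels/geoportail/wmts")
#   matrix_set = wmts.tilematrixsets["PM"]
#   row, col = compute_tile_position(matrix_set, 15, 2.3522, 48.8566)  # Paris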
| louistransfer/object_detection_ign | object_detection_ign/wmts/utils.py | utils.py | py | 2,955 | python | en | code | 4 | github-code | 13 |
10428914828 | # Morse Code
from morse import morse_code
def morsify(string):
    morsified = ""
    for letter in string:
        # Guard against characters missing from the table (spaces, punctuation)
        # which would otherwise raise KeyError; separate letters with a space
        # so the code stays decodable
        if letter in morse_code:
            morsified += morse_code[letter] + " "
    return morsified.strip()
stringIn = input("Enter the string to Morsify: ")
string = stringIn.upper()
print(morsify(string))
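# Example (assumption: morse_code maps 'S' -> '...' and 'O' -> '---'):
#   morsify("SOS") -> "... --- ..."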
| RajaAjayKumar/BasicPythonProjects | Coding Challenges/10 Morse Code.py | 10 Morse Code.py | py | 284 | python | en | code | 0 | github-code | 13 |
39989362679 | from __future__ import unicode_literals
from __future__ import print_function
import torchtext
from collections import defaultdict,Counter
import codecs
from itertools import count
PAD_WORD = '<blank>'
UNK = 0
BOS_WORD = '<s>'
EOS_WORD = '</s>'
USE_RL = False
# Monkey-patch torchtext's Vocab so it can be pickled: stoi is a defaultdict
# whose lambda default_factory is unpicklable, so convert it to a plain dict on
# save and restore the defaultdict on load.
def __getstate__(self):
    return dict(self.__dict__, stoi=dict(self.stoi))
def __setstate__(self, state):
    self.__dict__.update(state)
    self.stoi = defaultdict(lambda: 0, self.stoi)
torchtext.vocab.Vocab.__getstate__ = __getstate__
torchtext.vocab.Vocab.__setstate__ = __setstate__
def get_fields():
fields = {}
fields["src"] = torchtext.data.Field(init_token=BOS_WORD,pad_token=PAD_WORD,include_lengths=True)
fields["tgt"] = torchtext.data.Field(init_token=BOS_WORD,eos_token=EOS_WORD,pad_token=PAD_WORD,include_lengths=True)
fields["tag"] = torchtext.data.Field(pad_token=PAD_WORD)
return fields
def load_fields(vocab):
vocab = dict(vocab)
fields = get_fields()
for k, v in vocab.items():
# Hack. Can't pickle defaultdict :(
v.stoi = defaultdict(lambda: 0, v.stoi)
if k in fields:
fields[k].vocab = v
return fields
def save_vocab(fields):
vocab = []
for k, f in fields.items():
if 'vocab' in f.__dict__:
f.vocab.stoi = dict(f.vocab.stoi)
vocab.append((k, f.vocab))
return vocab
def build_vocab(train, vocab):
fields = train.fields
vocab = dict(vocab)
fields["src"].vocab = vocab['src']
fields["tgt"].vocab = vocab['tgt']
fields["tag"].vocab = vocab['tag']
# OrderedIterator = torchtext.data.Iterator
def batch(data, batch_size, batch_size_fn=None):
"""Yield elements from data in chunks of batch_size."""
if batch_size_fn is None:
def batch_size_fn(new, count, sofar):
sofar+=1
return sofar
minibatch, size_so_far = [], 0
for ex in data:
if "_PAD" not in ex.tgt:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)
if size_so_far == batch_size and len(minibatch)==0:
minibatch, size_so_far = [], 0
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0)
if minibatch:
yield minibatch
class OrderedIterator(torchtext.data.Iterator):
    # Batches with the _PAD-filtering batch() helper above instead of the
    # default torchtext batching
    def create_batches(self):
        self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)
class InferIterator(torchtext.data.Iterator):
def create_batches(self):
if self.train:
self.batches = torchtext.data.pool(
self.data(), self.batch_size,
self.sort_key, self.batch_size_fn,
random_shuffler=self.random_shuffler)
else:
self.batches = []
for b in torchtext.data.batch(self.data(), self.batch_size,
self.batch_size_fn):
self.batches.append(sorted(b, key=self.sort_key))
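# Usage sketch (assumption: `train` is a torchtext Dataset built with
# get_fields(), so src was declared with include_lengths=True):
#   train_iter = OrderedIterator(train, batch_size=64, repeat=False)
#   for minibatch in train_iter:
#       src, src_lengths = minibatch.src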
| timchen0618/LaPat | baseline/gmdr/biwei_dialog0/dialog0/Seq2SeqWithRL/IO.py | IO.py | py | 3,154 | python | en | code | 2 | github-code | 13 |
35219634825 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 20 15:16:50 2016
@author: Hanbin Seo
"""
### import data
import urllib.request
import numpy as np
X_train, y_train = None, None
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arcene/ARCENE/arcene_train.data"
with urllib.request.urlopen(url) as response:
    tmp_data = response.read().split()
    tmp_data = [int(val) for val in tmp_data]
    print(sum(tmp_data), "\tCheck_sum value : 70726744.00")
    # Integer division: reshape() rejects the float that / produces in Python 3
    X_train = np.array(tmp_data).reshape(len(tmp_data) // 10000, 10000)  # Number of features: 10000
    print(len(X_train), "\tCount of Tot_ex : 100\n")
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arcene/ARCENE/arcene_train.labels"
with urllib.request.urlopen(url) as response:
    tmp_data = response.read().split()
    y_train = [int(val) for val in tmp_data]
    print(len(y_train), "\tcount of Tot_ex : 100")
X_test, y_test = None, None
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arcene/ARCENE/arcene_valid.data"
with urllib.request.urlopen(url) as response:
    tmp_data = response.read().split()
    tmp_data = [int(val) for val in tmp_data]
    print(sum(tmp_data), "\tCheck_sum value : 71410108.00")
    X_test = np.array(tmp_data).reshape(len(tmp_data) // 10000, 10000)  # Number of features: 10000
    print(len(X_test), "\tCount of Tot_ex : 100\n")
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arcene/arcene_valid.labels"
with urllib.request.urlopen(url) as response:
    tmp_data = response.read().split()
    y_test = [int(val) for val in tmp_data]
    print(len(y_test), "\tcount of Tot_ex : 100")
### build k-neighClassifier
from sklearn.neighbors import KNeighborsClassifier
def neighClassifier(train_input, train_label, test_input, test_label) :
neigh = KNeighborsClassifier(n_neighbors=5)
neigh.fit(train_input, train_label)
res = {'acc_score':neigh.score(test_input, test_label), 'pred_val':neigh.predict(test_input)}
res['dimension'] = len(train_input.T)
return res
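# Quick self-check sketch (assumption: sklearn's toy digits set, used only to
# smoke-test the helper; not part of the ARCENE experiment):
#   from sklearn.datasets import load_digits
#   from sklearn.model_selection import train_test_split
#   X, y = load_digits(return_X_y=True)
#   Xtr, Xte, ytr, yte = train_test_split(X, y, random_state=0)
#   print(neighClassifier(Xtr, ytr, Xte, yte)['acc_score'])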
### apply k-NN classifier on overall data
result_dic = {'origin_data':neighClassifier(X_train, y_train, X_test, y_test)}
### build dimension reducer
import pandas as pd
from pandas import DataFrame as df
data_input = pd.concat([df(X_train), df(X_test)], keys=['train', 'test'])
from sklearn import decomposition
from sklearn import manifold
def dimension_reducer(type_, n_dimension):
    reducer = None
    # String comparison must use ==, not `is` (identity checks on str literals
    # are unreliable)
    if type_ == 'pca':
        reducer = decomposition.PCA(n_components=n_dimension)
    elif type_ == 'kernel_pca':
        reducer = decomposition.KernelPCA(kernel='rbf', n_components=n_dimension) # because default kernel is 'linear'
    elif type_ == 'isomap':
        reducer = manifold.Isomap(n_components=n_dimension)
    elif type_ == 'lle':
        reducer = manifold.LocallyLinearEmbedding(n_components=n_dimension)
    else :
        print("you must define a reducer; choose one of: ('pca', 'kernel_pca', 'isomap', 'lle')")
        return None
    try :
        reduced_data = reducer.fit_transform(data_input)
        # The first len(X_train) rows are the training block from pd.concat above
        reduced_train = df(reduced_data).loc[:len(X_train)-1,:]
        reduced_test = df(reduced_data).loc[len(X_train):,:]
    except ValueError as e :
        print("\n\nError Message: ", e,"\nError occur @", (type_,n_dimension),"\n")
        return None, None  # avoid the NameError the bare fall-through would raise
    return reduced_train, reduced_test
### apply k-NN classigier on reduced data
dimensions = [10, 50, 100]
reducers = ['pca', 'kernel_pca', 'isomap', 'lle']
params_li = [('%s_%d'%(r,d) ,r,d) for r in reducers for d in dimensions]
for params in params_li :
k, r, d = params
    t, v = dimension_reducer(r, d)
result_dic[k] = neighClassifier(t, y_train, v, y_test)
result_dic[k]['reducer'] = r
result_df = df(result_dic).T
#result_df.loc[result_df['reducer']=='pca']
### visuaize result
result_vis = result_df.drop('origin_data')
result_vis['log_dim'] = result_vis['dimension'].apply(lambda x:np.log(x))
result_vis['exp_acc'] = result_vis['acc_score'].apply(lambda x:np.exp(x))
def vis_df(type_):
    # DataFrame.sort() was removed from pandas; sort_values is the equivalent
    tmp_df = result_vis.loc[result_vis['reducer']==type_].sort_values('dimension')
    return tmp_df
x_min, x_max = result_vis['log_dim'].min()-0.1, result_vis['log_dim'].max()+0.1
y_min, y_max = result_vis['exp_acc'].min()-0.1, result_vis['exp_acc'].max()+0.1
xlin, ylin = np.linspace(x_min, x_max, 200), np.linspace(y_min, y_max, 200)
import matplotlib.pyplot as plt
plt.figure(figsize=(5,8))
plt.plot(xlin,np.exp([result_df['acc_score']['origin_data'] for i in range(200)]), '--', color = '0.65')
plt.scatter(result_vis['log_dim'], result_vis['exp_acc'])
for r in reducers :
    plt.plot(vis_df(r)['log_dim'], vis_df(r)['exp_acc'], 'b--')
    # .iloc for positional access: the frame's index holds string keys like 'pca_10'
    plt.text(vis_df(r)['log_dim'].iloc[0]+0.1, vis_df(r)['exp_acc'].iloc[0]+0.01, vis_df(r)['acc_score'].iloc[0])
    plt.text(vis_df(r)['log_dim'].iloc[1]+0.1, vis_df(r)['exp_acc'].iloc[1]+0.01, r)
plt.xlabel("log_dim")
plt.ylabel("exp_acc")
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.show()
| 5eo1ab/study4machine-learning | A8-dim_reduction/seo8.py | seo8.py | py | 4,942 | python | en | code | 1 | github-code | 13 |
17939276580 | from graia.ariadne.app import Ariadne
from graia.ariadne.event.message import GroupMessage
from graia.ariadne.message.chain import MessageChain
from graia.ariadne.message.element import *
from graia.ariadne.message.parser.twilight import Twilight, MatchResult
from graia.ariadne.model import Group, Member
from graia.saya import Saya, Channel
from graia.saya.builtins.broadcast.schema import ListenerSchema
from urllib.parse import quote
from playwright.async_api import async_playwright
channel = Channel.current()
channel.name("MoegirlInfo")
channel.description("获取萌娘百科上人物介绍卡片")
channel.author("I_love_study")
@channel.use(
ListenerSchema(
listening_events=[GroupMessage],
inline_dispatchers=[Twilight.from_command("萌娘百科 {para}")],
)
)
async def moegirl_search(app: Ariadne, group: Group, para: MatchResult):
url = "https://zh.moegirl.org.cn/zh-cn/" + quote(para.result.display.strip())
async with async_playwright() as p:
browser = await p.chromium.launch()
context = await browser.new_context(device_scale_factor=2.0)
page = await context.new_page()
await app.send_group_message(group, MessageChain([Plain("正在加载中,请稍后")]))
try:
            # Moegirl can be slow, so allow 20 s and wait only for
            # domcontentloaded (some images may not have finished loading)
await page.goto(url, wait_until="domcontentloaded", timeout=20000)
except Exception:
await app.send_group_message(
group, MessageChain([Plain("错误:无法打开页面(可能是页面加载时间太久),请稍后再试")])
)
await browser.close()
return
await page.set_viewport_size({"width": 2560, "height": 1920})
selector = ["table.infobox2", "table.infobox", "div.infotemplatebox"]
for s in selector:
card = await page.query_selector(s)
if card is not None:
break
else:
await app.send_group_message(group, MessageChain("错误:找不到人物信息卡片"))
await browser.close()
return
clip = await card.bounding_box()
assert clip is not None
pic = await page.screenshot(clip=clip, type="png", full_page=True)
await browser.close()
await app.send_group_message(group, MessageChain(Image(data_bytes=pic)))
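# Performance note (sketch): launching a fresh Chromium per command is the slow
# path; a browser started once and reused across calls would cut latency, e.g.
# (assumption: module-level startup is acceptable in this Saya channel):
#   pw = await async_playwright().start()
#   browser = await pw.chromium.launch()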
| Rainbow-Project/bot_rain_py | modules/moegirl_info.py | moegirl_info.py | py | 2,428 | python | en | code | 4 | github-code | 13 |
17953740790 | class Rectangle:
def CalculateArea(self):
# This function will accept input of length and breadth and calculate area
self.width=int(input("Enter Length:"))
self.height=int(input("Enter breadth:"))
area=self.width*self.height
print(area)
return (area)
def CalculatePerimeter(self):
# This function will accept input and calculate perimeter
perimeter=2*(self.width+self.height)
print(perimeter)
return (perimeter)
class Square:
def CalculateArea(self):
# This function will accept input side and calculate area
print("Enter side:")
self.s=float(input())
area=self.s*self.s
return(area)
def CalculatePerimeter(self):
# This function will accept input and calculate perimeter
perimeter=4*self.s
return(perimeter)
# Creating objects and calling the functions
c=Square()
x=c.CalculateArea()
y=c.CalculatePerimeter()
print("Area of square is=%f"%(x))
print("perimeter of square is=%f"%(y))
c=Rectangle()
x=c.CalculateArea()
y=c.CalculatePerimeter()
print("Area of rectangle is=%f"%(x))
print("Perimeter of rectangle is=%f"%(y))
| LakshitaNarsian/IT-TOOLS | class rectangle.py | class rectangle.py | py | 1,214 | python | en | code | 0 | github-code | 13 |
73693524817 | # -*- coding: utf-8 -*-
'''
Escreva a sua solução aqui
Code your solution here
Escriba su solución aquí
'''
distancia_total = int(input())
combustivel_gasto = float(input())
consumo = distancia_total / combustivel_gasto
print(f"{consumo:.3f} km/l") | AkiraTorres/beecrowd | Respostas/Python/1014.py | 1014.py | py | 256 | python | pt | code | 3 | github-code | 13 |
19241526997 | from django.core.management import BaseCommand
from django.db import transaction
from open_api.recuperation_facility import get_api_data
from ...models import Facility
# ./manage.py setapidata 실행
class Command(BaseCommand):
def handle(self, *args, **options):
try:
# transaction 을 사용해 Exception 발생 시, 이전 DB 상태에 영향 주지 않도록 처리
with transaction.atomic():
# facility 항목 모두 지운 후 진행
Facility.objects.all().delete()
# get_api_data() 로부터 시설 리스트를 받아옴
api_data = get_api_data()
# Facility obj 를 생성해 list 에 할당
# 만약 api_data 가 None 일 경우, 이 구문에서 TypeError 발생
objs = [Facility(**kwargs) for kwargs in api_data]
# bulk_create() 를 사용해 한번에 저장하도록 처리
Facility.objects.bulk_create(objs)
print('{} 개의 Facility 저장.'.format(Facility.objects.all().count()))
# TypeError: get_api_data() 에서 return 값이 없는 경우(open_api requests 실패)
except TypeError:
print("Can't set database")
| kimdohwan/Place-For-Elderly | app/facilities/management/commands/setapidata.py | setapidata.py | py | 1,265 | python | ko | code | 0 | github-code | 13 |
37856109203 | class Hero:
hp=0
power=0
name=""
def __init__(self, hp, power,name):
self.hp = hp
self.power = power
self.name = name
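    def speak_lines(self):
        # Minimal placeholder (assumption): fight() below calls speak_lines(),
        # but the original class never defines it; print a generic line so the
        # class is runnable. Subclasses can override with real dialogue.
        print(f"{self.name}: ...")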
    # One round of combat: both heroes strike simultaneously
    def fight(self, enemy):
        self.hp = self.hp - enemy.power
        enemy.hp = enemy.hp - self.power
        # This hero wins: print the winner's line
        if self.hp > enemy.hp:
            print(f"{self.name} wins!")
            self.speak_lines()
        # The enemy wins: print the enemy's line
        elif self.hp < enemy.hp:
            print(f"{enemy.name} wins!")
            enemy.speak_lines()
        # A draw: both sides speak
        elif self.hp == enemy.hp:
            print("It's a draw!")
            self.speak_lines()
            enemy.speak_lines() | lqin007/testDemo | PythonPractice/heroPractice/hero.py | hero.py | py | 756 | python | en | code | 0 | github-code | 13 |
39686170092 | # Name: Downloading Files With Certutil
# RTA: certutil_webrequest.py
# ATT&CK: T1105
# Description: Uses certutil.exe to download a file.
import common
MY_DLL = common.get_path("bin", "mydll.dll")
@common.dependencies(MY_DLL)
def main():
# http server will terminate on main thread exit
# if daemon is True
server, ip, port = common.serve_web()
uri = "bin/mydll.dll"
target_file = "mydll.dll"
common.clear_web_cache()
url = "http://{ip}:{port}/{uri}".format(ip=ip, port=port, uri=uri)
common.execute(["certutil.exe", "-urlcache", "-split", "-f", url, target_file])
server.shutdown()
common.remove_file(target_file)
if __name__ == "__main__":
exit(main())
| endgameinc/RTA | red_ttp/certutil_webrequest.py | certutil_webrequest.py | py | 708 | python | en | code | 1,004 | github-code | 13 |
28703077444 | import pygame
from pygame import Rect
from pygame import Surface
import piece
from piece import *
class entities():
def __init__(self, _list = []):
self._list = _list
def addEntity(self, toAdd):
self._list.append(toAdd)
def getLength(self):
return len(self._list)
def getEntityByID(self, ID):
for i in self._list:
if i[0] == id:
return i
def removeEntity(self, ID):
if ID != None:
allitems = []
gotit = 0
for i in self._list:
if i[0] == ID and gotit == 0:
gotit = 1
else:
allitems.append(i)
self._list = allitems
| jyota/vHunter | entities.py | entities.py | py | 574 | python | en | code | 0 | github-code | 13 |
30583877128 | # -*- coding: utf-8 -*-
import hashlib
from copy import deepcopy
import numpy as np
from ge.bpmc.api.schemas.bpm import CriteriaModel, OverlayModel
from ge.bpmc.api.schemas.default import (AngleModel, BigIntModel, BooleanModel,
DoubleModel, DoublePointModel,
FloatModel, IntModel, LineModel,
NumberModel, PointModel,
SmallIntModel, StringModel,
TinyIntModel)
LATERALITY_MATCHING_KEY = 'laterality_match_uid'
VIEW_POSITION_MATCHING_KEY = 'view_position_match_uid'
DUMMY_PIXEL_RESULT = [
[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 255, 255, 255]
]
###############################
#
# TO BUSINESS
#
###############################
CRITERIA_MAPPING = {
'attribute_length_of_posterior_nipple_line': {
'key': 'length_of_posterior_nipple_line'},
'attribute_symmetry': {
'key': 'symmetry'},
'kpi_centricity': {
'key': 'centricity'},
'kpi_absence_of_breast_sagging': {
'key': 'absence_of_breast_sagging'},
'kpi_compression': {
'key': 'compression'},
'kpi_imf_horizontal_position_in_mm': {
'key': 'inframmary_fold_visible_vertical_distance',
'parent': 'inframmary_fold_visible'},
'kpi_imf_vertical_position_in_mm': {
'key': 'inframmary_fold_visible_horizontal_distance',
'parent': 'inframmary_fold_visible'},
'kpi_inframmary_fold_without_skin_folds_angle': {
'key': 'inframmary_fold_without_skin_folds_angle',
'parent': 'inframmary_fold_without_skin_folds'},
'kpi_inframmary_fold_without_skin_folds_radius': {
'key': 'inframmary_fold_without_skin_folds_radius',
'parent': 'inframmary_fold_without_skin_folds'},
'kpi_nipple_visible_in_profile': {
'key': 'nipple_visible_in_profile'},
'kpi_nipple_angle': {
'key': 'nipple_angle'},
'kpi_no_bottom_overlapping': {
'key': 'bottom_overlapping'},
'kpi_no_opposite_overlapping': {
'key': 'opposite_overlapping'},
'kpi_no_top_overlapping': {
'key': 'top_overlapping'},
'pectoral_muscle_angle': {
'key': 'pectoral_muscle_angle'},
'pectoral_muscle_visible_up_to_nipple_line': {
'key': 'pectoral_muscle_visible_up_to_nipple_line'},
'pectoral_muscle_width': {
'key': 'pectoral_muscle_width'},
'kpi_axillary_tail_area': {
'key': 'axillary_tail_visible_area',
'parent': 'axillary_tail_visible'},
'kpi_axillary_taildistance': {
'key': 'axillary_tail_visible_distance',
'parent': 'axillary_tail_visible'},
'kpi_transition_to_intermammary_cleft_area': {
'key': 'transition_to_intermammary_cleft_visible_area',
'parent': 'transition_to_intermammary_cleft_visible'},
'kpi_transition_to_intermammary_cleft_distance': {
'key': 'transition_to_intermammary_cleft_visible_distance',
'parent': 'transition_to_intermammary_cleft_visible'},
}
OVERLAY_MAPPING = {
'axillary_tail_point': {
'key': 'axillary_tail_visible'},
'imf_point': {
'key': 'inframmary_fold_visible'},
'intersection_point_pm_pnl': {
'key': 'pectoral_muscle_visible_up_to_nipple_line'},
'nipple_detected_point': {
'key': 'nipple_angle',
'aggregate': True,
'position': 0},
'nipple_ideal_point': {
'key': 'nipple_angle',
'aggregate': True,
'position': 1},
'no_bottom_overlapping': {
'key': 'bottom_overlapping'},
'no_opposite_overlapping': {
'key': 'opposite_overlapping'},
'no_top_overlapping': {
'key': 'top_overlapping'},
'pectoral_muscle_start_point': {
'key': 'pectoral_muscle_width',
'aggregate': True,
'position': 0},
'pectoral_muscle_end_point': {
'key': 'pectoral_muscle_width',
'aggregate': True,
'position': 1},
'transition_to_intermammary_cleft_point': {
'key': 'transition_to_intermammary_cleft_visible'}
}
def generate_default_dict(dict_, branch):
for k, v in branch.items():
if isinstance(v, dict) and 'type' not in v:
dict_[k] = generate_default_dict({}, v)
else:
dict_[k] = None
return dict_
CRITERIA_DEFAULT_DICT = generate_default_dict({}, CriteriaModel.properties)
OVERLAY_DEFAULT_DICT = generate_default_dict({}, OverlayModel.properties)
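# Example (sketch):
#   generate_default_dict({}, {'a': {'type': 'int'}, 'b': {'c': {'type': 'str'}}})
#   -> {'a': None, 'b': {'c': None}}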
def to_point_model(scpt):
"""
Science ImagePoint to PointModel transformation utility
"""
if scpt is None:
return scpt
return PointModel(
x=int(round(scpt.x)),
y=int(round(scpt.y)))
def to_expected_output_type(value, output_type):
    # NOTE: any falsy value (0, False, "") collapses to None here, not just None itself
    if not value:
        return None
if output_type in (TinyIntModel, SmallIntModel, IntModel, BigIntModel):
return int(value) if not np.isnan(value) else None
if output_type in (NumberModel, FloatModel, DoubleModel):
return float(value) if not np.isnan(value) else None
if output_type is BooleanModel:
return bool(value)
if output_type is PointModel:
return to_point_model(value)
if output_type in (DoublePointModel, LineModel):
start, end = value.get(0), value.get(1)
return {
'start': to_point_model(start),
'end': to_point_model(end)}
if output_type is AngleModel:
start = value.get('start', {})
end = value.get('end', {})
line1_start, line1_end = start.get('start'), start.get('end')
line2_start, line2_end = end.get('start'), end.get('end')
return {
'start': {
'start': to_point_model(line1_start),
'end': to_point_model(line1_end)
},
'end': {
'start': to_point_model(line2_start),
'end': to_point_model(line2_end)
}}
def to_application_model(processed, mapping, default_dict, model):
"""
Morphs processing algorythm result dict to expected application dict
:processed: dict from business algorythm
:mapping: the mapping to use to select and extract data
:default_dict: the pre-filled dict with expected keys
:model: the Flask swagger Schema to use to identify data types
"""
# Filtering & filling result dict
filtered = {k: processed[k]
for k in processed if k in mapping}
results = deepcopy(default_dict)
for k in filtered:
destination = mapping.get(k)
parent = destination.get('parent', None)
key = destination['key']
is_agg = destination.get('aggregate', False)
agg_position = destination.get('position', 0)
if is_agg:
value = results[parent][key] if parent else results[key]
value = value if value else {}
value[agg_position] = filtered[k]
else:
value = filtered[k]
if parent:
results[parent][key] = value
else:
results[key] = value
# Sanitizing
for key, conf in {x['key']: x for x in mapping.values()}.items():
parent = conf.get('parent', None)
output_type = (model.properties
.get(parent, model.properties)
.get(key))
if parent:
results[parent][key] = to_expected_output_type(
results[parent][key], output_type)
else:
results[key] = to_expected_output_type(
results[key], output_type)
return results
def to_application_criteria(dict_):
return to_application_model(dict_, CRITERIA_MAPPING,
CRITERIA_DEFAULT_DICT, CriteriaModel)
def to_application_overlay(dict_):
return to_application_model(dict_, OVERLAY_MAPPING,
OVERLAY_DEFAULT_DICT, OverlayModel)
def to_processing_data(image):
"""
Transforms an Image into a business usable tuple for processing
:image: a "science" Image object
"""
return (
to_application_criteria(image.kpi),
to_application_overlay(image.display),
DUMMY_PIXEL_RESULT # image.pixel_array_original
)
def get_match_key(match):
return hashlib.md5(str(sorted(match['pair'])).encode('utf-8')).hexdigest()
def to_matching_data(procedure):
"""
Transforms a Procedure matcher result into a list of dicts for each unique
match
:procedure: a "science" Procedure matcher object
"""
mixed = (procedure.image_list_for_processing +
procedure.image_list_for_presentation)
results = []
for obj in mixed:
elt = obj.to_dict()
uid = elt['image_uid']
laterality_match = elt.get(LATERALITY_MATCHING_KEY)
vp_match = elt.get(VIEW_POSITION_MATCHING_KEY)
if laterality_match:
results.append({
'pair': [uid, laterality_match],
'criteria': {
'length_of_posterior_nipple_line':
elt.get('kpi_length_of_posterior_line')}
})
if vp_match:
results.append({
'pair': [uid, vp_match],
'criteria': {'symmetry': elt.get('kpi_symmetry')}
})
return {get_match_key(x): x for x in results}.values()
###############################
#
# TO SCIENCE & BEYOND
#
###############################
SC_IMAGE_MAPPING = {
'PixelArray': {
'origins': [{'key': 'image'}]},
'ManufacturerModelName': {
'origins': [{'key': 'model_name',
'parent': 'modality'}]},
'SoftwareVersions': {
'origins': [{'key': 'software_version',
'parent': 'modality'}]},
'Manufacturer': {
'origins': [{'key': 'manufacturer_name',
'parent': 'modality'}]},
'SOPClassUID': {
'origins': [{'key': 'sop_class'}]},
'BreastImplantPresent': {
'origins': [{'key': 'breast_implant_present'}]},
'ImageLaterality': {
'origins': [{'key': 'image_laterality'}]},
'AcquisitionDatetime': {
'origins': [{'key': 'acquisition_time'}]},
'ViewPosition': {
'origins': [{'key': 'view_position'}]},
'ImagerPixelSpacing': {
'aggregate': True,
'aggregation_mode': 'join_by_slash',
'origins': [{'key': 'horizontal',
'position': 0,
'parent': 'imager_pixel_spacing',
'entry': 'processing_data'},
{'key': 'vertical',
'position': 1,
'parent': 'imager_pixel_spacing',
'entry': 'processing_data'}]},
'Rows': {
'origins': [{'key': 'rows',
'parent': 'processing_data'}]},
'Columns': {
'origins': [{'key': 'columns',
'parent': 'processing_data'}]},
'PhotometricInterpretation': {
'origins': [{'key': 'photometric_interpretation',
'parent': 'processing_data'}]},
'PresentationIntentType': {
'origins': [{'key': 'presentation_intent_type',
'parent': 'processing_data'}]},
'BitsAllocated': {
'origins': [{'key': 'bits_allocated',
'parent': 'processing_data'}]},
'ImageType': {
'aggregate': True,
'aggregation_mode': 'single_item_join_by_slash',
'origins': [{'key': 'image_type',
'parent': 'processing_data',
'position': 0, }]},
'CompressionForce': {
'origins': [{'key': 'compression_force'}]},
'BodyPartExamined': {
'origins': [{'key': 'body_part_examined',
'parent': 'processing_data'}]},
'CollimatorShape': {
'origins': [{'key': 'collimator_shape',
'parent': 'processing_data'}]},
'CollimatorLeftVerticalEdge': {
'origins': [{'key': 'collimator_left_vertical_edge',
'parent': 'processing_data'}]},
'CollimatorLowerHorizontalEdge': {
'origins': [{'key': 'collimator_lower_horizontal_edge',
'parent': 'processing_data'}]},
'CollimatorRightVerticalEdge': {
'origins': [{'key': 'collimator_right_vertical_edge',
'parent': 'processing_data'}]},
'CollimatorUpperHorizontalEdge': {
'origins': [{'key': 'collimator_upper_horizontal_edge',
'parent': 'processing_data'}]}
}
# See ge.bpmc.services.workflow.WorkflowService.get_procedure_matching_data
SC_IMAGE_MATCHER_MAPPING = {
'acquisition_date': {
'origins': [{'key': 'acquisition_time'}]
},
'image_uid': {
'origins': [{'key': 'uid'}]
},
'presentation_intent_type': {
'origins': [{'key': 'presentation_intent_type',
'parent': 'processing_data'}]
},
'image_laterality': {
'origins': [{'key': 'image_laterality'}]
},
'view_position': {
'origins': [{'key': 'view_position'}]
},
'attribute_symmetry': {
'origins': [{'key': 'symmetry', 'parent': 'criteria'}]
},
'attribute_length_of_posterior_nipple_line': {
'origins': [{'key': 'length_of_posterior_nipple_line',
'parent': 'criteria'}]
}
}
def crawl_dict(dict_, keys):
if keys and dict_:
return crawl_dict(dict_.get(keys.pop(0), None), keys)
else:
return dict_
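# Example (sketch): crawl_dict({'a': {'b': 1}}, ['a', 'b']) -> 1
# Note that `keys` is consumed in place via list.pop(0).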
def to_science_model(payload, mapping=SC_IMAGE_MAPPING):
"""
Morphs computation request payload to a "science"-based model
:payload: Payload of a computation request.
See ge.bpmc.api.schemas.bpm.ModalityImageModel
"""
results = {}
for k in mapping:
conf = mapping[k]
origins = conf['origins']
aggregate = conf.get('aggregate', False)
aggregation_mode = conf.get('aggregation_mode', False)
for item in origins:
key = item['key']
parent = item.get('parent', None)
entry = item.get('entry', None)
position = item.get('position', 0)
value = crawl_dict(payload, [x for x in [entry, parent, key]
if x is not None])
if aggregate:
results.setdefault(k, {})[position] = value
else:
                # This overrides the previous value if multiple items
                # exist in the origins list and aggregation params are not
                # properly set
results[k] = value
if aggregate and results[k]:
if aggregation_mode == 'single_item_join_by_slash':
# single_item is expected to be of iterable type
single_item = [x for x in results[k].values()][0]
results[k] = '/'.join([str(x) for x in single_item])
if aggregation_mode == 'join_by_slash':
results[k] = '/'.join(
[str(results[k][x]) for x in sorted(results[k].keys())])
return results
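# Aggregation sketch (hypothetical values): with SC_IMAGE_MAPPING, a payload
# holding {'processing_data': {'imager_pixel_spacing': {'horizontal': 0.1,
# 'vertical': 0.2}}} yields 'ImagerPixelSpacing': '0.1/0.2', since both origins
# use 'join_by_slash' and are joined in ascending position order.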
def to_science_image(payload):
return to_science_model(payload)
def to_science_image_match(match):
return to_science_model(match, mapping=SC_IMAGE_MATCHER_MAPPING)
def to_science_image_matches(matches):
return [to_science_image_match(x) for x in matches]
| dbenlopers/SANDBOX | misc/bpm_cloud/ge.bpmc/ge/bpmc/business/translator.py | translator.py | py | 15,434 | python | en | code | 0 | github-code | 13 |
69970886738 | import pygame
from castspellaction import *
from color import *
from gamestat import *
from actor import *
from spell import *
from vector import *
class Player(Actor):
def __init__(self, level):
Actor.__init__(self, level)
self.hp = GameStat(20)
self.mana = [GameStat(10) for _ in range(3)]
self.mana_gen = Color(1, 1, 1)
self.crystals = []
self.spells = []
for size in [2, 3, 4]:
self.spells.append(Spell(self, size))
self.next_action = None
self.heal_timer = 0
self.warp_to = None
# create the initial basic melee spell
basic = self.spells[0]
color = Color(True, True, True)
row, col = 1, 1
crystal = Crystal()
crystal.color = color
crystal.pipes = ['In', 'In', 'In', None, 'Out', None]
crystal.atts['Cast'] = ['Melee']
crystal.atts['Neutral'] = 1
crystal.atts['Mana'] = Color()
for _ in range(2):
crystal.rotate(-1)
basic.grid.cells[row][col] = crystal
def needs_input(self):
        return self.next_action is None
def get_action(self):
action = self.next_action
self.next_action = None
return action
def update(self):
if self.heal_timer == 0:
            # timer expired: reset it, then apply one regeneration tick
            self.heal_timer = 3
self.hp.add(1)
for mana, delta in zip(self.mana, self.mana_gen):
mana.add(delta)
else:
self.heal_timer -= 1
def get_mana(self):
return Color([x.val for x in self.mana])
def burn_mana(self, mana_cost):
for (mana, cost) in zip(self.mana, mana_cost):
mana.sub(cost)
def default_attack(self, target, dir):
spell = self.spells[0]
action = CastSpellAction(self, spell, dir)
return action
def walk_to(self, loc, vel):
Actor.walk_to(self, loc, vel)
warp = self.level.get_warp(loc)
if warp is None:
return
self.warp_to = warp
| kotrenn/crystal | player.py | player.py | py | 2,061 | python | en | code | 0 | github-code | 13 |
477195461 | from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .validators import currency_available_validator, empty_params_validator, date_validator, float_validator
from exchange_rate.utils import get_currency_rates, get_exchanged_currency_amount, get_time_weighted_rate_return
@api_view(['GET'])
def currency_rates(request):
""" Currency rates for a specific time period
Parameters: source_currency: source currency symbol/ date_from / date_to
Response: a time series list of rate values for each available Currency
"""
# Get params
currency_symbol = request.GET.get('source_currency')
date_from = request.GET.get('date_from')
date_to = request.GET.get('date_to')
# Validate params
empty_params_validator(currency_symbol, date_from, date_to)
source_currency = currency_available_validator(currency_symbol)
start_date = date_validator(date_from)
end_date = date_validator(date_to)
# Get data
data = get_currency_rates(source_currency, start_date, end_date)
return Response(data, status=status.HTTP_200_OK)
@api_view(['GET'])
def exchanged_currency_amount(request):
""" Calculates (latest) amount in a currency exchanged into a different currency.
Parameters: source_currency, amount, exchanged_currency.
    Response: an object containing the rate value between source and exchanged currencies, along with the currencies.
"""
# Get params
source_currency_symbol = request.GET.get('source_currency')
exchanged_currency_symbol = request.GET.get('exchanged_currency')
amount = request.GET.get('amount')
# Validate params
empty_params_validator(source_currency_symbol, exchanged_currency_symbol, amount)
source_currency = currency_available_validator(source_currency_symbol)
exchanged_currency = currency_available_validator(exchanged_currency_symbol)
amount = float_validator(amount)
# Get data
data = get_exchanged_currency_amount(source_currency, exchanged_currency, amount)
return Response(data, status=status.HTTP_200_OK)
@api_view(['GET'])
def twr(request):
""" time-weighted rate of return for any given amount invested from a currency into another one from given date
until today:
Parameters: source_currency, amount, exchanged_currency, start_date
Response: an object containing the rate value between source and exchanges currencies along with the currencies and
start_date
"""
source_currency_symbol = request.GET.get('source_currency')
exchanged_currency_symbol = request.GET.get('exchanged_currency')
amount = request.GET.get('amount')
date_from = request.GET.get('date_from')
# Validate params
empty_params_validator(source_currency_symbol, exchanged_currency_symbol, amount, date_from)
source_currency = currency_available_validator(source_currency_symbol)
exchanged_currency = currency_available_validator(exchanged_currency_symbol)
start_date = date_validator(date_from)
amount = float_validator(amount)
# Get data
data = get_time_weighted_rate_return(source_currency, exchanged_currency, start_date, amount)
return Response(data, status=status.HTTP_200_OK)
| PedroDDiez/django-adapters | api/views.py | views.py | py | 3,265 | python | en | code | 1 | github-code | 13 |
35175424568 | import numpy as np
import bisect
class PeakInfo:
def __init__(self, name, prev_name, path, is_watched):
self.name = name
self.prev_name = prev_name
self.path = path
self.is_watched = is_watched
def __lt__(self, other):
if self.is_watched == other.is_watched:
if self.path < other.path:
result = True
else:
result = False
elif self.is_watched and not other.is_watched:
result = False
elif not self.is_watched and other.is_watched:
result = True
return result
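    # Ordering note: unwatched peaks sort before watched ones, and within the
    # same watched state the smaller path wins, so a sorted processing list
    # always has the cheapest unvisited peak at index 0 (the Dijkstra pick).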
def __str__(self):
        return '(peak {}, path {}, is watched {})'.format(self.name, self.path, self.is_watched)
def build_peaks_outputs_from_edges(peaks_list, edges_dict):
peaks = {}
for peak in peaks_list:
peaks[peak] = []
edges_keys = edges_dict.keys()
for edge in edges_keys:
peaks[edge[0]].append(edge[1])
return peaks
def paths_info(processed_list, start):
prev_dict = {}
paths_info_lines = []
for peak_info in processed_list:
prev_dict[peak_info.name] = peak_info.prev_name
for peak_info in processed_list:
path_peaks = []
current_peak = peak_info.name
        while current_peak is not None:
path_peaks.append(current_peak)
current_peak = prev_dict[current_peak]
path_line = ""
path_peaks = path_peaks[::-1]
for peak in path_peaks:
path_line += " - " + peak
path_line = path_line[3:]
paths_info_lines.append([path_line, peak_info.path])
return paths_info_lines
peaks_list = ['V_1', 'V_2', 'V_3', 'V_4']
start = 'V_1'
finish = 'V_4'
edges = {}
edges[('V_1', 'V_2')] = 1
edges[('V_1', 'V_3')] = 4
edges[('V_2', 'V_3')] = 2
edges[('V_3', 'V_2')] = 3
edges[('V_3', 'V_4')] = 1
peaks_outputs = build_peaks_outputs_from_edges(peaks_list, edges)
processing_list = [PeakInfo(start, None, 0, False)]
for peak in peaks_list:
if peak != start:
processing_list.append(PeakInfo(peak, None, np.inf, False))
processed_amount = 0
while processed_amount < len(peaks_list):
current_item = processing_list.pop(0)
current_item.is_watched = True
bisect.insort(processing_list, current_item)
next_peaks_names = peaks_outputs[current_item.name]
for next_peak_name in next_peaks_names:
next_item = list(filter(lambda x: x.name == next_peak_name, processing_list))[0]
if current_item.path + edges[(current_item.name, next_item.name)] < next_item.path:
processing_list.remove(next_item)
next_item.path = current_item.path + edges[(current_item.name, next_item.name)]
next_item.prev_name = current_item.name
bisect.insort(processing_list, next_item)
processed_amount += 1
paths_info_lines = paths_info(processing_list, start)
for info in paths_info_lines:
print("{} total coast: {}".format(info[0], info[1]))
| Sergey-Dvoraninovich/SAiIO | LR6.py | LR6.py | py | 2,977 | python | en | code | 0 | github-code | 13 |
12098598027 | # Problem 4:
# Largest Palindrome Product
#
# Description:
# A palindromic number reads the same both ways.
# The largest palindrome made from the product of
# two 2-digit numbers is 9009 = 91 × 99.
#
# Find the largest palindrome made from the product of two 3-digit numbers.
from math import ceil, floor
from typing import Tuple
def is_palindrome(x: int) -> bool:
"""
Return True iff `x` is a 'palindrome' (i.e. a palindromic number),
meaning its digits read the same both forwards and backwards.
Args:
x (int): Natural number
Returns:
(bool): True iff `x` is a palindrome
"""
# Choosing not to use cheap trick with str
digits = []
while x > 0:
digits.append(x % 10)
x //= 10
return digits == digits[::-1]
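# Example: is_palindrome(9009) -> True, is_palindrome(9010) -> False.
# Note the x > 0 assumption: for x = 0 the digit loop never runs and the empty
# digit list compares equal to itself, so the function returns True.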
def main(n: int) -> Tuple[int, int, int]:
"""
Return a 3-tuple containing the largest palindrome made from the product of two `n`-digit numbers,
as well as the two factors themselves.
Note that this is specifically in decimal (base-10).
Args:
n (int): Natural number
Returns:
(Tuple[int, int, int]):
3-tuple of the largest palindromic product of two `n`-digit numbers as ...
* Factor `x`
* Factor `y`
* Product (x * y)
Raises:
AssertError: if incorrect params are given
"""
assert type(n) == int and n > 0
# Iterate downwards from the highest possible product... but in a ~cool~ way!
# Go through every potential pair of factors in the following way,
# choose the midpoint of the two factors,
# then step the factors away from the midpoint until one is out of bounds.
# Bounds
bnd_hi = 10 ** n - 1
bnd_lo = 10 ** (n-1)
# Start iterating midpoint from its highest possible value
mid = bnd_hi
while mid >= bnd_lo:
lo = floor(mid)
hi = ceil(mid)
while bnd_lo <= lo and hi <= bnd_hi:
prod = lo * hi
if is_palindrome(prod):
return lo, hi, prod
else:
lo -= 1
hi += 1
mid -= 0.5
return -1, -1, -1
if __name__ == '__main__':
d = int(input('Enter a number of digits: '))
p, q, r = main(d)
print('Largest palindromic product of two {}-digit numbers:'.format(d))
    print('  {} * {} = {}'.format(p, q, r))
| mihiryerande/project-euler-004 | main.py | main.py | py | 2,411 | python | en | code | 0 | github-code | 13 |
32044834954 | import requests
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.display import display,Image,display_jpeg
from google.colab import drive
drive.mount('/content/drive/')
path = '/content/drive/My Drive/Colab Notebooks/youtube_app'
df = pd.read_table(path+'/JPvideos.csv', delimiter=",")
df_thumbnail_link = pd.DataFrame({'video_id': df['video_id'], 'thumbnail_link': df['thumbnail_link']})
df_thumbnail_link['thumbnail_link'] = df_thumbnail_link['thumbnail_link'].str.replace('default.jpg', 'maxresdefault.jpg')
df_thumbnail_link.head(3)
# https://sonaeru-blog.com/image-ai/#i-3
class Youtube_DB:
def __init__(self, ):
self.db = [[], [], [], [], []]
def img_download(self, video_id, thumbnail_link):
response = requests.get(thumbnail_link, allow_redirects=False)
if response.status_code == 200:
img = response.content
display_jpeg(Image(img))
def add_user_db(self, user_id):
nan = float("nan")
def print_db(self, ):
        for i in self.db:
            print(len(i))
print(self.db)
def download_img(url, file_name):
# print(url, file_name)
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path+'/imagiai/images/' +file_name+'.jpg', 'wb') as f:
f.write(r.content)
download_list.append(file_name)
download_list = []
for i in range(20):
download_img(df_thumbnail_link['thumbnail_link'][i], df_thumbnail_link['video_id'][i])
print(download_list)
| tsurusekazuki/YouTure-prot | models/youtube/download-thumbnail.py | download-thumbnail.py | py | 1,545 | python | en | code | 0 | github-code | 13 |
13513989946 | from ebcli.core.abstractcontroller import AbstractBaseController
from ebcli.resources.strings import strings, flag_text
from ebcli.operations import listops
class ListController(AbstractBaseController):
class Meta:
label = 'list'
description = strings['list.info']
usage = 'eb list [options ...]'
arguments = [
(['-a', '--all'], dict(action='store_true',
help=flag_text['list.all']))
]
def do_command(self):
all_apps = self.app.pargs.all
if not all_apps:
app_name = self.get_app_name()
else:
app_name = None
verbose = self.app.pargs.verbose
listops.list_env_names(app_name, verbose, all_apps)
| aws/aws-elastic-beanstalk-cli | ebcli/controllers/list.py | list.py | py | 756 | python | en | code | 150 | github-code | 13 |
35857081437 | #!/usr/bin/python3
#(c) 2017 Todd Riemenschneider
#
#Enable Multiprocessing
from multiprocessing import Pool
#getpass will not display password
from getpass import getpass
#ConnectionHandler is the function used by netmiko to connect to devices
from netmiko import ConnectHandler
#Time tracker
from time import time
#create variables for username and password
#create variables for configs and hosts
uname = input("Username: ")
passwd = getpass("Password: ")
# Interactively add the hosts
#host = input("Enter the host IPs seperate with space: ")
#This will allow you to just press enter
#This sets default values Not recommanded in any place but a lab
if len(uname) < 1 : uname = "admin"
if len(passwd) < 1 : passwd = "automate"
#This will put hosts and commands entered into list format
#hosts = host.split()
# To manually add hosts to scripts just follow the format below
#hosts = ["1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4"]
# use hostfile
with open('host_file.txt') as f:
hosts = f.read().splitlines()
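# host_file.txt is assumed to contain one device IP per line, e.g.:
#   10.10.10.1
#   10.10.10.2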
starting_time = time()
#Each member of the pool of 5 will be run through this function
def run_script(host_ip):
ios_rtr = {
"device_type": "cisco_ios",
"ip": host_ip,
"username": uname,
"password": passwd,
}
err_host = []
nl = "\n"
#connect to the device via ssh
try:
net_connect = ConnectHandler(**ios_rtr)
host_name = net_connect.find_prompt()
# This is the first command issued
output1 = net_connect.send_command('show version', use_textfsm=False)
print("Connected to host:", host_ip)
print(output1)
host_id = "Connected to host: " + host_ip
print('\n---- Elapsed time=', time()-starting_time)
# This is the second command issued
output2 = net_connect.send_command('show cdp nei', use_textfsm=False)
print("Connected to host:", host_ip)
print(output2)
host_id = "Connected to host: " + host_ip
print('\n---- Elapsed time=', time()-starting_time)
# This is the third command issued
output3 = net_connect.send_command('show inventory', use_textfsm=False)
print("Connected to host:", host_ip)
print(output3)
host_id = "Connected to host: " + host_ip
print('\n---- Elapsed time=', time()-starting_time)
# This is the fourth command issued
output4 = net_connect.send_command('show run', use_textfsm=False)
print("Connected to host:", host_ip)
print(output4)
host_id = "Connected to host: " + host_ip
print('\n---- Elapsed time=', time()-starting_time)
with open(host_ip + "_discovery_file.txt", 'w') as file:
file.write(host_id)
file.write(nl)
file.write(host_name)
file.write(nl)
file.write(output1)
file.write(nl)
file.write(host_id)
file.write(nl)
file.write(host_name)
file.write(nl)
file.write(output2)
file.write(nl)
file.write(host_id)
file.write(nl)
file.write(host_name)
file.write(nl)
file.write(output3)
file.write(nl)
file.write(host_id)
file.write(nl)
file.write(host_name)
file.write(nl)
file.write(output4)
file.write(nl)
except Exception as unknown_error:
# Error handling - Print output to screen
print("************************************")
print("Unable to log into this device:", host_ip)
print(unknown_error)
print("************************************")
# Error handling - record to file
with open("Connection_Errors", "a") as err_log:
err_log.write("Error connecting to the following devices")
err_log.write(nl)
err_log.write(str(unknown_error))
err_log.write(nl)
err_log.write(host_ip)
err_log.write(nl)
if __name__ == "__main__":
    # Pool(5) means 5 processes run at a time; remaining hosts go in the next group
with Pool(5) as p:
print(p.map(run_script, hosts))
| twr14152/Network-Automation-Scripts_Python3 | netmiko/NetworkDiscovery/host_file_and_script/archive/discovery_script.py | discovery_script.py | py | 4,223 | python | en | code | 52 | github-code | 13 |
2929216459 | import json
from functools import partial
from tornado.httpclient import AsyncHTTPClient
GEMS_URL = 'https://rubygems.org/api/v1/versions/%s.json'
def get_version(name, conf, callback):
repo = conf.get('gems') or name
url = GEMS_URL % repo
AsyncHTTPClient().fetch(url, user_agent='lilydjwg/nvchecker',
callback=partial(_gems_done, name, callback))
def _gems_done(name, callback, res):
data = json.loads(res.body.decode('utf-8'))
version = data[0]['number']
callback(name, version)
| amazingfate/nvchecker | nvchecker/source/gems.py | gems.py | py | 523 | python | en | code | null | github-code | 13 |
73727892176 | '''
> shortest_path(map_40, 5, 34)
[5, 16, 37, 12, 34]
'''
import math
def shortest_path(M,start,goal):
### data in M
## 1. M.intersections - dict - x,y coordinate of every node.
# {0: [0.7801603911549438, 0.49474860768712914],
# 1: [0.5249831588690298, 0.14953665513987202],
# 2: [0.8085335344099086, 0.7696330846542071],
# ......
# 37: [0.3345284735051981, 0.6569436279895382],
# 38: [0.17972981733780147, 0.999395685828547],
# 39: [0.6315322816286787, 0.7311657634689946]}
## 2. M.roads - list - roads[i] contains a list of the intersections that node i connects to.
# [[36, 34, 31, 28, 17],
# [35, 31, 27, 26, 25, 20, 18, 17, 15, 6],
# [39, 36, 21, 19, 9, 7, 4],
# ......
# [12, 16, 22, 29],
# [23, 29, 32],
# [2, 4, 7, 22, 28, 36]]
## initialize
frontier = set()
known = set()
path = []
## calculate distance between two nodes
def distance(i,j):
return math.sqrt((M.intersections[i][0] - M.intersections[j][0])**2 + (M.intersections[i][1] - M.intersections[j][1])**2)
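    ## distance() doubles as the A* heuristic: the straight-line (Euclidean)
    ## length between two intersections. Since a road can never be shorter than
    ## the straight line, h stays admissible and A* returns optimal paths.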
## initialize start node
node = {}
node[start] = {'g': 0,
'h': distance(start, goal),
'f': 0 + distance(start, goal),
'parent': None }
## add start to frontier
frontier.add(start)
## choose the node with minimal f in frontier
def choose_search_node(frontier):
## get the f values of i in frontier
f_frontier = {}
for i in frontier:
f_frontier[i] = node[i]['f']
## return the node number that has minimal f
return min(f_frontier, key=f_frontier.get)
    ## search the neighbors of node i with minimal f, updating g and f of its neighbors;
    ## add neighbors to the frontier, move i from frontier to known
def search(i):
for j in M.roads[i]:
## choose neighbors that is not in known
if j not in known:
## if j hasn't been searched before, initialize j
if j not in frontier:
node[j] = {'g': node[i]['g'] + distance(i, j),
'h': distance(j, goal),
'f': node[i]['g'] + distance(i, j) + distance(j, goal),
'parent': i }
## if j has been searched, update node j if f of j is getting smaller
else:
if (node[i]['g'] + distance(i, j) + distance(j, goal)) < node[j]['f']:
node[j] = {'g': node[i]['g'] + distance(i, j),
'h': distance(j, goal),
'f': node[i]['g'] + distance(i, j) + distance(j, goal),
'parent': i }
## add j to frontier
frontier.add(j)
## move i from frontier to known
frontier.remove(i)
known.add(i)
return frontier, known
    while frontier:
## choose search node in frontier
i = choose_search_node(frontier)
## search in i's neighbors, updating frontier and known
frontier, known = search(i)
        ## once goal has been moved to known, its shortest path is settled; break the loop
if goal in known:
break
## if find a shortest path to goal, then trace back to start, return the path
if goal in known:
i = goal
while i:
path.append(i)
i = node[i]['parent']
path.reverse()
return path
## if the frontier is empty, yet the goal hasn't been reached, return path not found
    if not frontier:
        return "path not found"
| Xing-Kai/Intro_to_Self_Driving_Car | 6_Navigating_Data_Structures/Project_Implement_Route_Planner/student_code_02.py | student_code_02.py | py | 3,143 | python | en | code | 1 | github-code | 13 |
72755316178 | import json
from django.http import HttpResponse
from hashlib import md5
from scratch_api.models import FavoriteProduction, User, Production,Gallery, LikeProduction, CommentEachOther,FavoriteGallery,LikeGallery
def website_ajax_favorite(request, production):
"""
ajax favorite a production
:param request:
:param production: production object
:return: a json of result
"""
c = {}
baseuser = request.user
user = User.objects.get(username=baseuser)
if FavoriteProduction.objects.filter(user=user, production=production).exists():
        # if already favorited, delete it (toggle behavior)
FavoriteProduction.objects.get(user=user, production=production).delete()
else:
FavoriteProduction(user=user, production_id=production).save()
response_data = {}
response_data['result'] = 'Success!'
return HttpResponse(json.dumps(response_data), content_type="application/json")
def website_ajax_like(request, production):
"""
similar to website_ajax_favorite
:param request:
:param production:
:return:
"""
production = Production.objects.get(pk=production)
user = request.user
response_data = {}
try:
if user.is_anonymous():
ip_address = request.META.get('HTTP_X_FORWARDED_FOR', request.META['REMOTE_ADDR'])
s = u"".join((ip_address, request.META.get('HTTP_USER_AGENT', '')))
token = md5(s.encode('utf-8')).hexdigest()
if not LikeProduction.objects.filter(user=None, token=token, production=production).exists():
LikeProduction(user=None, token=token, production=production).save()
production.like += 1
production.save(update_fields=('like',))
else:
if not LikeProduction.objects.filter(user=user, token=None, production=production):
LikeProduction(user=user, token=None, production=production).save()
production.like += 1
production.save(update_fields=('like',))
response_data['result'] = 'Success!'
except Exception as e:
print(e)
response_data['result'] = 'Fail!'
return HttpResponse(json.dumps(response_data), content_type="application/json")
def website_ajax_comment_eachother(request, production):
"""
:param request:
:param production: production object
:return: a json of result
"""
c = {}
response_data = {}
baseuser = request.user
score = int(request.GET.get("score"))
try:
user = User.objects.get(username=baseuser)
if CommentEachOther.objects.filter(user=user, production=production).exists():
            # subtract the previous score, then add the new score
project = Production.objects.get(pk=production)
project.comment_eachother_all_score = project.comment_eachother_all_score - CommentEachOther.objects.get(user=user, production=production).comment_score
project.comment_eachother_all_score = project.comment_eachother_all_score + score
project.save(update_fields=("comment_eachother_all_score", ))
p = CommentEachOther.objects.get(user=user, production=production)
p.comment_score = score
p.save()
else:
            # just add the new score directly
project = Production.objects.get(pk=production)
CommentEachOther.objects.create(user=user, production=project, comment_score=score)
project.comment_eachother_all_score = project.comment_eachother_all_score + score
project.save(update_fields=("comment_eachother_all_score", ))
response_data['result'] = 'Success!'
except Exception as e:
print(e)
response_data['result'] = 'Fail!'
return HttpResponse(json.dumps(response_data), content_type="application/json")
def website_inbox_readall(request):
"""
mark readall in inbox
:param request:
:return:
"""
c = {}
user = request.user
user.notifications.mark_all_as_read()
response_data = {}
response_data['result'] = 'Success!'
return HttpResponse(json.dumps(response_data), content_type="application/json")
def website_ajax_gallery_favorite(request, gallery):
"""
ajax favorite a gallery
:param request:
    :param gallery: gallery object
:return: a json of result
"""
c = {}
baseuser = request.user
user = User.objects.get(username=baseuser)
if FavoriteGallery.objects.filter(user=user, gallery=gallery).exists():
        # if already favorited, delete it (toggle behavior)
FavoriteGallery.objects.get(user=user, gallery=gallery).delete()
else:
FavoriteGallery(user=user, gallery_id=gallery).save()
response_data = {}
response_data['result'] = 'Success!'
return HttpResponse(json.dumps(response_data), content_type="application/json")
def website_ajax_gallery_like(request, gallery):
"""
similar to website_ajax_gallery_favorite
:param request:
    :param gallery: gallery object
:return:
"""
gallery = Gallery.objects.get(pk=gallery)
user = request.user
response_data = {}
try:
if user.is_anonymous():
ip_address = request.META.get('HTTP_X_FORWARDED_FOR', request.META['REMOTE_ADDR'])
s = u"".join((ip_address, request.META.get('HTTP_USER_AGENT', '')))
token = md5(s.encode('utf-8')).hexdigest()
print(token)
if not LikeGallery.objects.filter(user=None, token=token, gallery=gallery).exists():
LikeGallery(user=None, token=token, gallery=gallery).save()
gallery.like += 1
gallery.save(update_fields=('like',))
else:
if not LikeGallery.objects.filter(user=user, token=None, gallery=gallery):
LikeGallery(user=user, token=None, gallery=gallery).save()
gallery.like += 1
gallery.save(update_fields=('like',))
response_data['result'] = 'Success!'
except Exception as e:
print(e)
response_data['result'] = 'Fail!'
return HttpResponse(json.dumps(response_data), content_type="application/json")
def update_ajax_liveTimeSum(request,productionid,liveTimeSum):
"""
ajax update the liveTimeSum
    :param request:
    :param productionid: id of the production to update
    :param liveTimeSum: elapsed live time to add, in milliseconds
:return: a json of result
"""
print("productionid="+productionid)
response_data = {}
production = Production.objects.get(pk=productionid)
user = User.objects.get(pk = production.author_id)
    production.production_duration += int(liveTimeSum)  # the recorded time unit is milliseconds
production.save(update_fields=('production_duration',))
print("production.production_duration增加了:" + str(liveTimeSum))
print("production.production_duration=" + str(production.production_duration))
user.coding_duration +=int(liveTimeSum)
user.save(update_fields=('coding_duration',))
print("user.coding_duration=" + str(user.coding_duration))
response_data['result'] = 'Success!'
return HttpResponse(json.dumps(response_data), content_type="application/json") | liqiniuniu/- | website/ajax_views.py | ajax_views.py | py | 7,129 | python | en | code | 0 | github-code | 13 |
71698214098 | from flask import Flask, render_template, request
from textblob import TextBlob
app = Flask(__name__)
def predict_sentiment(text):
analysis = TextBlob(text)
# Use TextBlob's polarity to predict sentiment
if analysis.sentiment.polarity > 0:
return 'Positive'
elif analysis.sentiment.polarity == 0:
return 'Neutral'
else:
return 'Negative'
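# Illustrative behavior (assuming TextBlob's default analyzer):
#   predict_sentiment("I love this product")   -> 'Positive'
#   predict_sentiment("The box is on a table") -> 'Neutral'
#   predict_sentiment("I hate waiting")        -> 'Negative'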
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
text = request.form['textInput']
sentiment = predict_sentiment(text)
return render_template('index.html', sentiment=sentiment)
return render_template('index.html')
if __name__ == "__main__":
app.run(debug=True) | smartinternz02/SI-GuidedProject-612159-1699512337 | app.py | app.py | py | 737 | python | en | code | 0 | github-code | 13 |
37503651959 | #!/usr/bin/env python3
def calculate():
limit = 10**6
solutions = [0] * limit
for i in range(1, limit * 2):
for j in range(i // 5 + 1, (i + 1) // 2):
temp = (i - j) * (j * 5 - i)
if temp >= limit:
break
solutions[temp] += 1
answer = solutions.count(10)
return str(answer)
if __name__ == "__main__":
print(calculate())
| sayantan3/project-euler | pep_135.py | pep_135.py | py | 410 | python | en | code | 0 | github-code | 13 |
30138679522 | from tests.source.shotgun.base import ShotgunTestCase
from zou.app.models.project import Project
from zou.app.models.person import Person
from zou.app.models.task_type import TaskType
from zou.app.models.task_status import TaskStatus
from zou.app.services import assets_service, shots_service, tasks_service
class ImportShotgunTaskTestCase(ShotgunTestCase):
def setUp(self):
super(ImportShotgunTaskTestCase, self).setUp()
self.load_fixture("persons")
self.load_fixture("projects")
self.load_fixture("status")
self.load_fixture("steps")
self.load_fixture("assets")
self.load_fixture("sequences")
self.load_fixture("shots")
self.load_fixture("scenes")
def load_task(self):
self.sg_task = {
"cached_display_name": "Shading",
"created_by": {"id": 1, "name": "John Doe", "type": "HumanUser"},
"due_date": None,
"duration": 7200,
"entity": {"id": 1, "name": "Sheep", "type": "Asset"},
"id": 20,
"project": {"id": 1, "name": "Agent327", "type": "Project"},
"sg_description": "test description",
"sg_sort_order": None,
"sg_status_list": "wip",
"start_date": None,
"step": {"id": 1, "name": "Modeling Shading", "type": "Step"},
"task_assignees": [
{"id": 2, "name": "Ema Peel", "type": "HumanUser"}
],
"type": "Task",
}
api_path = "/import/shotgun/tasks"
self.tasks = self.post(api_path, [self.sg_task], 200)
def load_sequence_task(self):
self.sg_task = {
"cached_display_name": "Previz",
"created_by": {"id": 1, "name": "John Doe", "type": "HumanUser"},
"due_date": None,
"duration": 7200,
"entity": {"id": 1, "name": "S01", "type": "Sequence"},
"id": 20,
"project": {"id": 1, "name": "Agent327", "type": "Project"},
"sg_description": "test description",
"sg_sort_order": None,
"sg_status_list": "wip",
"start_date": None,
"step": {"id": 1, "name": "Modeling Shading", "type": "Step"},
"task_assignees": [
{"id": 2, "name": "Ema Peel", "type": "HumanUser"}
],
"type": "Task",
}
api_path = "/import/shotgun/tasks"
self.tasks = self.post(api_path, [self.sg_task], 200)
def load_scene_task(self):
self.sg_task = {
"cached_display_name": "Layout",
"created_by": {"id": 1, "name": "John Doe", "type": "HumanUser"},
"due_date": None,
"duration": 7200,
"entity": {"id": 1, "name": "SC01", "type": "Scene"},
"id": 20,
"project": {"id": 1, "name": "Agent327", "type": "Project"},
"sg_description": "test description",
"sg_sort_order": None,
"sg_status_list": "wip",
"start_date": None,
"step": {"id": 3, "name": "Layout", "type": "Step"},
"task_assignees": [
{"id": 2, "name": "Ema Peel", "type": "HumanUser"}
],
"type": "Task",
}
api_path = "/import/shotgun/tasks"
self.tasks = self.post(api_path, [self.sg_task], 200)
def test_import_tasks(self):
self.tasks = self.load_fixture("tasks")
self.assertEqual(len(self.tasks), 2)
self.tasks = self.get("data/tasks")
self.assertEqual(len(self.tasks), 2)
def test_import_tasks_twice(self):
self.tasks = self.load_fixture("tasks")
self.tasks = self.load_fixture("tasks")
self.tasks = self.get("data/tasks")
self.assertEqual(len(self.tasks), 2)
def test_import_task(self):
self.load_task()
self.assertEqual(len(self.tasks), 1)
self.tasks = self.get("data/tasks")
self.assertEqual(len(self.tasks), 1)
task = self.tasks[0]
task = tasks_service.get_task_with_relations(task["id"])
project = Project.get_by(name=self.sg_task["project"]["name"])
task_type = TaskType.get_by(name=self.sg_task["step"]["name"])
task_status = TaskStatus.get_by(
short_name=self.sg_task["sg_status_list"]
)
assets = assets_service.get_assets(
{"shotgun_id": self.sg_task["entity"]["id"]}
)
entity = assets[0]
assigner = Person.get_by(
last_name=self.sg_task["created_by"]["name"].split(" ")[1]
)
assignee = Person.get_by(
last_name=self.sg_task["task_assignees"][0]["name"].split(" ")[1]
)
self.assertEqual(task["name"], self.sg_task["cached_display_name"])
self.assertEqual(task["duration"], self.sg_task["duration"])
self.assertEqual(task["shotgun_id"], self.sg_task["id"])
self.assertEqual(task["project_id"], str(project.id))
self.assertEqual(task["task_type_id"], str(task_type.id))
self.assertEqual(task["task_status_id"], str(task_status.id))
self.assertEqual(task["entity_id"], entity["id"])
self.assertEqual(task["assigner_id"], str(assigner.id))
self.assertEqual(task["assignees"][0], str(assignee.id))
def test_import_sequence_task(self):
self.load_sequence_task()
sequences = shots_service.get_sequences({"shotgun_id": 1})
self.tasks = self.get("data/tasks?entity_id=%s" % sequences[0]["id"])
self.assertEqual(len(self.tasks), 1)
def test_import_scene_task(self):
self.load_scene_task()
scenes = shots_service.get_scenes({"shotgun_id": 1})
self.tasks = self.get("data/tasks?entity_id=%s" % scenes[0]["id"])
self.assertEqual(len(self.tasks), 1)
def test_import_remove_task(self):
self.load_task()
api_path = "/import/shotgun/remove/task"
sg_task = {"id": self.sg_task["id"]}
self.tasks = self.get("data/tasks?shotgun_id=%s" % self.sg_task["id"])
task = self.tasks[0]
response = self.post(api_path, sg_task, 200)
self.assertEqual(response["removed_instance_id"], task["id"])
self.tasks = self.get("data/tasks?shotgun_id=%s" % self.sg_task["id"])
self.assertEqual(len(self.tasks), 0)
| cgwire/zou | tests/source/shotgun/test_shotgun_import_tasks.py | test_shotgun_import_tasks.py | py | 6,363 | python | en | code | 152 | github-code | 13 |
11562287151 | import os
from starter_code_section_5.models.item import ItemModel
from starter_code_section_5.tests.base_test import BaseTest
class ItemModelIntegrationTest(BaseTest):
# printing out the running unit test file location/name
print("Running unit tests from: " +
os.path.dirname(__file__) +
'\\' + os.path.basename(__file__) +
"\n")
def test_crud(self):
with self.app_context():
name = 'product'
price = 99.99
item = ItemModel(name, price)
self.assertIsNone(ItemModel.find_by_name(name)) # make sure that item does not exist in db
item.save_to_db() # save an item into db
self.assertIsNotNone(ItemModel.find_by_name(name)) # make sure that item does exist in db
item.delete_from_db()
self.assertIsNone(ItemModel.find_by_name(name)) # make sure that item does not exist in db
| ikostan/automation_with_python | starter_code_section_5/tests/integration/models/test_item.py | test_item.py | py | 930 | python | en | code | 0 | github-code | 13 |
31504405971 | from django.shortcuts import render,redirect, get_object_or_404, reverse
from .forms import ArticleForm, CommentForm
from django.contrib import messages
from .models import Article, Comment, Tag
from django.utils.text import slugify
from django.contrib.auth.decorators import permission_required, login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
@login_required
def add(request):
form = ArticleForm(request.POST or None, request.FILES or None)
if form.is_valid():
article = form.save(commit=False)
article.author = request.user
article.slug = slugify(form.data['title'])
article.save()
article.tags.add(form.data['tags'])
        messages.success(request,'Article added')
return redirect('user:dashboard')
return render(request,'article/add.html',{'form':form})
@login_required
def edit(request, id):
article = get_object_or_404(Article, id=id)
if request.user.has_perm('article.change_article') or article.author==request.user:
form = ArticleForm(instance=article,data=request.POST or None, files=request.FILES or None)
if form.is_valid():
form.save()
            messages.success(request,'Article edited')
return redirect('article:detail',id=article.id, slug=article.slug)
return render(request, 'article/edit.html',{'form':form})
else:
        messages.warning(request,'You do not have permission to do this')
return redirect('index')
@login_required
def delete(request, id):
article = get_object_or_404(Article, id=id)
if request.user.has_perm('article.delete_article') or article.author==request.user:
article.delete()
        messages.success(request,'Article deleted')
return redirect('index')
else:
        messages.warning(request,'You do not have permission to do this')
return redirect('index')
def detail(request,id,slug='default'):
article = get_object_or_404(Article,id=id)
form = CommentForm(request.POST or None)
if form.is_valid() and request.user.is_authenticated:
comment = form.save(commit=False)
comment.article = article
comment.author = request.user
comment.save()
form = CommentForm()
return render(request,'article/detail.html',{'article':article,'form':form})
# TAG
def tags(request,name):
articles = Article.objects.filter(tags__name=name)
paginator = Paginator(articles, 4)
page = request.GET.get('page', 1)
tags = Tag.objects.all()
try:
articles = paginator.get_page(page)
except PageNotAnInteger:
articles = paginator.get_page(1)
except EmptyPage:
articles = paginator.get_page(paginator.num_pages)
return render(request,'index.html',{'articles':articles,'tags':tags}) | LitmusPaper/zaknews | article/views.py | views.py | py | 2,819 | python | en | code | 0 | github-code | 13 |
7679304435 | import os
# pid = os.fork()
# if pid == 0:
# print("I am from father process")
# else:
# print("I am from child process")
from multiprocessing import Process, Pool
# 子进程要执行的代码
def run_proc(name):
print('Run child process %s (%s)...' % (name, os.getpid()))
if __name__=='__main__':
print('Parent process %s.' % os.getpid())
    pool = Pool(4)
    pool.apply_async(run_proc, args=('test',))
    pool.close()  # no more tasks will be submitted
    pool.join()   # wait for the worker to finish before the parent exits
# p = Process(target=run_proc, args=('test',))
# print('Child process will start.')
# p.start()
# p.join()
print('Child process end.')
# Python's multiprocessing and Queue also support distributed deployment
| JesseCodeBones/python_study_2 | process_1.py | process_1.py | py | 638 | python | en | code | 0 | github-code | 13 |
70505585299 | # coding: utf-8
import base64
import datetime
import threading
def data_uri(data):
return 'data:image/png;base64,' + base64.b64encode(data)
def daterange(start_date=None, end_date=None, date_range=None):
if date_range:
start_date = min(date_range)
end_date = max(date_range)
for n in xrange((end_date - start_date).days):
yield start_date + datetime.timedelta(n)
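# Example: daterange(datetime.date(2020, 1, 1), datetime.date(2020, 1, 4))
# yields Jan 1, 2 and 3; the end date is exclusive. Passing date_range=[...]
# instead derives start and end from its min and max.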
def wait_for_threads(threads):
for t in threads:
t.start()
for t in threads:
if t.is_alive():
t.join()
def parallel_foreach(func, iterable):
threads = [threading.Thread(target=func, args=(item,))
for item in iterable]
wait_for_threads(threads)
| berkerpeksag/github-badge | app/helpers.py | helpers.py | py | 712 | python | en | code | 290 | github-code | 13 |
3638873510 | import json
import argparse
import numpy as np
from distutils.version import LooseVersion
parser = argparse.ArgumentParser(description="Make a combined arch-specific core package list")
parser.add_argument("--linux",
help="conda list json file with list of linux packages")
parser.add_argument("--osx",
help="conda list json file with list of osx packages")
parser.add_argument("--out",
help="filename for output file with combined list of files"
" for use in metapackage ")
args = parser.parse_args()
def core_pkgs(pkgs):
# This defines the "core" packages as everything that came from defaults
# channel except the conda (conda, conda-build, conda-env, etc) packages
core = {p['name']:p['version'] for p in pkgs if ((p['channel'] == 'defaults')
& (not p['name'].startswith('conda')))}
return core
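# Example (hypothetical entries): conda* packages are excluded even when they
# come from the defaults channel:
#   core_pkgs([{'name': 'numpy', 'version': '1.16.2', 'channel': 'defaults'},
#              {'name': 'conda-build', 'version': '3.17.8', 'channel': 'defaults'}])
#   -> {'numpy': '1.16.2'}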
pkgs = {'linux': core_pkgs(json.load(open(args.linux))),
'osx': core_pkgs(json.load(open(args.osx)))}
full = np.unique(list(pkgs['linux']) + list(pkgs['osx']))
pkglist = []
for p in full:
# For packages that are automatically installed (by dependency request) on both
# OSes, here we've just selected the minimum version to require. The actual set of
# packages still needs to be tested after creation.
if p in pkgs['linux'] and p in pkgs['osx']:
versions = [pkgs['linux'][p], pkgs['osx'][p]]
versions.sort(key=LooseVersion)
pkglist.append(" - {} =={}".format(p, versions[0]))
if p in pkgs['linux'] and p not in pkgs['osx']:
pkglist.append(" - {} =={} [linux]".format(p, pkgs['linux'][p]))
if p in pkgs['osx'] and p not in pkgs['linux']:
pkglist.append(" - {} =={} [osx]".format(p, pkgs['osx'][p]))
open(args.out, 'w').write("\n".join(pkglist))
| ddkauffman/skare3 | pkg_defs/ska3-core/combine_arch.py | combine_arch.py | py | 1,880 | python | en | code | 0 | github-code | 13 |
26260455223 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 12:23:25 2020
@author: vinmue
"""
import numpy as np
from tensorflow.keras.layers import Dense, Dropout, Input, BatchNormalization
from tensorflow.keras.models import load_model, clone_model
import tensorflow.keras as keras
from tensorflow.keras.optimizers import Adam
import tensorflow as tf
from tensorflow.keras.models import clone_model
class ReplayBuffer(object):
def __init__(self,state_len,mem_size):
self.state_len = state_len
self.mem_size = mem_size
self.mem_counter = 0
self.states = np.zeros((mem_size,state_len),dtype = np.int32)
self.actions = np.zeros(mem_size,dtype = np.int32)
self.rewards = np.zeros(mem_size, dtype = float)
self.new_states = np.zeros((mem_size,state_len),dtype = np.int32)
self.dones = np.zeros(mem_size,dtype = np.int32)
def store_transition(self,state, action, reward, new_state,done):
        # wrap the index so the buffer overwrites its oldest transitions
        # instead of indexing out of bounds once mem_counter exceeds mem_size
        index = self.mem_counter % self.mem_size
        self.states[index,:] = state
        self.actions[index] = action
        self.rewards[index] = reward
        self.new_states[index,:] = new_state
        self.dones[index] = done
        self.mem_counter += 1
def sample_memory(self,batch_size):
max_memory = min(self.mem_size,self.mem_counter)
batch = np.random.choice(np.arange(max_memory),batch_size,replace=False)
states = self.states[batch,:]
actions = self.actions[batch]
rewards = self.rewards[batch]
new_states = self.new_states[batch,:]
dones = self.dones[batch]
return states,actions,rewards,new_states, dones
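# Minimal usage sketch (hypothetical sizes):
#   buf = ReplayBuffer(state_len=4, mem_size=100000)
#   buf.store_transition(state, action, reward, new_state, done)
#   s, a, r, s2, d = buf.sample_memory(batch_size=64)
# sample_memory draws indices without replacement, so batch_size must not
# exceed the number of transitions stored so far.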
class DQNetwork(keras.Model):
def get_config(self):
pass
def __init__(self,state_len, n_actions,learning_rate, **layers_layout):
super(DQNetwork,self).__init__()
self.Input = Input(shape = (None, state_len))
self.layers_layout = layers_layout
self.learning_rate = learning_rate
self.n_actions = n_actions
self.layers_layout = layers_layout
for name, layer in layers_layout.items():
setattr(self, name,eval(layer))
print(name)
self.q = Dense(n_actions,activation =None)
#@tf.function
def call(self,state):
value = state
for name, layer in self.layers_layout.items():
value = getattr(self,name)(value)
q = self.q(value)
return q
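# The **layers_layout kwargs are strings eval'd into Keras layers; only classes
# imported above (Dense, Dropout, BatchNormalization) can appear in them.
# A hypothetical spec:
#   net = DQNetwork(4, 2, 1e-3,
#                   fc1="Dense(64, activation='relu')",
#                   fc2="Dense(64, activation='relu')")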
class DQAgent(object):
def __init__(self,learning_rate,gamma,batch_size,state_len, n_actions, min_memory_for_training,
epsilon,epsilon_min = 0.01,epsilon_dec = 1e-3,
mem_size=1000000, model_file = "dqn_model.h5", frozen_iterations=8, **layers_layout):
#input arguments
self.it_counter =0
self.gamma = gamma
self.batch_size = batch_size
self.state_len = state_len
self.n_actions = n_actions
self.epsilon = epsilon
self.epsilon_min = epsilon_min
self.epsilon_dec = epsilon_dec
self.mem_size = mem_size
self.model_file = model_file
self.min_memory_for_training = min_memory_for_training
#new attributes
self.q_policy = DQNetwork(state_len, n_actions,learning_rate,**layers_layout)
self.q_target = DQNetwork(state_len, n_actions,learning_rate,**layers_layout)
self.q_target.set_weights(self.q_policy.get_weights())
self.q_policy.compile(loss="mean_squared_error", optimizer=Adam(learning_rate))
self.q_target.compile(loss="mean_squared_error", optimizer=Adam(learning_rate))
self.replay_buffer = ReplayBuffer(self.state_len,mem_size)
#########
self.frozen_iterations = frozen_iterations
#loading model TODO
def store_transition(self, state, action, reward, new_state, done):
self.replay_buffer.store_transition(state,action,reward,new_state,done)
#@tf.function
def choose_action(self,state):
#print("states in choose action: ",state)
if np.random.random()<self.epsilon:
action = np.random.choice(self.n_actions)
else:
state = state.reshape(1,len(state))
q_st = self.q_policy(state)
action = np.argmax(q_st)
return int(action)
def learn(self):
if self.replay_buffer.mem_counter < self.min_memory_for_training:
return
states, actions, rewards, new_states, dones = self.replay_buffer.sample_memory(self.batch_size)
#print("states in learn: ",states)
#print("new_states: ",new_states)
states = tf.convert_to_tensor(states,dtype = tf.float32)
actions = tf.convert_to_tensor(actions,dtype = tf.int64)
rewards = tf.convert_to_tensor(rewards, dtype = tf.float32)
new_states = tf.convert_to_tensor(new_states, dtype = tf.float32)
dones = tf.convert_to_tensor(dones, dtype = tf.float32)
gamma = tf.constant(self.gamma)
with tf.GradientTape() as tape:
loss = 0
for i,(state, action, reward,new_state, done) in enumerate(zip(states, actions, rewards,new_states, dones)):
state = tf.expand_dims(state, axis =0)
new_state = tf.expand_dims(new_state,axis =0)
q_eval = tf.squeeze(self.q_policy(state))[action]
q_next = tf.squeeze(tf.reduce_max(self.q_target(new_state)))
q_target = reward + gamma * q_next
loss += tf.pow(q_target-q_eval,2)
gradient = tape.gradient(loss, self.q_policy.trainable_variables)
self.q_policy.optimizer.apply_gradients(zip(gradient, self.q_policy.trainable_variables))
self.epsilon = self.epsilon- self.epsilon_dec if self.epsilon - self.epsilon_dec \
> self.epsilon_min else self.epsilon_min
self.it_counter += 1
if self.it_counter % self.frozen_iterations == 0:
self.q_target.set_weights(self.q_policy.get_weights())
return | ViniTheSwan/ReinforcementTrading | parent/Trading/RL/DeepQLearningEager.py | DeepQLearningEager.py | py | 6,131 | python | en | code | 1 | github-code | 13 |
13379124013 | import errno
import itertools
import os
import sys
# pip install pptree
import pptree
# To make the start,end line working, put this line of code before importing ElementTree
sys.modules['_elementtree'] = None
import xml.etree.ElementTree as ET
# To make this working, use python 3.8 or older version
class LineNumberingParser(ET.XMLParser):
def _start(self, *args, **kwargs):
# Here we assume the default XML parser which is expat
# and copy its element position attributes into output Elements
element = super(self.__class__, self)._start(*args, **kwargs)
element._start_line_number = self.parser.CurrentLineNumber
element._start_column_number = self.parser.CurrentColumnNumber
element._start_byte_index = self.parser.CurrentByteIndex
return element
def _end(self, *args, **kwargs):
element = super(self.__class__, self)._end(*args, **kwargs)
element._end_line_number = self.parser.CurrentLineNumber
element._end_column_number = self.parser.CurrentColumnNumber
element._end_byte_index = self.parser.CurrentByteIndex
return element
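# Usage sketch: parsing with this subclass annotates every element with its
# source position, which insertNodeToTree reads further below:
#   root = ET.parse('target.xml', parser=LineNumberingParser()).getroot()
#   print(root._start_line_number, root._end_line_number)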
class Node:
def __init__(self, data='', start=0, end=0, parent=None):
self.start = start
self.end = end
self.parent = parent
self.children = []
self.name = data
def __repr__(self):
return self.name
def is_root(self):
return self.parent is None
def is_leaf(self):
return len(self.children) == 0
def depth(self):
if self.is_root():
return 0
else:
return 1 + self.parent.depth()
def display(self):
pptree.print_tree(self, 'children', 'name', True)
def add_child(self, node):
node.parent = self
self.children.append(node)
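# A minimal sketch of how nodes compose (hypothetical tags):
#   root = Node('math', start=1, end=10)
#   leaf = Node('mi', start=2, end=2)
#   root.add_child(leaf)   # leaf.parent is root; root.is_leaf() becomes False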
class Tree:
def __init__(self):
self.root = None
self.level = 0
self.nodes = []
def insert(self, node, parent):
if parent is not None:
parent.add_child(node)
else:
if self.root is None:
self.root = node
self.nodes.append(node)
def searchIndex(self, name):
occurrences = []
for i, N in enumerate(self.nodes):
if N.name == name:
occurrences.append(i)
return occurrences
def searchNode(self, name):
occurrences=[]
for N in self.nodes:
if N.name == name:
occurrences.append(N)
return occurrences
def searchInChildren(self,q,name):
occurrences = []
for i, N in enumerate(q.children):
if N.name == name:
occurrences.append(N)
return occurrences
def getNode(self, index):
return self.nodes[index]
def root(self):
return self.root
class FastTreeMatch:
def __init__(self):
self.numFiles = 0
self.docNumber = 0
self.targetTree = Tree()
self.candidateTree = Tree()
self.listOfStack={}
        # Tq is one level: for q = a, Tq = {a1, a2, ..., an}, and all occurrences share a single ancestor/parent.
        # Tq is the sorted list of occurrences of q, each represented by a triplet (start, end, level).
def insertNodeToTree(self, tree, data, parent):
# insert the current data
name = data.tag
if '}' in name:
name = data.tag.split("}", 1)[1] # ignore anything in the {}
parentNode = Node(name, data._start_line_number, data._end_line_number, parent)
tree.insert(parentNode, parent)
childNode = Node()
# if the current tag has value, then insert value as a child
if data.text.strip():
lineNumber = (data._end_line_number - data._start_line_number) // 2 + data._start_line_number
children = [data.text]
childNode = Node(data.text, lineNumber, lineNumber, parentNode)
# parentNode.add_child(childNode)
tree.insert(childNode, parentNode)
for child in data:
            if data.text and data.text.strip():
self.insertNodeToTree(tree, child, childNode)
else:
self.insertNodeToTree(tree, child, parentNode)
return tree
def makeATree(self, fileName):
parser = LineNumberingParser()
root = ET.parse(fileName, parser=parser).getroot()
tree = Tree()
if 'math' in root.tag and len(root)==1:
root = root[0]
# data, start, end, parent, children
tree = self.insertNodeToTree(tree, root, None)
return tree
def run(self, targetFile, dataset):
matchingFile = []
self.numFiles = len(dataset)
try:
self.targetTree = newTree.makeATree(targetFile)
# self.targetTree.root.display()
except:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), targetFile)
for f in dataset:
try:
self.candidateTree = newTree.makeATree(f)
except:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), f)
result = self.treeMatch(self.targetTree.root)
if result:
matchingFile.append(f)
        # Result: an empty matchingFile means no matching file was found
        print("These files match the equation:", matchingFile)
'''
Tree match algorithm based off the paper by Yao
@param q the root node of the sub tree we are trying to match
'''
def treeMatch(self, q):
result = False
tq = self.candidateTree.searchNode(q.name) #(list)Tq are occurrences of the pattern node q in data source.
for qi in tq:
if(self.find(q,qi)):
result = True
return result
'''
determines whether the current occurrence Tq→current is a partial solution.
@param q is the node in the target tree
@param tq Tq→current is a partial solution means matchings of
sub-tree patterns rooted by q have been found and encoded
in the stacks and these matchings are possibly extended to
final results
@return If Tq→current is false/ not a partial solution,
function CleanStack() is called to remove the recoded
nodes that are its descendants.
returns true if the list is not empty, the end is bigger than the start, and i = N;
'''
def find(self, q, tq):
if tq.is_leaf():
return True
numOfChildren = len(q.children)
i = 0 #q_i(i = 0, 1, 2, ... n-1) are q's children
stack_i = []
partialSolution = False
if q.name =='mi':
return True
tq_i = self.candidateTree.searchInChildren(tq,q.children[i].name)
while partialSolution or tq_i:
if (not tq_i) or (tq_i[0].start > tq.end):
if(partialSolution):
i=i+1
partialSolution = False
else:
j=0
while j < i:
self.cleanStack(q.children[j])
j += 1
return False
if i==numOfChildren:
return True
tq_i = self.candidateTree.searchInChildren(tq, q.children[i].name)
else:
if tq_i[0].start >= tq.start:
if self.find(q.children[i],tq_i[0]):
stack_i.append([tq_i[0], tq])
self.listOfStack[q.children[i]] = stack_i
partialSolution = True
tq_i.pop(0)
return False
'''
* Function GenerateSolution() and GenerateSolution2()
* produce two varieties of explicit representation
* of the final result
* @param q
'''
def generateSolution(self,q):
n = len(q.children)
i = 0
while i < n:
self.generateSolution(q.children[i])
self.listOfStack[q].extend(self.listOfStack[q.children[i]])
self.listOfStack[q] = [ [y[0] for y in g] + [i] for i, g in itertools.groupby(self.listOfStack[q], key = lambda x: x[-1])]
# print(self.listOfStack[q])
# print('@'*50)
i += 1
def cleanStack(self,q):
        n = len(q.children)  # Node has no numChildren attribute; use the children list
i = 0
while i < n:
self.cleanStack(q.children[i])
i+=1
parentNode = self.listOfStack[q][-1].parent
while(self.listOfStack[q]) and (self.listOfStack[q][-1].parent == parentNode):
self.listOfStack[q].pop()
# Run this as a main function
if __name__ == "__main__":
# import os
#
# dir_path = os.path.dirname(os.path.realpath(__file__))
# print(dir_path)
# with open("target.xml", "r") as f:
# l = f.read()
# print(l)
newTree = FastTreeMatch()
files = []
folder = "candidate"
#folder = "madelynnTests" # madelynn you cant have folders inside folders
for f in os.listdir(folder):
files.append(folder + '/' + f)
newTree.run("target.xml", files)
| gsiqi/xmlTreeMatching | treeMatch.py | treeMatch.py | py | 9,130 | python | en | code | 0 | github-code | 13 |
16755437255 | """Test cnot."""
import numpy as np
from toqito.matrices import cnot
def test_cnot():
"""Test standard CNOT gate."""
res = cnot()
expected_res = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
bool_mat = np.isclose(res, expected_res)
np.testing.assert_equal(np.all(bool_mat), True)
| vprusso/toqito | toqito/matrices/tests/test_cnot.py | test_cnot.py | py | 324 | python | en | code | 118 | github-code | 13 |
10347857061 | from __future__ import print_function
import argparse
import sys
import ijson # .backends.yajl2_cffi as ijson
import random
import os
from program_helper.ast.parser.ast_exceptions import TooLongBranchingException, TooLongLoopingException, \
VoidProgramException, TooManyVariableException, UnknownVarAccessException, IgnoredForNowException, \
TooManyTryException, NestedAPIParsingException, TooDeepException, TypeMismatchException, RetStmtNotExistException
from program_helper.program_reader import ProgramReader
from synthesis.write_java import Write_Java
from utilities.basics import conditional_director_creator
from utilities.logging import create_logger
MAX_AST_DEPTH = 64
MAX_FP_DEPTH = 10
MAX_FIELDS = 10
MAX_CAMELCASE = 3
MAX_KEYWORDS = 10
MAX_VARIABLES = 10
MAX_LOOP_NUM = 2
MAX_BRANCHING_NUM = 2
MAX_TRY_NUM = 2
SEED = 12
class Reader:
def __init__(self,
dump_data_path=None,
infer=False,
infer_vocab_path=None,
repair_mode=True,
dump_ast=False
):
        '''
        :param dump_data_path: data path to dump to
        :param infer: if used for inference
        :param infer_vocab_path: config to load from if infer
        :param repair_mode: whether to run in repair mode
        :param dump_ast: whether to keep the raw AST of each program
        '''
assert infer_vocab_path is None if infer is False else not None
self.infer = infer
random.seed(SEED)
conditional_director_creator(dump_data_path)
self.logger = create_logger(os.path.join(dump_data_path, 'data_read.log'))
self.logger.info('Reading data file...')
self.program_reader = ProgramReader(
max_ast_depth=MAX_AST_DEPTH, max_loop_num=MAX_LOOP_NUM,
max_branching_num=MAX_BRANCHING_NUM,
max_fp_depth=MAX_FP_DEPTH,
max_camel=MAX_CAMELCASE,
max_fields=MAX_FIELDS,
max_keywords=MAX_KEYWORDS,
max_trys=MAX_TRY_NUM,
max_variables=MAX_VARIABLES,
data_path=dump_data_path,
infer=infer,
infer_vocab_path=infer_vocab_path,
logger=self.logger,
repair_mode=repair_mode
)
self.done, self.ignored_for_branch, self.ignored_for_loop, self.ignored_for_try, \
self.ignored_for_illegal_var_access, self.ignored_for_nested_api, \
self.ignored_for_void, self.ignored_for_more_vars, self.ignored_for_now, \
self.ignored_for_depth, self.ignored_for_type, self.ignored_for_ret_stmt_not_exist, self.ignored_for_unknown\
= 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
self.data_points = None
self.dump_ast = dump_ast
def read_file(self, filename=None, prog_array=None, max_num_data=None):
if filename is not None and prog_array is not None:
raise Exception
if filename is not None:
prog_array_in = ijson.items(open(filename, 'rb'), 'programs.item')
elif prog_array is not None:
prog_array_in = prog_array
else:
raise Exception
temp_data_points = self.read_data_from_array(prog_array_in, max_num_data=max_num_data)
if self.data_points is None:
self.data_points = temp_data_points
else:
self.data_points.extend(temp_data_points)
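    # Typical driving sequence (hypothetical paths):
    #   reader = Reader(dump_data_path='./data_out')
    #   reader.read_file(filename='programs.json', max_num_data=10000)
    #   reader.wrangle()
    #   reader.dump()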
def wrangle(self, min_num_data=0):
# randomly shuffle to avoid bias towards initial data points during training
random.shuffle(self.data_points)
##
ast_programs, all_var_mappers, return_types, formal_params, field_array, \
apicalls, types, keywords, \
method, classname, javadoc_kws, \
surr_ret, surr_fp, surr_method, surr_method_ids, \
self.passed_jsons, self.real_javas, self.checker_outcome_strings, self.asts = \
zip(*self.data_points) # unzip
        if self.passed_jsons[0] is None:
            self.passed_jsons = None
        if self.real_javas[0] is None:
            self.real_javas = None
        if self.checker_outcome_strings[0] is None:
            self.checker_outcome_strings = None
        if self.asts[0] is None:
            self.asts = None
self.program_reader.wrangle(ast_programs, all_var_mappers, return_types, formal_params, field_array,
apicalls, types, keywords,
method, classname, javadoc_kws, surr_ret, surr_fp, surr_method, surr_method_ids,
min_num_data=min_num_data)
def dump(self):
java_synthesis, javas_synthesized = Write_Java(rename_vars=True), list()
program_jsons = list()
if self.passed_jsons is not None:
for json, outcome in zip(self.passed_jsons, self.checker_outcome_strings):
try:
java_program = java_synthesis.program_synthesize_from_json(json, comment=outcome)
except:
java_program = "Could not synthesize java program"
javas_synthesized.append(java_program)
program_jsons.append(json)
self.program_reader.save_data(effective_javas=javas_synthesized,
real_javas=self.real_javas,
program_jsons=program_jsons,
program_asts=self.asts)
self.logger.info('Done!')
def read_data_from_array(self, prog_array, max_num_data=None):
data_points = []
for program in prog_array:
data_point = self.read_one_json_program(program)
if data_point is None:
continue
data_points.append(data_point)
self.done += 1
if self.done % 1000 == 0 and self.done > 0:
self.logger.info('Extracted data for {} programs'.format(self.done))
if max_num_data is not None and self.done >= max_num_data:
break
self.logger.info('Extracted data for {} programs'.format(self.done))
return data_points
def read_one_json_program(self, program):
data_point = None
if 'ast' not in program:
return None
try:
parsed_ast, all_var_mappers, return_type_id, parsed_fp_array, parsed_field_array, \
apicalls, types, keywords, \
method, classname, javadoc_kws, \
surr_ret, surr_fp, surr_method_names, surr_method_ids,\
mod_program_js, checker_outcome_string = \
self.program_reader.read_json(program)
program_ast = program['ast'] if self.dump_ast or self.infer else None
body = program['body'] if self.infer else None
data_point = (parsed_ast, all_var_mappers, return_type_id, parsed_fp_array, parsed_field_array,
apicalls, types, keywords, method, classname, javadoc_kws,
surr_ret, surr_fp, surr_method_names, surr_method_ids, mod_program_js, body,
checker_outcome_string, program_ast)
        except TooLongLoopingException:
            self.ignored_for_loop += 1
        except TooLongBranchingException:
            self.ignored_for_branch += 1
        except TooManyTryException:
            self.ignored_for_try += 1
        except NestedAPIParsingException:
            self.ignored_for_nested_api += 1
        except UnknownVarAccessException:
            self.ignored_for_illegal_var_access += 1
        except RetStmtNotExistException:
            self.ignored_for_ret_stmt_not_exist += 1
        except VoidProgramException:
            self.ignored_for_void += 1
        except TooManyVariableException:
            self.ignored_for_more_vars += 1
        except IgnoredForNowException:
            self.ignored_for_now += 1
        except TooDeepException:
            self.ignored_for_depth += 1
        except TypeMismatchException:
            self.ignored_for_type += 1
return data_point
def log_info(self):
self.logger.info('{:8d} programs/asts in training data'.format(self.done))
self.logger.info('{:8d} programs/asts missed in training data for loop'.format(self.ignored_for_loop))
self.logger.info('{:8d} programs/asts missed in training data for branch'.format(self.ignored_for_branch))
self.logger.info('{:8d} programs/asts missed in training data for try'.format(self.ignored_for_try))
self.logger.info(
'{:8d} programs/asts missed in training data for illegal var access'.format(
self.ignored_for_illegal_var_access))
self.logger.info('{:8d} programs/asts missed in training data for being void'.format(self.ignored_for_void))
self.logger.info(
'{:8d} programs/asts missed in training data for too many variables'.format(self.ignored_for_more_vars))
self.logger.info('{:8d} programs/asts missed in training data for ignored now'.format(self.ignored_for_now))
self.logger.info('{:8d} programs/asts missed in training data for ignored for nested api parsing'.format(
self.ignored_for_nested_api))
self.logger.info(
'{:8d} programs/asts missed in training data for ignored for depth'.format(self.ignored_for_depth))
self.logger.info(
'{:8d} programs/asts missed in training data for ignored for type mismatch'.format(self.ignored_for_type))
self.logger.info(
'{:8d} programs/asts missed in training data for ignored for ret stmt not exist'.format(self.ignored_for_ret_stmt_not_exist))
self.logger.info(
'{:8d} programs/asts missed in training data for ignored for check'.format(self.ignored_for_unknown))
#if self.infer:
self.program_reader.ast_reader.ast_checker.print_stats(logger=self.logger)
# %%
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('input_file', type=str, nargs=1,
help='input data file')
parser.add_argument('--python_recursion_limit', type=int, default=100000,
help='set recursion limit for the Python interpreter')
parser.add_argument('--data_path', type=str, default='data',
help='data to be saved here')
clargs_ = parser.parse_args()
sys.setrecursionlimit(clargs_.python_recursion_limit)
filename_ = clargs_.input_file[0]
r = Reader(dump_data_path=clargs_.data_path)
r.read_file(filename=filename_, max_num_data=None)
r.wrangle()
r.log_info()
r.dump()
| rohanmukh/nsg | data_extraction/data_reader/data_reader.py | data_reader.py | py | 10,723 | python | en | code | 20 | github-code | 13 |
71601962259 | import queue
import time
import pygame
import random
pygame.init()
width = 800
height = 800
rows = 10
cols = 10
mines = 15
size = width / rows
num_font = pygame.font.SysFont('Arial', 25, bold=True)
lost_font = pygame.font.SysFont('Arial', 45, bold=True)
time_font = pygame.font.SysFont('Arial', 35, bold=True)
num_colors = {1: 'green', 2: 'red', 3: 'orange', 4: 'purple', 5: 'blue', 6: 'black', 7: 'pink', 8: 'yellow'}
rect_color = (200, 200, 200)
clicked_rect_color = (140, 140, 140)
flag_color = 'red'
bomb_color = 'red'
rec_color = 'white'
win = pygame.display.set_mode((width, height))
pygame.display.set_caption('Minesweeper_nfactorial')
background_color = 'white'
def get_neigh(row, col, rows, cols):
neighbors = []
if row > 0:
neighbors.append((row - 1, col))
if row < rows - 1:
neighbors.append((row + 1, col))
if col > 0:
neighbors.append((row, col - 1))
if col < cols - 1:
neighbors.append((row, col + 1))
if row > 0 and col > 0:
neighbors.append((row - 1, col - 1))
if row < rows - 1 and col < cols - 1:
        neighbors.append((row + 1, col + 1))
if row < rows - 1 and col > 0:
neighbors.append((row + 1, col - 1))
if row > 0 and col < cols - 1:
neighbors.append((row - 1, col + 1))
return neighbors
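# Sanity check of the neighbour helper (illustrative, not part of the game logic):
assert get_neigh(0, 0, 10, 10) == [(1, 0), (0, 1), (1, 1)]  # a corner cell has 3 neighbours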
def create_field(rows, cols, mines):
field = [[0 for _ in range(cols)] for _ in range(rows)]
mines_pos = set()
while len(mines_pos) < mines:
row = random.randrange(0, rows)
col = random.randrange(0, cols)
pos = row, col
if pos in mines_pos:
continue
mines_pos.add(pos)
field[row][col] = -1
for mine in mines_pos:
neighbors = get_neigh(*mine, rows, cols)
for r, c in neighbors:
if field[r][c] != -1:
field[r][c] += 1
return field
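# One possible 3x3 layout (illustrative): a mine at (0, 0) produces
#   [[-1, 1, 0],
#    [1, 1, 0],
#    [0, 0, 0]]
# since each non-mine neighbour of the mine gets +1.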
def draw(win, field, cover_field, current_time):
win.fill(background_color)
time_text = time_font.render(f'Time Elapsed: {round(current_time)}', 1, 'black')
win.blit(time_text, (10, height - time_text.get_height()))
for i, row in enumerate(field):
y = size * i
for j, value in enumerate(row):
x = size * j
is_covered = cover_field[i][j] == 0
is_flag = cover_field[i][j] == -2
is_bomb = value == -1
if is_flag:
pygame.draw.rect(win, flag_color, (x, y, size, size))
pygame.draw.rect(win, 'black', (x, y, size, size), 2)
continue
if is_covered:
pygame.draw.rect(win, rec_color, (x, y, size, size))
pygame.draw.rect(win, 'black', (x, y, size, size), 2)
continue
else:
pygame.draw.rect(win, clicked_rect_color, (x, y, size, size))
pygame.draw.rect(win, 'black', (x, y, size, size), 2)
if is_bomb:
pygame.draw.circle(win, bomb_color, (x + size / 2, y + size / 2), radius=size / 2 - 4)
if value > 0:
text = num_font.render(str(value), 1, num_colors[value])
win.blit(text, (x + (size / 2 - text.get_width() / 2), y + (size / 2 - text.get_height() / 2)))
pygame.display.update()
def uncover_from_pos(row, col, cover_field, field):
q = queue.Queue()
q.put((row, col))
visited = set()
while not q.empty():
current = q.get()
neighbors = get_neigh(*current, rows, cols)
for r, c in neighbors:
if (r, c) in visited:
continue
value = field[r][c]
if value == 0 and cover_field[r][c] != -2:
q.put((r, c))
if cover_field[r][c] != -2:
cover_field[r][c] = 1
visited.add((r, c))
def get_grid_pos(mouse_pos):
mx, my = mouse_pos
row = int(my // size)
col = int(mx // size)
return row, col
def draw_lost(win, text):
text = lost_font.render(text, 1, 'black')
win.blit(text, (width / 2 - text.get_width() / 2, height / 2 - text.get_height() / 2))
pygame.display.update()
def main():
run = True
field = create_field(rows, cols, mines)
cover_field = [[0 for _ in range(cols)] for _ in range(rows)]
flags = mines
clicks = 0
start_time = time.time()
while run:
current_time = time.time() - start_time
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
break
if event.type == pygame.MOUSEBUTTONDOWN:
row, col = get_grid_pos(pygame.mouse.get_pos())
if row >= rows or col >= cols:
continue
mouse_pressed = pygame.mouse.get_pressed()
if mouse_pressed[0] and cover_field[row][col] != -2:
cover_field[row][col] = 1
if field[row][col] == -1:
draw(win, field, cover_field, current_time)
                    draw_lost(win, 'You lost! Try again')
pygame.time.delay(5000)
field = create_field(rows, cols, mines)
cover_field = [[0 for _ in range(cols)] for _ in range(rows)]
flags = mines
clicks = 0
if clicks == 0 or field[row][col] == 0:
uncover_from_pos(row, col, cover_field, field)
if clicks == 0:
start_time = time.time()
clicks += 1
elif mouse_pressed[2]:
if cover_field[row][col] == -2:
cover_field[row][col] = 0
flags += 1
else:
flags -= 1
cover_field[row][col] = -2
draw(win, field, cover_field, current_time)
pygame.quit()
if __name__ == '__main__':
main()
| meirrrrr/saper_game_by_meirrrr | main.py | main.py | py | 6,055 | python | en | code | 0 | github-code | 13 |
13998103670 | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from my_fake_useragent import UserAgent
import requests
import json
import time
# Fetch page content via webdriver
def getDrivertDriverByWebdriver(url):
    opt = webdriver.ChromeOptions()
    # Spoof the request-header user agent
    opt.add_argument('--user-agent=%s' % UserAgent().random())
    # Launch the browser in developer mode
    opt.add_experimental_option('excludeSwitches', ['enable-automation'])
    # Suppress the save-password prompt
    prefs = {'credentials_enable_service': False, 'profile.password_manager_enabled': False}
    opt.add_experimental_option('prefs', prefs)
    # Hide automation fingerprints targeted by anti-bot checks
    opt.add_argument('--disable-blink-features=AutomationControlled')
    # IP proxy
    opt.add_argument('--proxy-server={}'.format(getReuestIpProxies()))
    driver = webdriver.Chrome(options=opt)
    # Maximize the browser window
    driver.maximize_window()
    driver.get(url=url)
    WebDriverWait(driver=driver, timeout=25)
    return driver
# Fetch page content via the requests library
def getHtmlContentByRequests(url):
    proxies = {
        'http': 'http://' + getReuestIpProxies()
    }
    header = {
        'User-Agent': UserAgent().random()  # the call parentheses were missing here
    }
    response = requests.get(url=url, headers=header, proxies=proxies)
    response.encoding = 'utf-8'
    content = response.text
    return content
# IP proxy provider
def getReuestIpProxies():
ipUrle='https://api.xxx.com:8522/api/getIpEncrypt?dataType=0&encryptParam=SlDyzgfgDW12vuaMHmQkM1l3svlLMXCHw0IlSHvOue3lVhShpdEjb9vG2YRiwpyEuRVyxit%2BS%2BLPfM1vGfsJ7mEjAi0eMq%2Ft61ylMNCQgciuTUwZah7tVApJ9%2B8FOZYiAyw%2B0Hk2DtMscqY%2B%2FhM%2BdDz2krSwDgva62WETzjyvF7YFsJvJYxeLU9YDql4IJq6vs3ERyFHZ9FAgNS8WBDIMt0Jv%2FQlqwlcd4gkrYI6AFg%3D'
    response = requests.get(url=ipUrle)
    response.encoding = 'gb2312'
    ipData = json.loads(response.text)
    http = ''  # guard against an empty data list
    for row in ipData['data']:
        ip = row['ip']
        port = str(row['port'])
        http = ip + ':' + port
    return http
| Jsonyuanlairuci/python | Response.py | Response.py | py | 1,969 | python | en | code | 1 | github-code | 13 |
14671935328 | # _*_ coding=utf-8 _*_
# Bubble sort, time complexity O(n^2)
def bubble_sort(num):
    """
    If a full pass of bubble sort performs no swaps, the list is already
    sorted and the algorithm can terminate early.
    :param num:
    :return:
    """
for i in range(len(num) - 1):
exchange = False
print(num)
for j in range(len(num) - 1 - i):
if num[j] > num[j + 1]:
num[j], num[j + 1] = num[j + 1], num[j]
exchange = True
if not exchange:
return num
return num
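# Quick self-check (illustrative): the early exit fires on the first swap-free pass.
assert bubble_sort([3, 1, 2]) == [1, 2, 3]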
l = [33,11,12,1,2,3,4,5,22]
print(bubble_sort(l))
| IsaacNewLee/BubbleSort | bubble_sotr.py | bubble_sotr.py | py | 606 | python | zh | code | 0 | github-code | 13 |
30796376367 | #!/usr/bin/env python3
__version__ = "0.1.0"
import util
from nlp_02 import nlp_025
"""
25. Template extraction
Extract the field names and values of the "basic information" (country infobox)
template contained in the article and store them as a dictionary object.
https://ja.wikipedia.org/wiki/Template:基礎情報_国
"""
def test_execute():
    expected = util.expected_file(__file__)
    actual = util.dict2tsv(nlp_025.execute("./work/jawiki-country.json.gz"))
    assert expected == actual
| bulldra/nlp100 | tests/nlp_02/test_nlp_025.py | test_nlp_025.py | py | 510 | python | ja | code | 0 | github-code | 13 |
72690348177 | from typing import Literal, Dict, Any
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from .base import BaseDataset, collate_fn
class MVTecDataset(BaseDataset):
CLASSES = ('background', 'nut', 'wood_screw', 'lag_wood_screw', 'bolt', # 0-4
'black_oxide_screw', 'shiny_screw', 'short_wood_screw', 'long_lag_screw', # 5-8
'large_nut', 'nut2', 'nut1', 'machine_screw', # 9-12
'short_machine_screw') # 13
PALETTE = [(255, 255, 255), (165, 42, 42), (189, 183, 107), (0, 255, 0), (255, 0, 0),
(138, 43, 226), (255, 128, 0), (255, 0, 255), (0, 255, 255),
(255, 193, 193), (0, 51, 153), (255, 250, 205), (0, 139, 139),
(255, 255, 0)]
IMAGE_MEAN = (211.35 / 255, 166.559 / 255, 97.271 / 255)
IMAGE_STD = (43.849 / 255, 40.172 / 255, 30.459 / 255)
MERGED_CLASSES = ('background', 'nut', 'wood_screw', 'lag_wood_screw', 'bolt',
'black_oxide_screw', 'shiny_screw', 'machine_screw')
MERGED_PALETTE = [(255, 255, 255), (165, 42, 42), (189, 183, 107), (0, 255, 0), (255, 0, 0),
(138, 43, 226), (255, 128, 0), (0, 139, 139)]
def __init__(
self,
save_dir: str = "/workspace/datasets/mvtec.pth",
data_path: str = "/datasets/split_ss_mvtec",
split: Literal["train", "test"]="train",
merged: bool = False,
):
super(MVTecDataset, self).__init__(save_dir, data_path, split)
self.merged = merged
if merged:
self.CLASSES = self.MERGED_CLASSES
self.PALETTE = self.MERGED_PALETTE
def __getitem__(self, idx):
image, ann = super(MVTecDataset, self).__getitem__(idx)
if self.merged:
# 'nut', 'large_nut', 'nut2', 'nut1'
ann['labels'][(ann['labels'] == 1) | (ann['labels'] == 9) | (ann['labels'] == 10) | (ann['labels'] == 11)] = 1
# 'wood_screw', 'short_wood_screw'
ann['labels'][(ann['labels'] == 2) | (ann['labels'] == 7)] = 2
# 'long_lag_screw', 'lag_wood_screw'
ann['labels'][(ann['labels'] == 3) | (ann['labels'] == 8)] = 3
# # 'bolt'
# ann['labels'][ann['labels'] == 4] = 4
# 'short_machine_screw', 'machine_screw'
ann['labels'][(ann['labels'] == 12) | (ann['labels'] == 13)] = 7
# # 'black_oxide_screw'
# ann['labels'][ann['labels'] == 5] = 5
# # 'shiny_screw'
# ann['labels'][ann['labels'] == 6] = 6
return image, ann
class MVTecDataModule(pl.LightningDataModule):
def __init__(
self,
save_dir="/workspace/datasets/mvtec.pth",
data_path="/datasets/split_ss_mvtec",
train_loader_kwargs: Dict[str, Any] = dict(batch_size=1, num_workers=4, shuffle=True, pin_memory=True),
test_loader_kwargs: Dict[str, Any] = dict(batch_size=1, num_workers=4, shuffle=False, pin_memory=True),
):
super(MVTecDataModule, self).__init__()
self.save_dir = save_dir
self.data_path = data_path
self.train_loader_kwargs = train_loader_kwargs
self.test_loader_kwargs = test_loader_kwargs
def setup(self, stage: Literal["fit", "test"] = "fit") -> None:
self.train_dataset = MVTecDataset(
save_dir = self.save_dir,
data_path = self.data_path,
split = "train",
)
self.test_dataset = MVTecDataset(
save_dir = self.save_dir,
data_path = self.data_path,
split = "test",
)
def train_dataloader(self) -> DataLoader:
return DataLoader(self.train_dataset, collate_fn=collate_fn, **self.train_loader_kwargs)
def val_dataloader(self) -> DataLoader:
return DataLoader(self.test_dataset, collate_fn=collate_fn, **self.test_loader_kwargs)
def test_dataloader(self) -> DataLoader:
return DataLoader(self.test_dataset, collate_fn=collate_fn,**self.test_loader_kwargs)
| crazyboy9103/oriented_detection | datasets/mvtec.py | mvtec.py | py | 4,092 | python | en | code | 0 | github-code | 13 |
73515789456 | from openpyxl import Workbook, load_workbook
import decimal
from datetime import datetime
# When reading data from a DB and writing it out to an Excel file, pandas is
# more convenient and needs fewer lines, unless cell formatting matters.
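# For comparison, a minimal pandas sketch (assumes a DataFrame `df` exists and
# the openpyxl engine is installed):
#   import pandas as pd
#   df.to_excel("write_demo.xlsx", sheet_name="Mysheet", index=False)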
wb = Workbook()
ws = wb.active
ws2 = wb.create_sheet("Mysheet", 0) # insert at first position
# Japanese text works without issues
ws2.cell(row=1, column=1, value="あいうえお")
ws2.cell(row=1, column=2, value=int(100))
ws2.cell(row=1, column=3, value=float(1.9999999999))
ws2.cell(row=1, column=4, value=float(0.25))
ws2.cell(row=1, column=5, value=decimal.Decimal("2.9999999999"))
c = ws2.cell(row=1, column=6, value=datetime.now())
print(c.number_format)
# A date format can be set (the month not being zero-padded, e.g. 2021/3/13,
# appears to be Excel behaviour)
c.number_format = 'YYYY/MM/DD H:MM:SS'
# This one is stored as a string
ws2.cell(row=1, column=7, value="2021/03/13 13:11:59")
# Note: saving raises an error if this Excel file is already open
wb.save(filename = "write_demo.xlsx")
# You can also load a template Excel file, edit it, and save it as a new file
wb2 = load_workbook(filename = 'template.xlsx')
ws3 = wb2['Mysheet']
ws3.cell(row=7, column=1, value="あいうえお")
ws3.cell(row=7, column=2, value=100)
ws3.cell(row=7, column=3, value=1.9999999999)
ws3.cell(row=7, column=4, value=0.25)
ws3.cell(row=7, column=5, value=decimal.Decimal("2.9999999999"))
c = ws3.cell(row=7, column=6, value=datetime.now())
print(c.number_format)
# A date format can be set (the month not being zero-padded, e.g. 2021/3/13,
# appears to be Excel behaviour)
c.number_format = 'YYYY/MM/DD H:MM:SS'
# This one is stored as a string
ws3.cell(row=7, column=7, value="2021/03/13 13:11:59")
wb2.save(filename = "modified.xlsx")
| Fumi76/py_openpyxl_example | write.py | write.py | py | 1,916 | python | ja | code | 0 | github-code | 13 |
27580494813 | import src.constants as c
from src.pre_process import extract_features
from src.test_model import batch_cosine_similarity
from scipy.io.wavfile import read
import numpy as np
import base64
from pydub import AudioSegment
from src import silence_detector
import librosa
def clipped_audio(x, num_frames=c.NUM_FRAMES):
if x.shape[0] > num_frames:
clipped_x = x[0: num_frames]
else:
clipped_x = x
return clipped_x
def convertBase64CafToWav(cafBase64):
    cafFile = base64.b64decode(cafBase64)
    with open("audio.caf", "wb") as fh:
        fh.write(cafFile)
    flac_audio = AudioSegment.from_file("audio.caf", "caf")
    flac_audio.export("sampleWave.wav", format="wav")
    with open("sampleWave.wav", "rb") as voice_file:
        encoded_string = base64.b64encode(voice_file.read()).decode('ascii')
    return encoded_string  # the old trailing `return ''` was unreachable
def VAD(audio):
chunk_size = int(c.SAMPLE_RATE*0.05) # 50ms
index = 0
sil_detector = silence_detector.SilenceDetector(15)
nonsil_audio=[]
while index + chunk_size < len(audio):
if not sil_detector.is_silence(audio[index: index+chunk_size]):
nonsil_audio.extend(audio[index: index + chunk_size])
index += chunk_size
return np.array(nonsil_audio)
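# Note: assuming c.SAMPLE_RATE == 16000 (the real value lives in src/constants.py),
# each 50 ms chunk scanned by VAD above is 800 samples.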
def read_audio(filename, sample_rate=c.SAMPLE_RATE):
audio, sr = librosa.load(filename, sr=sample_rate, mono=True)
audio = VAD(audio.flatten())
start_sec, end_sec = c.TRUNCATE_SOUND_SECONDS
start_frame = int(start_sec * c.SAMPLE_RATE)
end_frame = int(end_sec * c.SAMPLE_RATE)
if len(audio) < (end_frame - start_frame):
# au = [0] * (end_frame - start_frame)
# for i in range(len(audio)):
# au[i] = audio[i]
# audio = np.array(au)
k = int(((end_frame - start_frame)/len(audio))+1)
audio = np.tile(audio,k)
return audio
def compareTwoVoice(model, embeddingInput, sampleItem):
feat2 = None
try:
dir = "sample-npy/"+sampleItem["_id"]+".npy";
feat2 = np.load(dir)
except:
feat2 = None
if feat2 is None:
sampleBase64Wav = sampleItem["voice_wav"]
        cafSampleFile = base64.b64decode(sampleBase64Wav)
with open("wav_sample.wav", "wb") as fh:
fh.write(cafSampleFile)
utt2 = read_audio('wav_sample.wav')
# utt2 = utt2 / (2**15 - 1)
feat2 = extract_features(utt2)
feat2 = clipped_audio(feat2)
feat2 = feat2[np.newaxis, ...]
np.save("sample-npy/"+sampleItem["_id"], feat2)
emb2 = model.predict(feat2)
#print(emb1)
# similarity
# mul = np.multiply(emb1, emb2)
# s = np.sum(mul, axis=1)
# print(s)
similarity=batch_cosine_similarity(embeddingInput,emb2)
# print(cdist(emb1, emb2, metric="cosine"))
print(sampleItem["name"]+": "+str(similarity))
    return similarity
def predictEmbedding(model, inputBase64Wav):
    cafInputFile = base64.b64decode(inputBase64Wav)
with open("wav_input.wav", "wb") as fh:
fh.write(cafInputFile)
utt1 = read_audio('wav_input.wav')
# utt1 = utt1 / (2**15 - 1)
feat1 = extract_features(utt1)
feat1 = clipped_audio(feat1)
feat1 = feat1[np.newaxis, ...]
emb1 = model.predict(feat1)
    return emb1 | kienlanman/api-speaker-recognition | src/service/TransferAudio.py | TransferAudio.py | py | 3,269 | python | en | code | 0 | github-code | 13 |
11191694029 | names = []
usernames = []
entries = int(input("No of Entries : "))
for i in range(0, entries):
    names.append(input("Entry {} ".format(i + 1)))
for j in range(0, entries):
    usernames.append(names[j].lower().replace(" ", "_"))
for user in usernames:
    print(user.title().replace("_", " "))
print(usernames)
| AshuAhlawat/Python | Basics/05loops1List2.py | 05loops1List2.py | py | 308 | python | en | code | 1 | github-code | 13 |
31321867954 | # Analytical computations
# import modules
import numpy as np
import matplotlib.pyplot as plt  # missing in the original; plt.subplots below requires it
from IPython.display import display  # also missing; used by display(ui, out) at the end
f, ax = plt.subplots(1, 1)
f.suptitle('G* function with variable coefficients')
def Vi(ai,alphai):
return alphai**2/((1+2*alphai)*(1+ai)**2)
def V(a_prms,alpha):
D=1
for ai,alphai in zip(a_prms,alpha):
D*=(1+Vi(ai,alphai))
return D-1
def S_i(a,alpha):
S_i=np.zeros_like(a)
for i, (ai,alphai) in enumerate(zip(a,alpha)):
S_i[i]=Vi(ai,alphai)/V(a,alpha)
return S_i
def S_T(a,alpha):
    # total-effect index: S_T_i = (V + 1) / (V_i + 1) * V_i / V
    S_T=np.zeros_like(a)
Vtot=V(a,alpha)
for i, (ai,alphai) in enumerate(zip(a,alpha)):
S_T[i]=(Vtot+1)/(Vi(ai,alphai)+1)*Vi(ai,alphai)/Vtot
return S_T
def update_Sobol(**kwargs):
import re
r = re.compile("([a-zA-Z]+)([0-9]+)")
ax.clear()
prm_cat=int(len(kwargs)/k)
prms=np.zeros((prm_cat,k))
for key, value in kwargs.items(): #find indx and value for a_prms
pre,post=r.match(key).groups()
cat_idx=strings.index(pre)
prms[cat_idx,int(post)]=value
Si[:]=S_i(prms[0,:],prms[1,:])
ST[:]=S_T(prms[0,:],prms[1,:])
width=0.4
x_tick_list=np.arange(len(prms[0,:]))+1
ax.set_xticks(x_tick_list+width/2)
x_labels=['x'+str(i) for i in np.arange(len(prms[0,:]))]
ax.set_xticklabels(x_labels)
ax.set_ylim(0,1)
ax.bar(x_tick_list,Si,width,color='blue')
ax.bar(x_tick_list+width,ST,width,color='red')
ax.legend(['First order indices','Total indices'])
k=4 #number of prms
strings=['a','alpha','delta']
a_lbls=[strings[0]+str(i) for i in np.arange(k)]
alpha_lbls=[strings[1]+str(i) for i in np.arange(k)]
delta_lbls=[strings[2]+str(i) for i in np.arange(k)]
Si=np.zeros(k)
ST=np.zeros(k)
a_prms=np.zeros(k)
alpha=np.zeros_like(a_prms)
delta=np.zeros_like(a_prms)
import ipywidgets as widgets
my_sliders=[]
for i in range(k):
my_sliders.append(widgets.FloatSlider(min=0, max=15, value=6.52, description=a_lbls[i]))
my_sliders.append(widgets.FloatSlider(min=0, max=15, value=1.0, description=alpha_lbls[i]))
my_sliders.append(widgets.FloatSlider(min=0, max=1.0, value=0.5, description=delta_lbls[i]))
slider_dict = {slider.description:slider for slider in my_sliders}
ui_left = widgets.VBox(my_sliders[0::3])
ui_mid = widgets.VBox(my_sliders[1::3])
ui_right = widgets.VBox(my_sliders[2::3])
ui=widgets.HBox([ui_left,ui_mid,ui_right])
out=widgets.interactive_output(update_Sobol, slider_dict)
display(ui,out)
# End Analytical computations
| utsekaj42/enthral_summer_school_2021 | Day 3/python_source/interactive_gstar_function.py | interactive_gstar_function.py | py | 2,515 | python | en | code | 1 | github-code | 13 |
31321867954 | from abc import ABC
from dataclasses import dataclass, field
import datetime
import os
from typing import Union
import copy
import yaml
import re
from dateutil.relativedelta import relativedelta
from datetime import date
__path: str = os.environ['TSDB_DATA']
parser_regex = r'^([A-Za-z0-9\s\-]*)(_[A-Za-z0-9\s\-]*)?(_[A-Za-z0-9\s\-]*)?(_[\w\s\-]*)?(\.[A-Za-z0-9]*)?(@[A-Za-z0-9]*)?'
if os.path.exists(__path + 'market_coord_cfg.YAML'):
with open(__path + 'market_coord_cfg.YAML', 'r+') as f:
yml = yaml.load(f, Loader=yaml.FullLoader)
if yml:
__mkt_data_cfg = yml.get("market_coordinates", {})
__mkt_defaults_cfg = yml.get("defaults", {})
else:
__mkt_data_cfg = {}
__mkt_defaults_cfg = {}
else:
__mkt_data_cfg = {}
__mkt_defaults_cfg = {}
def mkt_data_cfg() -> dict:
"""returns the config for mkt data coordinates"""
return __mkt_data_cfg
def mkt_defaults_cfg() -> dict:
"""returns the config that defines defaults for the mkt coordinates"""
return __mkt_defaults_cfg
def tsdb_path() -> str:
""" returns the path to our data directory"""
return __path
def singleton(class_):
instances = {}
def getinstance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return getinstance
@dataclass
class MktCoord:
"""
mkt_class mkt_type mkt_asset point quote_style splitting char is _ and . for quote style and @ for source
Note: this is a dataclass and as such does not have an __init__ method as it is generated at initialization
"""
mkt_class: str
_mkt_class: str = field(init=False, repr=False)
mkt_type: str
_mkt_type: str = field(init=False, repr=False)
mkt_asset: str
_mkt_asset: str = field(init=False, repr=False)
point: str
_point: tuple = field(default=None, init=False, repr=False)
quote: str = None
_quote: str = field(default=None, init=False, repr=False)
source: str = None
_source: str = field(default=None, init=False, repr=False)
def get_mkt_tuple(self) -> tuple:
return self.mkt_class, self.mkt_type, self.mkt_asset, self.point, self.quote, self.source
@property
def mkt_class(self) -> str:
return self._mkt_class
@mkt_class.setter
def mkt_class(self, mkt_class: str):
self._mkt_class = mkt_class.upper()
@property
def mkt_type(self) -> str:
return self._mkt_type
@mkt_type.setter
def mkt_type(self, mkt_type: str):
self._mkt_type = mkt_type.upper()
@property
def mkt_asset(self) -> str:
return self._mkt_asset
@mkt_asset.setter
def mkt_asset(self, mkt_asset: str):
self._mkt_asset = mkt_asset.upper()
@property
def point(self):
return self._point
@point.setter
def point(self, point: str):
self._point = str(point).upper() if not isinstance(
point, property) and point is not None else None
@property
def source(self) -> str:
return self._source
@source.setter
def source(self, source: str):
self._source = str(source).upper() if not isinstance(
source, property) else "default"
@property
def quote(self) -> str:
return self._quote
@quote.setter
def quote(self, quote: str):
self._quote = str(quote).upper() if not isinstance(
quote, property) else None
def copy(self):
return copy.copy(self)
def deepcopy(self):
return copy.deepcopy(self)
def mkt_symbol(self, source_override=None) -> str:
"""returns the mkt symbol for the MktCoord"""
quote_string = '.' + \
self.quote if not (
self.quote == None or self.quote.lower() == "default") else ""
source_string = "default" if not self.source else self.source.upper()
point = [self.point] if self.point else []
if source_override:
mkt_str = "_".join(
[self.mkt_class, self.mkt_type, self.mkt_asset] + point) + quote_string + "@" + source_override
elif source_string.upper() != 'default'.upper():
mkt_str = "_".join(
[self.mkt_class, self.mkt_type, self.mkt_asset] + point) + quote_string + "@" + source_string
else:
default_source = get_coord_default_source(self)
mkt_str = "_".join([self.mkt_class, self.mkt_type, self.mkt_asset] + point) + quote_string + "@" + default_source
return mkt_str.upper()
def parse_mkt_coord(mkt_coord_str: str) -> MktCoord:
"""
mkt_class mkt_type mkt_asset point quote_style splitting char is _ and . for quote style and @ for a source
:param mkt_coord_str: input string to be parsed according to the above rules
:return:
"""
match = re.match(parser_regex, mkt_coord_str.upper()).groups()
source = "default".upper() if not match[5] else match[5].replace('@', '')
quote_style = "default".upper(
) if not match[4] else match[4].replace('.', '')
mkt_class = match[0].replace('_', '') # get type
mkt_type = match[1].replace('_', '') if match[1] else None # get asset
mkt_asset = match[2].replace('_', '') if match[2] else None # get class
point = match[3].replace('_', '') if match[2] else None # get point
return MktCoord(mkt_class, mkt_type, mkt_asset, point=point, quote=quote_style, source=source)
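# Illustrative round trip (hypothetical coordinate string, not from any config):
#   parse_mkt_coord("equity_index_snp500_spot.mid@bbg")
#   -> MktCoord(mkt_class='EQUITY', mkt_type='INDEX', mkt_asset='SNP500',
#               point='SPOT', quote='MID', source='BBG')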
def get_coord_default_source(mkt_coord: MktCoord) -> Union[str, None]:
"""returns the default source for a given MktCoord"""
if mkt_coord.mkt_asset.upper() in [mkt_asset.upper() for mkt_asset in get_mkt_assets(mkt_coord)]:
return mkt_data_cfg()[mkt_coord.mkt_class.upper()][mkt_coord.mkt_type.upper()][mkt_coord.mkt_asset.upper()][
"default_source"].upper()
else:
return None
def get_ticker(coord: MktCoord) -> list:
"""return valid ticker for a MktCoord"""
if coord.mkt_asset.upper() in [asset.upper() for asset in get_mkt_assets(coord)]:
tickers = mkt_data_cfg()[coord.mkt_class.upper()][coord.mkt_type.upper()][coord.mkt_asset.upper()].get("tickers", coord.mkt_asset.upper())
points = get_points(coord)
if not len(points):
return tickers
else:
d = {points[i]:tickers[i] for i in range(len((points)))}
return d[coord.point]
def get_points(coord: MktCoord) -> list:
""" return the valid point for a MktCoord"""
if coord.mkt_asset.upper() in [asset.upper() for asset in get_mkt_assets(coord)]:
return list(
mkt_data_cfg()[coord.mkt_class.upper()][coord.mkt_type.upper()][coord.mkt_asset.upper()]["points"])
else:
return []
def get_mkt_assets(coord: MktCoord) -> list:
""" returns a list of valid mkt assets for a MktCoord"""
if coord.mkt_type.upper() not in [mkt_type.upper() for mkt_type in get_mkt_types(coord)]:
return []
else:
return list(mkt_data_cfg()[coord.mkt_class.upper()][coord.mkt_type.upper()].keys())
def get_mkt_types(coord: MktCoord) -> list:
""" returns a lost of valid mkt-types for MktCoord"""
if coord.mkt_class.upper() in [mkt_class.upper() for mkt_class in get_mkt_class()]:
return list(mkt_data_cfg()[coord.mkt_class.upper()].keys())
else:
return []
def get_mkt_class() -> list:
"""
return a list of all mkt_classs
"""
return list(mkt_data_cfg().keys())
def tenor_parse(tenor: str) -> relativedelta:
    regex = r"\d{1,2}[MYW]"
    if not re.match(regex, tenor):
        raise ValueError("Illegal Tenor")  # raising a bare string is a TypeError in Python 3
    if tenor[-1] == "M":
        return relativedelta(months=int(tenor[:-1]))
    if tenor[-2:] == "WK":
        return relativedelta(weeks=int(tenor[:-2]))
    if tenor[-1] == "Y":
        return relativedelta(years=int(tenor[:-1]))
    else:
        return None
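# e.g. (illustrative): tenor_parse("3M") -> relativedelta(months=+3),
# tenor_parse("2WK") -> relativedelta(weeks=+2)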
if __name__ == "__main__":
mkt_coord_str_1 = "equity_index_snp500_spot.bla@yahoo"
mkt_coord_str_1 = "equity_index_snp500_spot.bla"
match1 = re.match(parser_regex, mkt_coord_str_1).groups()
mkt_c2 = MktCoord("equity", "stock", "cash", ("a", "b"))
| imry-rosenbuam/jabberjaw | jabberjaw/utils/mkt_classes.py | mkt_classes.py | py | 8,183 | python | en | code | 0 | github-code | 13 |
26414737482 | from ursina import *
import Game
from ..Screen import Screen
from GameStates import GameStates
from Graphics.Container import Container
from Graphics.UIs.Inventory.Inventory import Inventory
from Overlays.Notification import Notification
from .SelectionStatus import SelectionStatus
from .MenuButton import MenuButton
from .Changelog import ChangelogMenu
from .TravelMenu import TravelMenu
from .GachaMenu import GachaMenu
class Selection(Screen):
def __init__(self):
self.status = SelectionStatus.changelog
self.status_changed = True
super().__init__(True, GameStates.mainMenu)
self.back_button.events += self.back_thing
self.menu = Container(
model='quad',
origin=(0, 0),
scale=(1.5, 0.7),
position=window.center,
color=color.clear
)
self.menu_buttons_container = Container(
position=(0, 0.5),
scale=(1, 0.2),
parent=self.menu
)
self.changelog_menu = ChangelogMenu(self.menu)
self.travel_menu = TravelMenu(self.menu)
self.inventory_menu = Inventory(True, self.menu)
self.inventory_menu.scale = (0.805, 0.85)
self.inventory_menu.y -= 0.05
self.gacha_menu = GachaMenu(self.menu)
self.changelog_button = MenuButton(
Game.language.changelog,
parent=self.menu_buttons_container
)
self.changelog_button.activate()
self.changelog_button.on_click_event += lambda: self.change_status(SelectionStatus.changelog)
self.travel_button = MenuButton(
Game.language.travel,
parent=self.menu_buttons_container
)
def check_equipped_character():
if Game.user.get_equipped_character() is not None:
self.change_status(SelectionStatus.travel)
else:
Game.notification_manager.add_notification(Notification("You don't have anyone equipped!", color.red))
self.travel_button.on_click_event += check_equipped_character
self.inventory_button = MenuButton(
Game.language.inventory,
parent=self.menu_buttons_container
)
self.inventory_button.on_click_event += lambda: self.change_status(SelectionStatus.inventory)
self.gacha_button = MenuButton(
Game.language.gacha,
parent=self.menu_buttons_container
)
self.gacha_button.on_click_event += lambda: self.change_status(SelectionStatus.gacha)
self.menus = [
self.changelog_menu,
self.travel_menu,
self.inventory_menu,
self.gacha_menu
]
self.menu_buttons = [
self.changelog_button,
self.travel_button,
self.inventory_button,
self.gacha_button
]
space_coefficient = 1
scale_x = 1 / (space_coefficient * len(self.menu_buttons))
center_offset = (len(self.menu_buttons) - 1) * scale_x / 2
for button in self.menu_buttons:
index = self.menu_buttons.index(button)
button.scale = (scale_x - (scale_x * 0.1), 0.5)
button.text_entity.scale = (button, 1)
button.position = ((index * scale_x) - center_offset, 0.35)
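            # e.g. with the 4 buttons above: scale_x = 0.25, center_offset = 0.375,
            # so button centres land at x = -0.375, -0.125, 0.125, 0.375 (illustrative).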
self.on_show += self._show
self.on_hide += self._hide
@property
def name(self) -> str:
return "Selection"
@property
def fades(self) -> bool:
return True
def deactivate_buttons(self):
for button in self.menu_buttons:
button.deactivate()
def disable_menus(self):
for menu in self.menus:
menu.disable()
def _show(self):
self.inventory_menu.update_player()
self.change_status(SelectionStatus.changelog)
self.menu.enable()
self.menu_buttons_container.enable()
def _hide(self):
self.menu.disable()
self.menu_buttons_container.disable()
self.disable_menus()
self.inventory_menu.show_entity_listing('characters')
def change_status(self, status: SelectionStatus):
self.status = status
self.status_changed = True
def back_thing(self):
Game.user.unload()
def update(self):
if self.status_changed:
self.deactivate_buttons()
self.disable_menus()
if self.status == SelectionStatus.changelog:
self.changelog_button.activate()
self.changelog_menu.enable()
elif self.status == SelectionStatus.travel:
self.travel_button.activate()
self.travel_menu.enable()
elif self.status == SelectionStatus.inventory:
self.inventory_button.activate()
self.inventory_menu.enable()
elif self.status == SelectionStatus.gacha:
self.gacha_button.activate()
self.gacha_menu.enable()
self.status_changed = False
| GDcheeriosYT/Gentrys-Quest-Ursina | Screens/Selection/Selection.py | Selection.py | py | 5,041 | python | en | code | 1 | github-code | 13 |
40263793600 |
import pandas as pd
from sklearn.decomposition import PCA
import numpy as np
def main():
xTrain=pd.read_csv("xTrain_normal.csv")
xTest=pd.read_csv("xTest_normal.csv")
pca=PCA()
X_pca_train=pca.fit_transform(xTrain)
X_pca_test=pca.transform(xTest)
np.savetxt("pca_train.csv",X_pca_train,delimiter=",")
np.savetxt("pca_test.csv",X_pca_test,delimiter=",")
# find out how many components need to capture at least 0.95
count=0
sum=0
for variance in pca.explained_variance_ratio_:
sum+=variance
count+=1
if(sum>=0.95):
break
print(count,"components needed to capture at least 95% variance of the original data.")
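    # equivalent vectorized form (sketch):
    #   count = int(np.searchsorted(np.cumsum(pca.explained_variance_ratio_), 0.95) + 1)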
# check the first 3 components
pca=PCA(n_components=3)
X_pca=pca.fit_transform(xTrain)
print("-----First 3 principal components-----")
print(pca.components_)
if __name__ == "__main__":
main() | yikevding/cs334-machine-learning | hw5/q1b.py | q1b.py | py | 920 | python | en | code | 0 | github-code | 13 |
17380727562 | from random import randint
print('W E L C O M E T O')
print('*****' * 6 + '*')
print('*****ROCK, PAPER, SCISSORS*****')
print('*****' * 6 + '*')
t = ['Rock', 'Paper', 'Scissors']
computer = t[randint(0, 2)]
player = False
while player == False:
player = input('Rock[R], Paper[P], Scissors[S]?')
    if player.lower() == computer.lower():
        print('TIE!')
    elif player.lower() == 'rock':
        if computer.lower() == 'paper':
            print('You lose', computer, 'covers', player)
        else:
            print('YOU WIN!!', player, 'smashes', computer)
    elif player.lower() == 'paper':
        if computer.lower() == 'scissors':
            print('You lose', computer, 'cuts', player)
        else:
            print('YOU WIN!', player, 'covers', computer)
    elif player.lower() == 'scissors':
        if computer.lower() == 'rock':
            print('You lose.....', computer, 'smashes', player)
        else:
            print('YOU WIN!!!', player, 'cuts', computer)
    else:
        print('That is not a valid play! Check your spelling')
        player = False
        computer = t[randint(0, 2)]
| edake1/Programming-Projects | rock_paper_scissors.py | rock_paper_scissors.py | py | 1,833 | python | en | code | 0 | github-code | 13 |
44040932896 | fruits = []
fruits.append("mango")
fruits.append("apple")
fruits.append("banana")
fruits.append("kiwi")
print(fruits)
if 'Mango' in fruits:
print(1)
else:
print(0)
fruits.insert(2,"strawberry")
print(fruits)
dry_fruits=["almonde","cashew","walnut", "dates", "rasins"]
merge_list=[]
fruits.extend(dry_fruits)
print(fruits)
#merge_list=fruits
#print(merge_list)
fruits.sort()
print(fruits)
fruits.remove("dates")
print(fruits)
#fruits.remove("berybery")
#print(fruits)
fruits.sort(reverse=True)
print(fruits)
| itzzyashpandey/python-data-science | data structures/list_fun.py | list_fun.py | py | 564 | python | en | code | 0 | github-code | 13 |
20461267249 | #!/usr/bin/env python3
import valve.rcon
from flask import Flask, request, redirect
app = Flask(__name__)
fields_translate = {
"CPU": "srcds_cpu",
"NetIn": "srcds_netin",
"NetOut": "srcds_netout",
"Uptime": "srcds_uptime",
"Maps": "srcds_maps",
"FPS": "srcds_fps",
"Players": "srcds_players",
"Svms": "srcds_svms",
"+-ms": "srcds_varms",
"~tick": "srcds_tick",
}
fields_help = {
"srcds_status": "server status",
"srcds_cpu": "process niceness",
"srcds_netin": "received traffic, kbps",
"srcds_netout": "sent traffic, kbps",
"srcds_uptime": "server uptime, minutes",
"srcds_maps": "number of maps played on that server since it's start",
"srcds_fps": "server's tick: 10 on idle",
"srcds_players": "number of players",
"srcds_svms": "ms per sim frame",
"srcds_varms": "ms variance",
"srcds_tick": "time in ms per tick"
}
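# Example exposition output for a healthy scrape (illustrative values):
#   #HELP srcds_status server status
#   #TYPE srcds_status gauge
#   srcds_status 1
#   #HELP srcds_fps server's tick: 10 on idle
#   #TYPE srcds_fps gauge
#   srcds_fps 66.6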
@app.route('/')
def index():
return redirect('/metrics')
@app.route('/metrics')
def metrics():
host = request.args.get("ip")
port = request.args.get("port")
password = request.args.get("password")
out = []
out.append("#HELP srcds_status server status")
out.append("#TYPE srcds_status gauge")
try:
raw_data = valve.rcon.execute((host,int(port)), password, "stats")
(header, values) = raw_data.splitlines()
header_fields = header.split()
values_fields = values.split()
except Exception:
out.append("srcds_status 0")
return "\n".join(out)
    out.append("srcds_status 1")
    for i, name in enumerate(header_fields):
        f = fields_translate.get(name)
        if not f:
            # skip unmapped columns; the original `continue` never advanced i,
            # so an unknown header made the while-loop spin forever
            continue
        out.append("#HELP {} {}".format(f, fields_help[f]))
        out.append("#TYPE {} gauge".format(f))
        out.append("{} {}".format(f, values_fields[i]))
return "\n".join(out)
if __name__ == '__main__':
app.run(debug=False, port=9591, host='0.0.0.0') | ezskillgg/srcds-exporter | srcds-exporter.py | srcds-exporter.py | py | 1,999 | python | en | code | 0 | github-code | 13 |
8821564512 | from tkinter import *
from tkinter import ttk
from tkinter.font import *
import cassiopeia
from tracker import *
import os
import threading  # buttonClick below spawns a worker thread
import configparser
class Window:
def __init__(self):
global nicknameWindow
nicknameWindow = Tk()
init()
labelAboutPlayer()
inputField()
lastUsedSummoners()
buttons()
deleteUserButton()
serverList()
nicknameWindow.mainloop()
def getlist(value):
return value.split(os.linesep)
def lastUsedSummoners():
global config, lastSummonersList, configPath, temp
temp = []
lastSummonersList = []
config = configparser.ConfigParser(converters={"list": getlist})
dir_path = '%s\\Summoners Tracker\\' % os.environ['APPDATA']
if not os.path.exists(dir_path):
os.makedirs(dir_path)
configPath = dir_path + 'settings.ini'
if not os.path.exists(configPath):
config.write(open(configPath, 'w'))
else:
config.read(configPath)
if config.has_section('NICKNAMES'):
temp = config.items("NICKNAMES")
else:
config.add_section('NICKNAMES')
for i in range(len(temp)):
lastSummonersList.append(str(temp[i][0]))
inputNick['values'] = lastSummonersList
def init():
nicknameWindow.title("LoL Summoners Tracker")
screen_height = nicknameWindow.winfo_screenheight()
screen_width = nicknameWindow.winfo_screenwidth()
position = str(int(screen_width / 2) - 150) + '+' + str(int(screen_height / 2) - 200)
nicknameWindow.geometry('300x400+' + position)
nicknameWindow.resizable(False, False)
def updateSelectedRegion(event):
index = inputNick.current()
serverIndex = SERVERS.index(temp[index][1])
serversList.current(serverIndex)
def inputField():
global inputNick
labelNick = Label(nicknameWindow, text="Nickname", font=('Arial', 15), justify=CENTER)
labelNick.place(relx=0.5, y=60, anchor=CENTER)
inputNick = ttk.Combobox(nicknameWindow, font=('Arial', 18), width=15, justify='center')
inputNick.place(relx=0.5, y=100, anchor=CENTER)
inputNick.bind("<<ComboboxSelected>>", updateSelectedRegion)
def deleteUser():
if inputNick.get() != "":
if inputNick.get() in lastSummonersList:
selectedUserIndex = inputNick.current()
config.remove_option('NICKNAMES', temp[selectedUserIndex][0])
del temp[selectedUserIndex]
del lastSummonersList[selectedUserIndex]
with open(configPath, 'w') as configfile:
config.write(configfile)
inputNick['values'] = lastSummonersList
inputNick.set('')
def deleteUserButton():
deleteUserB = Button(nicknameWindow, text="Delete selected user", font=('Arial', 12), command=deleteUser, fg="red")
deleteUserB.place(relx=0.5, y=155, anchor=CENTER)
def buttons():
nextB = Button(nicknameWindow, text="Confirm", font=('Arial', 14),
command=buttonClick)
nextB.place(relx=0.5, y=300, anchor=CENTER)
def labelAboutPlayer():
global infoLabel
font = Font(family='Helvetica', size=12, weight="bold")
infoLabel = Label(nicknameWindow, text="", font=font, fg="red", anchor='center')
infoLabel.pack()
def addSummonerToList(summoner):
if summoner.name not in lastSummonersList:
config.set('NICKNAMES', summoner.name, serversList.get())
with open(configPath, 'w') as configfile:
config.write(configfile)
lastUsedSummoners()
def showSummoners(summoner):
infoLabel['text'] = ""
try:
if summoner.current_match.exists:
addSummonerToList(summoner)
teams = summoner.current_match
if summoner in teams.blue_team.participants:
Tracker(teams.red_team, summoner, nicknameWindow)
else:
Tracker(teams.blue_team, summoner, nicknameWindow)
infoLabel['text'] = ""
except:
infoLabel['text'] = summoner.name + " is not in game!"
def buttonClick():
if inputNick.get() != "":
summoner = cassiopeia.get_summoner(name=inputNick.get(), region=serversList.get())
if summoner.exists:
cassiopeia.set_default_region(serversList.get())
t = threading.Thread(target=showSummoners, args=[summoner])
t.start()
def serverList():
global SERVERS
SERVERS = [
"EUNE",
"EUW",
"NA",
"KR",
"JP",
"BR",
"LAN",
"LAS",
"OCE",
"RU",
"TR"
]
global serversList
font = Font(family='Helvetica', size=14)
serversList = ttk.Combobox(nicknameWindow, width=10, font=font, values=SERVERS, justify=CENTER)
serversList.place(relx=0.5, y=210, anchor=CENTER)
serversList.current(0)
| May2Beez/LoL-Summoners-Tracker | window.py | window.py | py | 4,791 | python | en | code | 0 | github-code | 13 |
9087968850 | #https://www.acmicpc.net/problem/2696
# Baekjoon 2696: running median (data structures)
#import sys
#input = sys.stdin.readline
import heapq
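# Minimal sketch of the two-heap invariant the solution below relies on
# (illustrative helper, not part of the submitted answer): `lo` is a negated
# max-heap holding the smaller half, `hi` a min-heap holding the larger half,
# and the running median is always -lo[0].
def _running_medians_demo(nums):
    lo, hi, out = [], [], []
    for i, x in enumerate(nums):
        if len(lo) == len(hi):
            heapq.heappush(lo, -heapq.heappushpop(hi, x))
        else:
            heapq.heappush(hi, -heapq.heappushpop(lo, -x))
        if (i + 1) % 2 == 1:  # the problem reports medians at odd prefixes
            out.append(-lo[0])
    return out
# e.g. _running_medians_demo([1, 5, 2, 10, -7]) -> [1, 2, 2]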
t = int(input())
for _ in range(t):
n = int(input())
nums = []
temp = n
while temp > 0 :
data = list(map(int, input().split()))
nums.extend(data)
temp -= 10
result = []
maxheap = []
minheap = []
for i in range(n):
if len(maxheap) != len(minheap):
heapq.heappush(minheap,nums[i])
else:
heapq.heappush(maxheap,-nums[i])
if minheap :
while minheap[0] < -maxheap[0]:
minnum = heapq.heappop(minheap)
maxnum = heapq.heappop(maxheap)
heapq.heappush(minheap,-maxnum)
heapq.heappush(maxheap,-minnum)
if i%2 == 0 :
result.append(-maxheap[0])
cnt = 0
print(len(result))
while cnt < len(result) :
if cnt+10 > len(result):
print(*result[cnt:len(result)])
else:
print(*result[cnt: cnt+10])
cnt += 10 | MinsangKong/DailyProblem | 07-20/4-1.py | 4-1.py | py | 1,129 | python | en | code | 0 | github-code | 13 |
37945008218 | ###############################################################
# 25/02/2007 Andrzej Olszewski
# jobOptions to run Hydjet generation
# Random number seed setting via nseed
# 15/03/2008 Andrzej Olszewski
# Updated for configurables
#==============================================================
###############################################################
#
# An example of job options file for Hydjet generation of
# Pb + Pb collisions at 5520 GeV/(colliding nucleon pair)
#
#==============================================================
# General Application Configuration options
#--------------------------------------------------------------
include( "AthenaPoolCnvSvc/WriteAthenaPool_jobOptions.py" )
from AthenaCommon.Configurable import Configurable
from AthenaPoolCnvSvc.WriteAthenaPool import AthenaPoolOutputStream
# configuring the Athena application for a 'generator' job
import AthenaCommon.AtlasUnixGeneratorJob
# make sure we are loading the ParticleProperty service
from PartPropSvc.PartPropSvcConf import PartPropSvc
svcMgr += PartPropSvc()
#--------------------------------------------------------------
# Private Application Configuration options
#--------------------------------------------------------------
## get a handle on the top sequence of algorithms
from AthenaCommon.AlgSequence import AlgSequence
topAlg = AlgSequence()
from Hydjet_i.Hydjet_iConf import Hydjet
topAlg += Hydjet()
from TruthExamples.TruthExamplesConf import PrintHijingPars
topAlg += PrintHijingPars()
StoreGateSvc = svcMgr.StoreGateSvc
StoreGateSvc.Dump = TRUE
# Set output level threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )
svcMgr.MessageSvc.OutputLevel = 3
svcMgr.MessageSvc.defaultLimit = 100000
#--------------------------------------------------------------
# Event related parameters
#--------------------------------------------------------------
# Number of events to be processed (default is 10)
theApp.EvtMax = 10
# Set run number (default 0 causes problems)
svcMgr.EventSelector.RunNumber = 12345
svcMgr.EventSelector.FirstEvent = 1
#--------------------------------------------------------------
# Algorithms Private Options
#--------------------------------------------------------------
# Use fixed seeds for reproducibility
seed1 = 123456789
seed1 &= 0xfffffffe
Hydjet = Algorithm( "Hydjet" )
Hydjet.McEventKey = "GEN_EVENT"
Hydjet.Initialize = ["e 5520", "a 208", "nh 20000",
#"ifb 0", "bfix 0", # fixed impact [fm]
"ifb 1", "bmin 0", "bmax 30", # impact range [fm]
"nhsel 2", "ishad 1", "ptmin 10.0",
"ylfl 4.0", "ytfl 1.5", "tf 0.1", "fpart 1",
"ienglu 0", "ianglu 0", "t0 1", "tau0 0.1", "nf 0",
"mstp 51 7", "mstp 81 0", "mstu 21 1", "paru 14 1.0",
"msel 1", "nseed "+str(seed1) ]
#---------------------------------------------------------------
# Pool Persistency
#---------------------------------------------------------------
from AthenaPoolCnvSvc.WriteAthenaPool import AthenaPoolOutputStream
Stream1 = AthenaPoolOutputStream( "StreamEVGEN" )
theApp.Dlls += [ "GeneratorObjectsAthenaPoolPoolCnv" ]
PoolSvc = Service( "PoolSvc" )
# Name of the output file
Stream1.OutputFile = "hydjet.test.pbpb5520.r12345.pool.root"
# 2101 == EventInfo
# 133273 == MCTruth (HepMC)
# 54790518 == HijigEventParams
Stream1.ItemList += [ "2101#*" ]
Stream1.ItemList += [ "133273#*" ]
Stream1.ItemList += [ "54790518#*" ]
#==============================================================
#
# End of job options file
#
###############################################################
| rushioda/PIXELVALID_athena | athena/Generators/Hydjet_i/share/hydjet.minbias.pbpb5520.r12345.job.py | hydjet.minbias.pbpb5520.r12345.job.py | py | 3,686 | python | en | code | 1 | github-code | 13 |
4788017218 | import re
text = input()
matched = re.finditer(r"(^|(?<=\s))-?([0]|[1-9][0-9]*)(\.[0-9]+)?($|(?=\s))", text)  # \. makes the decimal point literal
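# Illustrative behaviour: given "1 -3.5 007 a1 2." only "1" and "-3.5" match;
# leading zeros, digits glued to letters, and a trailing bare dot are rejected.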
output = []
for match in matched:
output.append(match.group())
print(" ".join(output)) | Iskren-Dimitrov/SoftUni_Python_Fundamentals | lab_regular_expressions/match_numbers.py | match_numbers.py | py | 202 | python | en | code | 0 | github-code | 13 |
36581334432 | import pynput
from pynput.keyboard import Key, Listener
keys = []
def on_press(key):
keys.append(key)
write_file(keys)
def write_file(keys):
with open('log.txt', 'w') as f:
for key in keys:
            # strip the quotes that str(key) puts around character keys
k = str(key).replace("'", "")
f.write(k)
#explicitly adding a space after every keystroke for readability
f.write(' ')
def on_release(key):
if key == Key.delete:
return False
with Listener(on_press = on_press, on_release = on_release) as listener:
listener.join() | hastagAB/Awesome-Python-Scripts | Keylogger/script.py | script.py | py | 526 | python | en | code | 1,776 | github-code | 13 |
21358514143 | import time
import cv2
import os
import torch
import clip
import numpy as np
from PIL import Image
from scipy import spatial
from config import CLIP_MODEL_PATH, OP_NUM_THREADS
from service.image_infer import get_ui_infer
from dbnet_crnn.image_text import ImageText
from service.image_utils import get_roi_image, img_show, get_image_patches, proposal_fine_tune, get_infer_area
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
import onnxruntime
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
def cosine_similar(l1, l2):
return 1 - spatial.distance.cosine(l1, l2)
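# e.g. cosine_similar([1, 0], [1, 1]) ~= 0.707 (cos 45 deg); identical vectors give 1.0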
def _convert_image_to_rgb(image):
return image.convert("RGB")
def target_roi_text_diff_rate(target_img, source_img, proposals, tp):
image_text = ImageText()
count = 0
target_l = target_img.shape[0] if target_img.shape[0] > target_img.shape[1] else target_img.shape[1]
target_text = image_text.get_text(target_img, target_l)
x1, y1, x2, y2 = list(map(int, proposals[tp[-1]]['elem_det_region']))
roi = get_roi_image(source_img, [[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
roi_l = roi.shape[0] if roi.shape[0] > roi.shape[1] else roi.shape[1]
source_text = image_text.get_text(roi, roi_l)
for t in target_text:
for s in source_text:
if t['text'] in s['text'] or s['text'] in t['text']:
count = count + 1
break
rate = count / len(target_text) if len(target_text) > 0 else 0
return rate
def filter_patches(target_image, patches):
result = []
target_h, target_w, _ = target_image.shape
for patch in patches:
x0, y0, x1, y1 = patch['elem_det_region']
patch_w = x1 - x0
patch_h = y1 - y0
if abs((patch_w / patch_h) - (target_w / target_h)) < 0.5:
result.append(patch)
return result
def get_proposals(target_image, source_image_path, provider="ui-infer"):
"""
选择区域来源,只需提供位置
"""
# ui-infer,模型推理
if provider == 'ui-infer':
image_infer_result = get_ui_infer(source_image_path, 0.01)
# patches,滑动窗口
else:
h, w, _ = target_image.shape
resolution_map = {'M': [0.6, 0.6], 'N': [0.5, 0.5]}
image_infer_result = []
for k in resolution_map.keys():
resolution = resolution_map[k]
source_img = cv2.imread(source_image_path)
_h, _w, _ = source_img.shape
if w >= _w:
ratio = _w / w
target_image = cv2.resize(target_image, (0, 0), fx=ratio, fy=ratio)
h, w, _ = target_image.shape
resolution[0] = round(resolution[0]/2, 1) if _w / w < 6 else resolution[0]
resolution[1] = round(resolution[1]/2, 1) if _h / h < 6 else resolution[1]
image_infer_result.extend(get_image_patches(source_img, w, h, resolution[0], resolution[1]))
image_infer_result = filter_patches(target_image, image_infer_result)
return image_infer_result
class ImageTrace(object):
def __init__(self):
self.device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {self.device}.\nStart loading model")
self.n_px = 224
self.template_target_image = np.zeros([100, 100, 3], dtype=np.uint8) + 100
self.preprocess = self._get_preprocess()
so = onnxruntime.SessionOptions()
so.intra_op_num_threads = OP_NUM_THREADS
cuda_provider = 'CUDAExecutionProvider'
provider = cuda_provider if cuda_provider in onnxruntime.get_available_providers() and self.device == 'cuda' \
else 'CPUExecutionProvider'
print(f"ORT provider: {provider}")
self.ort_sess = onnxruntime.InferenceSession(CLIP_MODEL_PATH, sess_options=so,
providers=[provider])
print("Finish loading")
def _get_preprocess(self):
return Compose([
Resize(self.n_px, interpolation=BICUBIC),
CenterCrop(self.n_px),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def search_image(self, target_image_info: dict, source_image_path, top_k, image_alpha, text_alpha, provider):
        top_k = top_k  # maximum number of matches to return
        image_alpha = image_alpha  # weight of the image-similarity term
        text_alpha = text_alpha  # weight of the text-similarity term
roi_list = []
img_text_score = []
target_image = target_image_info.get('img', self.template_target_image)
target_image = cv2.imread(target_image) if isinstance(target_image, str) else target_image
target_image_desc = target_image_info.get('desc', '')
source_image = cv2.imread(source_image_path)
proposals = get_proposals(target_image, source_image_path, provider=provider)
text = clip.tokenize([target_image_desc])
        # Crop each candidate region from the source image
for roi in proposals:
x1, y1, x2, y2 = list(map(int, roi['elem_det_region']))
roi = get_roi_image(source_image, [[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
img_pil = Image.fromarray(cv2.cvtColor(roi, cv2.COLOR_BGR2RGB))
roi_list.append(self.preprocess(img_pil))
        # Compute matching feature vectors for image and text
img_pil = Image.fromarray(cv2.cvtColor(target_image, cv2.COLOR_BGR2RGB))
target_image_input = self.preprocess(img_pil).unsqueeze(0).clone().detach()
source_image_input = torch.tensor(np.stack(roi_list))
_, logits_per_text, source_image_features, = self.ort_sess.run(
["LOGITS_PER_IMAGE", "LOGITS_PER_TEXT", "onnx::Mul_3493"],
{"IMAGE": source_image_input.numpy(), "TEXT": text.numpy()}
)
probs = torch.from_numpy(logits_per_text).softmax(dim=-1).cpu().numpy()
if image_alpha != 0:
target_image_features = self.ort_sess.run(
["onnx::Mul_3493"], {"IMAGE": target_image_input.numpy(), "TEXT": text.numpy()}
)
else:
            target_image_features = np.zeros([len(target_image_input)], dtype=np.uint8)
        # Combine the image and text scores
for i, source_image_feature in enumerate(source_image_features):
score = cosine_similar(target_image_features[0][0], source_image_feature) if image_alpha != 0 else 0
img_text_score.append(score * image_alpha + probs[0][i] * text_alpha)
max_confidence = round(np.max(img_text_score) / (image_alpha + text_alpha), 3)
target_image_infer = get_ui_infer(target_image, 0.1)
target_area_rate = get_infer_area(target_image_infer) / (target_image.shape[0] * target_image.shape[1])
score_norm = (img_text_score - np.min(img_text_score)) / (np.max(img_text_score) - np.min(img_text_score))
top_k_ids = np.argsort(score_norm)[-top_k:]
proposal_fine_tune(score_norm, proposals, 0.9)
# CLIP refer text in image
if target_area_rate < 0.1 and provider == 'patches':
text_rate = target_roi_text_diff_rate(target_image, source_image, proposals, np.argsort(score_norm)[:])
max_confidence = max_confidence - 0.08*(1-text_rate)
return top_k_ids, score_norm, proposals, max_confidence
def get_trace_result(self, target_image_info, source_image_path, top_k=3, image_alpha=1.0,
text_alpha=0.6, proposal_provider='ui-infer'):
top_k_ids, scores, infer_result, max_confidence = self.search_image(
target_image_info, source_image_path, top_k, image_alpha, text_alpha, proposal_provider
)
print(f"Max confidence:{max_confidence}")
cls_ids = np.zeros(len(top_k_ids), dtype=int)
boxes = [infer_result[i]['elem_det_region'] for i in top_k_ids]
scores = [float(scores[i])*max_confidence for i in top_k_ids]
image_show = img_show(cv2.imread(source_image_path), boxes, scores, cls_ids, conf=0.5, class_names=['T'])
return image_show
def video_target_track(self, video_path, target_image_info, work_path):
video_cap = cv2.VideoCapture(video_path)
_, im = video_cap.read()
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
im_save_path = os.path.join(work_path, 'im_temp.png')
video_out_path = os.path.join(work_path, 'video_out.mp4')
out = cv2.VideoWriter(video_out_path, fourcc, 20, (im.shape[1], im.shape[0]))
i = 0
while 1:
i = i + 1
if i % 2 == 0:
continue
print(f"video parsing {i}")
ret, im = video_cap.read()
if ret:
cv2.imwrite(im_save_path, im)
trace_result = self.get_trace_result(target_image_info, im_save_path, top_k=1)
out.write(trace_result)
else:
print("finish.")
out.release()
break
image_trace = ImageTrace()
def trace_target_video():
target_image_info = {
'path': "./capture/local_images/img_play_icon.png",
'desc': "picture with play button"
}
target_image_info['img'] = cv2.imread(target_image_info['path'])
video_path = "./capture/local_images/video.mp4"
work_path = './capture/local_images'
image_trace.video_target_track(video_path, target_image_info, work_path)
def search_target_image():
"""
# robust target image search
"""
    # image-matching weight
image_alpha = 0.0
    # text-description weight
text_alpha = 1.0
    # maximum number of matched targets
top_k = 1
    # for debugging: build a synthetic target image
target_img = np.zeros([100, 100, 3], dtype=np.uint8)+255
cv2.putText(target_img, 'Q', (30, 60), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 0, 0), thickness=3)
# target_img = cv2.imread("./capture/local_images/mario.png")
    # natural-language description of the target
desc = "mario"
target_image_info = {'img': target_img, 'desc': desc}
source_image_path = "./capture/image_2.png"
trace_result_path = "./capture/local_images/"
if not os.path.exists(trace_result_path):
os.mkdir(trace_result_path)
    # search for the target
t1 = time.time()
image_trace_show = image_trace.get_trace_result(target_image_info, source_image_path, top_k=top_k,
image_alpha=image_alpha, text_alpha=text_alpha,
proposal_provider='ui-infer')
print(f"Infer time:{round(time.time() - t1, 3)} s", )
cv2.imwrite(trace_result_path+'trace_result.png', image_trace_show)
print(f"Result saved {trace_result_path}")
if __name__ == '__main__':
assert os.path.exists(CLIP_MODEL_PATH)
search_target_image()
| Meituan-Dianping/vision-ui | service/image_trace.py | image_trace.py | py | 10,854 | python | en | code | 185 | github-code | 13 |
17703732109 | from django.core.management.base import BaseCommand, CommandError
from apps.orders.models import Order, OrderItem
from apps.shipments.models import Shipment, ShipmentLog
from django.utils import timezone
from django.contrib.auth.models import User
import requests
import json
import datetime
API_Key = 'b313e7c9662a02870c0d1b8a0cb9e683'
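
def save_log(params, response):
    # Hedged sketch: save_log is called below but was not defined in this
    # snippet, and ShipmentLog's real fields are unknown. This stand-in only
    # prints the request/response pair; swap in the actual persistence logic.
    print(timezone.now(), json.dumps(params), response.status_code)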
class Command(BaseCommand):
def handle(self, *args, **options):
params = {
"apiKey": API_Key,
"modelName": "TrackingDocument",
"calledMethod": "getStatusDocuments",
"methodProperties": {
"Documents": [
{
"DocumentNumber": Shipment.objects.values_list('invoice_id', flat=True).get(public_id=1),
"Phone": Order.objects.values_list('phone', flat=True).get(public_id=1)
}
]
}
}
response = requests.post('http://testapi.novaposhta.ua/v2.0/en/documentsTracking/json', json=params,
headers={'Content-Type': 'application/json'})
if response.status_code == 200:
save_log(params, response)
print(response.text)
else:
save_log(params, response)
print(response.status_code)
| oshevelo/sep_py_shop | FirstShop/apps/shipments/management/commands/deliverytrack.py | deliverytrack.py | py | 1,310 | python | en | code | 0 | github-code | 13 |
13284799256 | import random
import time
import os
print("你好,现在你有10秒钟的时间记忆下列物品及其编号")
things=["apple","banana","orange","pear","kiwi","pomelo","Houkui tea","Tieguanyin tea","writing brush","Xuan paper"]
for i in range(10):
print(i,":",things[i])
time.sleep(10)
os.system("cls")
n=0
t2=random.sample(things,5)
for i in t2:
    ans=int(input("The number for "+i+" is: "))
if i==things[ans]:
n=n+1
print("\n你一共答对了",n,"次")
input("\n按回车键结束程序")
| hesongji/python- | 小游戏.py | 小游戏.py | py | 509 | python | en | code | 1 | github-code | 13 |
36733558654 | from django.http import HttpResponse,HttpResponseNotFound,HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response,get_object_or_404,get_list_or_404
from django.core.urlresolvers import reverse
from django.contrib.auth import authenticate,login,logout
from django.contrib.auth.decorators import login_required
#import facebook.djangofb as facebook
#from facebookconnect.models import FacebookProfile
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from community.forms import RegistrationForm
from django.contrib.auth.forms import AuthenticationForm,PasswordChangeForm
from django.views.decorators.cache import never_cache
from reversion.models import Revision
from boycott import settings
from target.models import Campaign,Company,Product,Store,ProductAction,CompanyAction
from community.models import UserProfile
from community.forms import UserProfileForm
@never_cache
def login_view(request):
"Displays the login form and handles the login action."
redirect_to = request.REQUEST.get('next')
if request.method == "POST":
form = AuthenticationForm(data=request.POST)
if form.is_valid():
# Light security check -- make sure redirect_to isn't garbage.
if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
from django.contrib.auth import login
login(request, form.get_user())
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
return HttpResponseRedirect(redirect_to)
else:
#print "Invalid Login"
pass
else:
form = AuthenticationForm(request)
request.session.set_test_cookie()
return render_to_response("registration/login.html", {'form': form}, context_instance=RequestContext(request))
def logout_view(request):
logout(request)
#clear cache?
return HttpResponseRedirect("/")
@login_required
def change_password_view(request):
if request.method == "POST":
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse("auth_login"))
else:
form = PasswordChangeForm(request.user)
return render_to_response('community/change_password.html', {'form': form}, context_instance=RequestContext(request))
def register_view(request):
redirect_to = request.REQUEST.get('next')
if request.method == 'POST':
form = RegistrationForm(data=request.POST)
if form.is_valid():
opts = {}
opts['use_https'] = request.is_secure()
opts['email_template_name'] = 'registration/registration_email.txt'
form.save(**opts) #create the user object
user = User.objects.get(username=form.cleaned_data['username'])
User.set_password(user,form.cleaned_data['password']) #hash password for the database
#log in immediately
#should use authenticate() here to set backend automatically
#but that seems to conflict with facebook connect
#so do it manually
user.backend='django.contrib.auth.backends.ModelBackend'
login(request, user)
# Light security check -- make sure redirect_to isn't garbage.
if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
return HttpResponseRedirect(redirect_to)
else:
form = RegistrationForm()
return render_to_response('registration/registration_form.html',
{'form': form},
RequestContext(request))
def xd_receiver(request):
'''For Facebook login'''
return render_to_response('facebook/xd_receiver.html')
def user_view_all(request):
    users = User.objects.all().order_by("-date_joined")
    fb = None  # the Facebook lookup below is commented out; default so the render context works
# request_fb_profile = FacebookProfile.objects.get(user=request.user)
# if (request_fb_profile):
# fb = "You are logged in to Facebook as %s" % request_fb_profile.full_clean()
# #a facebook user is logged in,
# #use their credentials to pull info from fb users into django users
# for p in FacebookProfile.objects.all():
# u = p.user
# if u.first_name is not None:
# u.first_name = p.first_name
# if u.last_name is not None:
# u.last_name = p.last_name
# if u.email is not None:
# u.email = p.email
# u.save()
#TODO paginate
return render_to_response('community/user_list.html',
{'users':users,'fb_logged_in':fb},
context_instance = RequestContext(request))
def user_view(request,username):
u = get_object_or_404(User,username=username)
my_profile = u.profile
my_campaigns_started = Campaign.objects.filter(added_by=u)
my_companies_added = Company.objects.filter(added_by=u)
my_products_added = Product.objects.filter(added_by=u)
my_stores_added = Store.objects.filter(added_by=u)
my_campaigns_joined = my_profile.campaigns.all()
my_product_actions = ProductAction.objects.select_related('product').filter(campaign__in=my_campaigns_joined)
my_company_actions = CompanyAction.objects.select_related('company').filter(campaign__in=my_campaigns_joined)
my_revisions = Revision.objects.select_related('version').filter(user=u).order_by("-date_created")[:10]
#clean up list
return render_to_response('community/user_single.html',
{'the_user':u, #don't use "user", because that will overwrite the request context user,
'my_campaigns':my_campaigns_started,
'my_companies':my_companies_added,
'my_products':my_products_added,
'my_stores':my_stores_added,
'campaigns_joined':my_campaigns_joined,
'company_actions':my_company_actions,
'product_actions':my_product_actions,
'revisions':my_revisions},
context_instance = RequestContext(request))
@login_required
def user_edit(request,username):
#only let people edit their own profiles
if request.user.username != username:
return HttpResponseRedirect(request.user.profile.get_absolute_url() + 'edit')
profile = get_object_or_404(UserProfile,user__username=username)
if request.POST:
form = UserProfileForm(request.POST,instance=profile)
if form.is_valid():
profile = form.save()
return HttpResponseRedirect(profile.get_absolute_url())
else:
message = "Please correct the errors below"
else:
form = UserProfileForm(instance=profile)
message = "Edit your user profile below"
return render_to_response("community/user_profile_edit.html",
{"message":message,"form": form},
context_instance = RequestContext(request))
#@facebook.require_login()
def facebook_canvas(request):
if request.user.is_anonymous():
#we need to make fb/django connection
if request.method == "POST":
form = AuthenticationForm(data=request.POST)
if form.is_valid():
from django.contrib.auth import login
login(request, form.get_user())
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
#we're logged in, go ahead with rest of render
else:
form = AuthenticationForm(request)
request.session.set_test_cookie()
return render_to_response('facebook/please_login.html',{'form': form}, context_instance=RequestContext(request))
my_profile = request.user.profile
my_campaigns = my_profile.campaigns.all()
my_product_actions = ProductAction.objects.select_related('product').filter(campaign__in=my_campaigns)
my_company_actions = CompanyAction.objects.select_related('company').filter(campaign__in=my_campaigns)
my_revisions = Revision.objects.select_related('version').filter(user=request.user).order_by("-date_created")[:10]
return render_to_response('community/canvas.fbml',{'campaigns':my_campaigns,
'company_actions':my_company_actions,
'product_actions':my_product_actions,
'revisions':my_revisions},
context_instance = RequestContext(request))
def recent_edits(request):
edits = Revision.objects.select_related('version').all().order_by("-date_created")[:25]
return render_to_response('community/recent_edits.html',{'edits':edits},
context_instance = RequestContext(request))
def stats(request):
revisions = Revision.objects.select_related('version').all().order_by("user")
users = {}
for r in revisions:
        if r.user in users:
            users[r.user] += 1
        else:
            users[r.user] = 1
return render_to_response('community/stats.html',{'users':users},
context_instance = RequestContext(request))
| jlev/Boycott-Toolkit | community/views.py | views.py | py | 9,679 | python | en | code | 6 | github-code | 13 |
30138655120 | import sys
def find_max_crossing_subarray(A, low, mid, high):
left_sum = -sys.maxsize
sum = 0
max_left = -1
for i in range(mid, low-1, -1): # important that you go "downto", because you need to know what comes AFTER idx i (we are finding the CROSS sum)
sum += A[i]
if sum > left_sum:
left_sum = sum
max_left = i
right_sum = -sys.maxsize
sum = 0
max_right = -1
for i in range(mid+1, high+1): # Important the you go "upto" because you need to know what comes BEFORE idx i (we are finding the CROSS sum)
sum += A[i]
if sum > right_sum:
right_sum = sum
max_right = i
return (max_left, max_right, left_sum+right_sum)
def find_maximum_subarray(A,low, high):
if high == low: # If there is only one element left
return (low,high,A[low]) # return it
mid = (low + high) // 2
(left_low, left_high, left_sum) = find_maximum_subarray(A, low, mid) # Find highest left sum
(right_low, right_high, right_sum) = find_maximum_subarray(A, mid+1, high) # Find highest right sum
(cross_low, cross_high, cross_sum) = find_max_crossing_subarray(A, low, mid, high) # Find highest cross sum
if left_sum >= right_sum and left_sum >= cross_sum: # If left sum is highest, return it
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum: # If right sum is highest, return it
return right_low, right_high, right_sum
return cross_low, cross_high, cross_sum # If cross sum is highest, return it
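
# Running time follows the recurrence T(n) = 2T(n/2) + Theta(n),
# which solves to Theta(n log n).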
testArray = [10,15,20,-5,30,-20,-10,-30,5,40]
print(find_maximum_subarray(testArray,0,len(testArray)-1)) # WORKS :D
| Xtrah/TDT4120 | Python/DivideAndConquer/MaximumSubarray.py | MaximumSubarray.py | py | 1,696 | python | en | code | 6 | github-code | 13 |
35345446024 | import os
from PySide import QtCore, QtGui
from shiboken import wrapInstance
import Pipeline.UI.main_ui as master_ui
import maya.OpenMayaUI as omui
from Pipeline.media.UI_Converter import Convert_Ui
import Pipeline
class Converter(object):
def __init__(self):
# all the relative folders for our project
self.directory_of_ui = os.path.dirname(os.path.realpath(__file__))
self.directory_of_root = os.path.join(self.directory_of_ui,'..')
self.directory_of_media = os.path.join(self.directory_of_ui,'..','media')
self.directory_of_submodules = os.path.join(self.directory_of_ui,'..','submodules')
self.directory_of_main = os.path.join(self.directory_of_ui,'..','main')
self.directory_of_games_2d = os.path.join(self.directory_of_ui,'..','submodules','games_2d')
self.directory_of_games_3d = os.path.join(self.directory_of_ui,'..','submodules','games_3d')
def convert_ui(self):
files_in_dir = os.listdir(self.directory_of_media)
files_to_convert = []
for file_in_dir in range(0,len(files_in_dir)):
if os.path.splitext(files_in_dir[file_in_dir])[1] == '.ui':
files_to_convert+=[files_in_dir[file_in_dir]]
for file_to_convert in files_to_convert:
file_name = os.path.splitext(file_to_convert)[0]
input_name = os.path.join(self.directory_of_media,file_to_convert)
output_name = os.path.join(self.directory_of_ui,file_name+'.py')
make_convert = Convert_Ui()
make_convert.convert(input_name,output_name)
lets_convert_this = Converter()
lets_convert_this.convert_ui()
reload(Pipeline.UI.main_ui)
def maya_main():
maya_main_window_pointer = omui.MQtUtil.mainWindow()
return wrapInstance(long(maya_main_window_pointer), QtGui.QWidget)
class Main_ui_controller(QtGui.QMainWindow):
def __init__(self, parent=None):
super(Main_ui_controller, self).__init__(parent)
self.setWindowFlags(QtCore.Qt.Tool)
self.myui = master_ui.Ui_MainWindow()
self.myui.setupUi(self)
#self.ui.pushButton.clicked.connect(self.someFunc)
def set_up_art(self):
pass
def push_import_button(self):
pass
main_ui = Main_ui_controller(parent=maya_main())
main_ui.show()
| underminerstudios/ScriptBackup | MayaAssetPipeline/Maya_Tools/Pipeline/UI/Pipeline_Gui.py | Pipeline_Gui.py | py | 2,409 | python | en | code | 2 | github-code | 13 |
27342569784 | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import json
INFO = {
'version': '0.4-dev',
}
def main():
"Run functions specified on the command line"
if len(sys.argv) <= 1:
raise SystemExit("no command(s) specified")
cmds = sys.argv[1:]
if '-h' in cmds or '--help' in cmds:
raise SystemExit("usage: " + sys.argv[0] + " <func-name> [<func-name>]")
glbs = globals()
for cmd in cmds:
if cmd not in glbs:
raise SystemExit(cmd + " not found")
for cmd in cmds:
if callable(glbs[cmd]):
glbs[cmd]()
else:
raise SystemExit(cmd + " not callable")
def metadata(path="pyne/metadata.json"):
"""Build a metadata file."""
md = {}
md.update(INFO)
# FIXME: Add the contents of CMakeCache.txt to the metadata dictionary
# write the metadata file
with open(path, 'w') as f:
json.dump(md, f, indent=2)
return md
def final_message(success=True):
if success:
return
metadata = None
mdpath = os.path.join('pyne', 'metadata.json')
if os.path.exists(mdpath):
with open(mdpath) as f:
metadata = json.load(f)
if metadata is not None:
msg = "\n\nCURRENT METADATA:\n"
for k, v in sorted(metadata.items()):
msg += " {0} = {1}\n".format(k, repr(v))
print(msg[:-1])
if os.name != 'nt':
return
try:
import tables as tb
h5ver = tb.getHDF5Version()
except ImportError:
h5ver = '1.8.5-patch1'
msg = ("\n\nUSAGE: "
"python setup.py <distutils-args> [-- <cmake-arg>] [-- <make-args>]\n"
"CMake and make command line arguments are optional, but must be preceeded "
"by '--'.\n"
"\n\nIf compilation is failing with HDF5 issues please try the "
"following steps:\n\n"
" 1. Install EPD [1].\n"
" 2. Download the HDF5 Windows binarys from [2].\n"
" 3. Unzip them to the C-drive (C:\\hdf5-{h5ver}).\n"
" 4. Re-run setup with the '--hdf5' option:\n\n"
" python setup.py install --user --hdf5=C:\\hdf5-{h5ver}\n\n"
"Should this still fail, please report your problem to pyne-dev@googlegroups.com\n\n"
"[1] http://www.enthought.com/products/epd.php\n"
"[2] http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-{h5ver}/bin/windows/\n"
).format(h5ver=h5ver)
print(msg)
def setup():
from distutils import core
scripts = [os.path.join('scripts', f) for f in os.listdir('scripts')]
scripts = [s for s in scripts if (os.name == 'nt' and s.endswith('.bat')) or
(os.name != 'nt' and not s.endswith('.bat'))]
packages = ['pyne', 'pyne.lib', 'pyne.dbgen', 'pyne.apigen', 'pyne.xs',
'pyne.transmute', 'pyne.gui', 'pyne.cli']
pack_dir = {
'pyne': 'pyne',
'pyne.xs': 'pyne/xs',
'pyne.lib': 'pyne/lib',
'pyne.gui': 'pyne/gui',
'pyne.cli': 'pyne/cli',
'pyne.dbgen': 'pyne/dbgen',
'pyne.apigen': 'pyne/apigen',
'pyne.transmute': 'pyne/transmute',
}
extpttn = ['*.dll', '*.so', '*.dylib', '*.pyd', '*.pyo']
pack_data = {
'pyne': ['*.pxd', 'include/*.h', 'include/*.pxi', 'include/*/*.h', '*.inp',
'include/*/*/*.h', 'include/*/*/*/*.h', '*.json', '_includes/*.txt',
'_includes/*.pxd', '_includes/*/*', '_includes/*/*/*'] + extpttn,
'pyne.xs': ['*.pxd'] + extpttn,
'pyne.lib': extpttn,
'pyne.gui': ['*.pyw'],
'pyne.dbgen': ['*.html', '*.csv', 'abundances.txt', 'mass.mas12'],
}
setup_kwargs = {
"name": "pyne",
"version": INFO['version'],
"description": 'The Nuclear Engineering Toolkit',
"author": 'PyNE Development Team',
"author_email": 'scopatz@gmail.com',
"url": 'http://pyne.github.com/',
"packages": packages,
"package_dir": pack_dir,
"package_data": pack_data,
"scripts": scripts,
}
rtn = core.setup(**setup_kwargs)
if __name__ == "__main__":
main()
| teamsspaul/NUEN629 | Lab_Paste/NUEN629/LABS/LAB0/pyne-0.4/configure.py | configure.py | py | 4,236 | python | en | code | 1 | github-code | 13 |
16184350253 | """
AC (https://www.acmicpc.net/problem/5430)
- The available functions are R (reverse) and D (drop).
- R reverses the order of the numbers in the array, and D discards the first
  number. Applying D to an empty array is an error.
- Functions may be combined into a single string.
- Input: the number of test cases T (at most 100),
         the function string p to execute (1 <= len(p) <= 100,000),
         the number of elements n in the array (0 <= n <= 100,000),
         and the array itself in the form [x1, ..., xn] (1 <= xi <= 100).
         The total length of p and the total n over all test cases do not exceed 700,000.
- Output: for each test case, print the array that results from applying the
          functions to the input array. If an error occurs, print "error".
"""
from sys import stdin
from collections import deque
t = int(stdin.readline().strip())
for _ in range(t):
p = list(stdin.readline().strip())
n = int(stdin.readline().strip())
data = stdin.readline()[1:-2].split(",")
q = deque(data)
    rev = 0
    flag = 0
    if n == 0:
        q = []
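    # Instead of physically reversing the deque on every R (O(n) per reversal),
    # track the reversal parity in `rev` and pop from the matching end on D.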
for command in p:
if command == 'R':
rev += 1
elif command == 'D':
if len(q) < 1:
flag = 1
print("error")
break
else:
if rev % 2 == 0:
q.popleft()
else:
q.pop()
if flag == 0:
if rev % 2 == 0:
print("[" + ",".join(q) + "]")
else:
q.reverse()
print("[" + ",".join(q) + "]")
| akana0321/Algorithm | BaekJoon/Implementation/AC_5430.py | AC_5430.py | py | 1,822 | python | ko | code | 0 | github-code | 13 |
39115949086 | import os
import yaml
from typing import Dict, Any
class KaggleDbtSourceTableColumn:
"""A class representig a dbt source column, enriched with the kaggle metadata"""
def __init__(self, dbt_yaml: Dict[str, Any]):
"""Constructs all necessary attributes for the object from a parsed dby .yml file"""
self.name = dbt_yaml['name']
self.data_type = dbt_yaml['data_type']
self.kaggle_column_name = dbt_yaml['meta']['kaggle_column_name']
class KaggleDbtSourceTable:
"""A class representig a dbt source table, enriched with the kaggle metadata"""
def __init__(self, dbt_yaml: Dict[str, Any], schema: str):
"""Constructs all necessary attributes for the object from a parsed dby .yml file"""
self.name = dbt_yaml['name']
self.schema = schema
self.kaggle_file_name = dbt_yaml['meta']['kaggle_file_name']
self.columns = {c['name']: KaggleDbtSourceTableColumn(c) for c in dbt_yaml.get('columns', [])}
@property
def qualified_name(self) -> str:
"""Returns the qualified name for the source table"""
return f'{self.schema}.{self.name}'
def get_kaggle_to_dbt_mapping(self) -> Dict[str, str]:
"""Gets the mapping from the original names in the kaggle dataset to the sanitized names"""
return {c.kaggle_column_name: c.name for c in self.columns.values()}
class KaggleDbtSource:
"""A class representig a dbt source, enriched with the kaggle metadata. Can contain one or more tables"""
def __init__(self, dbt_yaml: Dict[str, Any]):
"""Constructs all necessary attributes for the object from a parsed dby .yml file"""
yaml_dbt_source = dbt_yaml['sources'][0]
self.name = yaml_dbt_source['name']
self.schema = yaml_dbt_source['schema']
# Pull the kaggle dataset name & parse it
kaggle_dataset_owner, kaggle_dataset_name = yaml_dbt_source['meta']['kaggle_dataset'].split('/')
self.kaggle_owner = kaggle_dataset_owner
self.kaggle_name = kaggle_dataset_name
# Configs for the CSV
self.delimiter = yaml_dbt_source['meta'].get('delimiter', '|')
self.null_value = yaml_dbt_source['meta'].get('null_value', 'NA')
self.encoding = yaml_dbt_source['meta'].get('encoding', 'utf-8')
# Build the tables. Pass the schema for convenience
self.tables = {t['name']: KaggleDbtSourceTable(t, self.schema) for t in yaml_dbt_source['tables']}
@property
def kaggle_full_name(self) -> str:
"""Build the full Kaggle dataset name"""
return f'{self.kaggle_owner}/{self.kaggle_name}'
def get_table(self, table_name: str) -> KaggleDbtSourceTable:
"""Returns a given table"""
return self.tables[table_name]
def read_kaggle_dbt_source_configs(dbt_project_path: str, dbt_project_name: str) -> KaggleDbtSource:
"""Reads and parses all dbt source configuration files (with the right naming) in a dbt project"""
dbt_source_cfgs = {}
dbt_models_path = f'{dbt_project_path}/{dbt_project_name}/models'
for ds in os.listdir(dbt_models_path):
with open(f'{dbt_models_path}/{ds}/sources/src_{ds}.yml', 'r') as ifile:
try:
dataset_cfg = KaggleDbtSource(yaml.safe_load(ifile))
except yaml.YAMLError as e:
print(e)
dbt_source_cfgs[dataset_cfg.name] = dataset_cfg
    return dbt_source_cfgs
| Beetelbrox/accident-information-challenge | airflow/plugins/kaggle_elt/kaggle_dbt_source.py | kaggle_dbt_source.py | py | 3,428 | python | en | code | 0 | github-code | 13 |
41266676814 | class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
outputs = []
n = len(nums)
def backtrack(index, k, path):
            if len(path) == k:
                outputs.append(path[:])
                return  # longer paths can never shrink back to length k
for i in range(index, n):
path.append(nums[i])
backtrack(i + 1, k, path)
path.pop()
for k in range(n+1):
backtrack(0, k, [])
        return outputs
| AshwinRachha/LeetCode-Solutions | 78-subsets/78-subsets.py | 78-subsets.py | py | 468 | python | en | code | 0 | github-code | 13 |
34815806735 | from heapq import nlargest
from functools import reduce
"Horrible, embarrassing code."
def main() -> None:
with open("inputs/day9_input.in", "r") as f:
heightmap = []
lows = []
result_p1 = 0
result_p2 = []
for line in f.readlines():
heightmap.append(list(map(int, list(line.strip()))))
neighbors = find_neighbors(heightmap)
for k in neighbors:
if (neighbors[k][0] < min(neighbors[k][1])):
result_p1 += neighbors[k][0] + 1
lows.append(k)
for p in lows:
result_p2.append(find_basin(heightmap, p))
result_p2 = nlargest(3, result_p2)
print(result_p1)
print(reduce(lambda x, y: x * y, result_p2))
def find_basin(heightmap, point):
found_points = set()
x, y = point
sizes = []
directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]
def search_direction(i=x, j=y, cache=-1):
if (heightmap[i][j] != 9 and heightmap[i][j] > cache):
found_points.add((i, j))
for dx, dy in directions:
if i + dx >= 0 and i + dx <= len(heightmap) - 1 and j + dy >= 0 and j + dy <= len(heightmap[i]) - 1:
search_direction(i + dx, j + dy, heightmap[i][j])
else:
continue
search_direction()
return len(found_points)
def find_neighbors(heightmap):
neighbordict = {}
for i in range(len(heightmap)):
for j, value in enumerate(heightmap[i]):
curr_neighbors = []
if i == 0 or i == len(heightmap) - 1 or j == 0 or j == len(heightmap[i])-1:
if i != 0:
curr_neighbors.append(heightmap[i-1][j])
if j != 0:
curr_neighbors.append(heightmap[i][j-1])
if i != len(heightmap) - 1:
curr_neighbors.append(heightmap[i+1][j])
if j != len(heightmap[i]) - 1:
curr_neighbors.append(heightmap[i][j+1])
else:
curr_neighbors = [
heightmap[i-1][j],
heightmap[i][j-1],
heightmap[i][j+1],
heightmap[i+1][j]
]
neighbordict[(i, j)] = (
heightmap[i][j], curr_neighbors)
return neighbordict
if __name__ == '__main__':
main()
| berkentekin/Advent_of_code_2021 | day09/day9.py | day9.py | py | 2,420 | python | en | code | 0 | github-code | 13 |
27648906983 | """
Made by @plutus
"""
import json
from types import SimpleNamespace
import requests
from traffic_source import TSProvider, TSCampaign
class TSPropellerAdsProvider(TSProvider):
"""
PropellerAds Provider
"""
def __init__(self, ts_name: str, api_key: str):
super().__init__(ts_name)
self.api = PropellerAdsAPIV5(api_key)
def get_ts_campaigns(self, page: int = None, page_size: int = None):
"""
Fetch PropellerAds campaigns and store them.
:param page: page number
:param page_size: page size
:return:
"""
if self.ts_campaigns:
return self.ts_campaigns
response = self.api.get_campaigns_list(
is_archived=0,
status=[
TSPropellerAdsCampaign.STATUS_WORKING,
TSPropellerAdsCampaign.STATUS_PAUSED,
TSPropellerAdsCampaign.STATUS_STOPPED,
TSPropellerAdsCampaign.STATUS_COMPLETED,
],
page=page,
page_size=page_size)
ts_campaigns = {}
for data in response.result:
ts_campaigns[data.id] = TSPropellerAdsCampaign(
ts_name=self.ts_name,
id=data.id,
url=data.target_url,
name=data.name
)
self.ts_campaigns = ts_campaigns
return self.ts_campaigns
def match(self, binom_campaign_url: str):
"""
:param binom_campaign_url: Binom campaign URL
:return: list of matched PropellerAds campaigns
"""
return [
ts_campaign
for ts_campaign_id, ts_campaign in self.get_ts_campaigns().items()
if binom_campaign_url in ts_campaign.url
]
def get_cost(
self,
ts_campaign_ids: list,
date_from: str,
date_to: str,
timezone: int
):
"""
:param ts_campaign_ids: list of campaigns ids
:param date_from: date from
:param date_to: date to
:param timezone: timezone
:return: list of costs grouped by campaign ids
"""
        timezone_sign = '+' if timezone >= 0 else '-'  # e.g. 3 -> "+0300", -5 -> "-0500", 0 -> "+0000"
ts_campaigns_stats = self.api.get_statistics({
"group_by": "campaign_id",
"day_from": date_from,
"day_to": date_to,
"tz": "{:s}{:02d}00".format(timezone_sign, abs(timezone)),
"campaign_id": ts_campaign_ids
})
ts_campaigns = self.get_ts_campaigns()
for stats in ts_campaigns_stats:
ts_campaigns[stats.campaign_id].set_cost(stats.money)
costs = {}
for ts_campaign_id, ts_campaign in self.ts_campaigns.items():
costs[ts_campaign_id] = ts_campaign.cost
return costs
class PropellerAdsAPIV5:
"""
V5 PropellerAds API handler
"""
def __init__(self, api_key):
self.api_key = api_key
self.base_uri = 'https://ssp-api.propellerads.com/v5/'
def __get(self, endpoint: str, payload: dict, headers: dict = None):
"""
:param endpoint: request endpoint
:param payload: payload dict
:param headers: headers dict
:return: response object
"""
headers = headers if headers else {}
response = requests.get(
"%s%s" % (self.base_uri, endpoint),
payload,
headers={
'Authorization':
'Bearer %s' % self.api_key,
**headers
}
)
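        # parse the JSON response into SimpleNamespace objects so fields can be
        # read as attributes (e.g. response.result, stats.money)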
return json.loads(
response.text,
object_hook=lambda d: SimpleNamespace(**d)
)
def __post(
self,
endpoint: str,
payload: dict = None,
json_data: dict = None,
headers: dict = None
):
"""
:param endpoint: request endpoint
:param payload: payload dict
:param json_data: json dict
:param headers: headers dict
:return: response object
"""
headers = headers if headers else {}
response = requests.post(
"%s%s" % (self.base_uri, endpoint),
payload,
json=json_data,
headers={
'Authorization':
'Bearer %s' % self.api_key,
**headers
}
)
return json.loads(
response.text,
object_hook=lambda d: SimpleNamespace(**d)
)
def get_campaigns_list(
self,
id=None,
status=None,
direction_id=None,
rate_model=None,
is_archived=0,
page=None,
page_size=None
):
"""
:param id:
:param status:
:param direction_id:
:param rate_model:
:param is_archived:
:param page:
:param page_size:
:return:
"""
payload = {'is_archived': is_archived}
if id:
payload['id[]'] = id
if status:
payload['status[]'] = status
if direction_id:
payload['direction_id[]'] = direction_id
if rate_model:
payload['rate_model[]'] = rate_model
if page:
payload['page'] = page
if page_size:
payload['page_size'] = page_size
return self.__get('adv/campaigns', payload)
def get_statistics(self, payload):
"""
:param payload: payload dict
:return: response object
"""
return self.__post(
'adv/statistics',
json_data=payload,
headers={'Content-Type': 'application/json'}
)
class TSPropellerAdsCampaign(TSCampaign):
"""
PropellerAds Campaign
"""
STATUS_DRAFT = 1
STATUS_MODERATION_PENDING = 2
STATUS_REJECTED = 3
STATUS_READY = 4
STATUS_TEST_RUN = 5
STATUS_WORKING = 6
STATUS_PAUSED = 7
STATUS_STOPPED = 8
STATUS_COMPLETED = 9
def __init__(self, ts_name, id, url, name):
super().__init__(ts_name, id, url, name)
| ourprofit/binom-cost-synchronizer | providers/propeller_ads.py | propeller_ads.py | py | 6,138 | python | en | code | 0 | github-code | 13 |
70491984978 | def findMin(list):
min = list[0]
for i in range(0, len(list)):
if list[i] < min:
min = list[i]
return min
def findMax(list):
max = list[0]
for i in range(0, len(list)):
if list[i] > max:
max = list[i]
return max
if __name__ == '__main__':
numbers = input("Podaj liczby po przecinku: ")
numbers = numbers.split(",")
numbers = [float(x) for x in numbers]
max = findMax(numbers)
min = findMin(numbers)
print(f'Min: {min}, Max: {max}')
| Dom0nS/Python-PJATK | project3/zad1.py | zad1.py | py | 525 | python | en | code | 0 | github-code | 13 |
23758653000 | from Crypto.Cipher import AES
iv = b"\x00"*16
key = b"andy love simone"
msg = b"andy love simoneandy love simone"
expected = "d6fdc5d5596e6ff6c3039cfbb5d9216f"
h = AES.new(key, AES.MODE_CBC, iv=iv).encrypt(msg)
hd = h.hex()
print(hd)
print(hd[-32:])
print(expected)
print(hd[-32:] == expected)
def XOR(b1, b2):
    # byte-wise XOR; the original int/hex round-trip dropped leading zero bytes,
    # which can crash bytes.fromhex or silently shorten the result
    return bytes(a ^ b for a, b in zip(b1, b2))
wmsg = "andy love simoneandy love simone"
pt1 = wmsg[:16].encode()
pt2 = wmsg[-16:].encode()
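
# Recompute the two-block CBC by hand: C1 = E_K(IV XOR P1), C2 = E_K(C1 XOR P2).
# The final block C2 should equal the last block of the library's CBC output.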
ecb = AES.new(key, AES.MODE_ECB)
in1 = XOR(iv, pt1)
out1 = ecb.encrypt(in1)
in2 = XOR(pt2, out1)
out2 = ecb.encrypt(in2)
print(out2.hex())
print(out2.hex() == expected)
| micahshute/ece_code | applied_crypto/playground/lesson4/cbc_mac_test.py | cbc_mac_test.py | py | 657 | python | en | code | 1 | github-code | 13 |
38175390579 | #===============================================================================
# Autor : Rosenio Pinto
# e-mail: kenio3d@gmail.com
#===============================================================================
class Scene_Info(object):
def __init__(self, scene_check_state = {},
reference_check_state = {},
reference_full_path = {},
scene_short_name = '',
scene_full_path = '',
project_path = '',
reference_list = {},
start_frame = 0,
end_frame = 1):
        # object.__init__ takes no extra arguments; the original call passed
        # scene_check_state in the self slot, which was a bug
        object.__init__(self)
#=======================================================================
# Class parameters
#=======================================================================
self.scene_check_state = scene_check_state
self.reference_check_state = reference_check_state
self.reference_full_path = reference_full_path
self.scene_short_name = scene_short_name
self.scene_full_path = scene_full_path
self.project_path = project_path
self.reference_list = reference_list
self.start_frame = start_frame
self.end_frame = end_frame
#===========================================================================
# Set the scene check state
#===========================================================================
def Set_Scene_Check_State(self, value):
self.scene_check_state[self.scene_short_name] = value
self.Set_All_Reference_Check_State(value)
#===========================================================================
# Set the reference check state
#===========================================================================
def Set_Reference_Check_State(self, reference_type, reference_name, value):
self.reference_check_state[reference_type][reference_name] = value
#===========================================================================
# Get the scene check state
#===========================================================================
def Get_Scene_Check_State(self):
return self.scene_check_state[self.scene_short_name]
#===========================================================================
# Get the reference check state
#===========================================================================
def Get_Reference_Check_State(self, reference_type, reference_name):
return self.reference_check_state[reference_type][reference_name]
#===========================================================================
# Set all the scene check state
#===========================================================================
def Set_All_Reference_Check_State(self, value):
        for reference_type, reference_list in self.reference_list.items():
for reference_name in reference_list:
self.Set_Reference_Check_State(reference_type, reference_name, value)
#===========================================================================
# Return all data as dict format
#===========================================================================
def As_Dict(self):
return {
'Scene_CheckState_Info' :self.scene_check_state,
'Reference_CheckState_Info' :self.reference_check_state,
'Reference_Full_Path' :self.reference_full_path,
'reference_list' :self.reference_list,
'scene_short_name' :self.scene_short_name,
'scene_full_path' :self.scene_full_path,
'Project_Path' :self.project_path,
'start_frame' :self.start_frame,
'end_frame' :self.end_frame
}
| rosenio/Batch.io | scripts/Process/Utils/Scene_Info.py | Scene_Info.py | py | 4,566 | python | en | code | 6 | github-code | 13 |
33683139189 | from bs4 import BeautifulSoup
log = logging.getLogger('statistics') # Логер в контексте джанго, заменить на нативный
class Facebook_Stats:
def __init__(self, fb_post_id, fb_token):
self.fb_post_id = fb_post_id
self.fb_token = fb_token
def req_stats(self, url_method):
req = requests.get(url_method)
if req.status_code != 200:
log.info('Facebook_Stats: %s' % req.json())
return -1
return req.json().get('summary').get('total_count')
def fb_likes(self):
url_method = fb_url + '%s/likes?summary=true&access_token=%s' % (self.fb_post_id, self.fb_token)
return self.req_stats(url_method)
def fb_reactions(self):
url_method = fb_url + '%s/reactions?summary=total_count&access_token=%s' % (self.fb_post_id, self.fb_token)
return self.req_stats(url_method)
def fb_comments(self):
url_method = fb_url + '%s/comments?summary=true&access_token=%s' % (self.fb_post_id, self.fb_token)
return self.req_stats(url_method)
def fb_sharedposts(self):
url_method = fb_url + '%s/sharedposts?access_token=%s' % (self.fb_post_id, self.fb_token)
req = requests.get(url_method)
if req.status_code != 200:
log.info('Facebook_Stats: %s' % req.json())
return -1
return len(req.json().get('data'))
def fb_stats(self):
fb_likes, fb_reactions, fb_comments, fb_sharedposts = self.fb_likes(), self.fb_reactions(), self.fb_comments(),\
self.fb_sharedposts()
return int(fb_likes), int(fb_reactions), int(fb_comments), int(fb_sharedposts)
class MediumStats:
def stats(self, post_url):
url = post_url
html_doc = requests.get(url)
soup = BeautifulSoup(html_doc.text, 'html.parser')
lc = soup.find('div', class_='u-floatLeft buttonSet buttonSet--withLabels')
r_list = []
try:
t = lc.find_all('button', class_='button button--chromeless u-baseColor--buttonNormal')
except AttributeError as e:
log.info('MediumStats: %s' % str(e))
return -1, -1
if len(t) == 2:
for div in t:
try:
                    if div['data-action'] == 'show-recommends':
                        r_list.append(int(div.text))
                    elif div['data-action'] == 'scroll-to-responses':
                        r_list.append(int(div.text))
                    else:
                        r_list.append(0)
except KeyError as e:
log.info('MediumStats: %s' % str(e))
return -1, -1
elif len(t) == 1:
if 'show-recommends' in str(t):
for div in t:
r_list.append(int(div.text))
r_list.append(0)
elif 'scroll-to-responses' in str(t):
for div in t:
r_list.append(0)
r_list.append(int(div.text))
elif len(t) == 0:
r_list.append(0)
r_list.append(0)
        return r_list
| maksymkv25/Medium-Facebook-statistics | main.py | main.py | py | 3,134 | python | en | code | 0 | github-code | 13 |
28462403140 | user_in = input("Number = ")
try:
user_num = int(user_in)
except ValueError:
message = "Ошибка, это не число"
else:
    message = user_num ** 2  # a stray extra operation that gets overwritten below
last_num = user_num % 10
new_num = user_num // 10
message = last_num * 10000 + new_num
print(message)
| Sadburritos/python_homework | May26/09.py | 09.py | py | 378 | python | ru | code | 0 | github-code | 13 |
17056467084 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MybankCreditProdarrangementContracttextQueryModel(object):
def __init__(self):
self._bsn_no = None
self._contract_type = None
self._query_type = None
@property
def bsn_no(self):
return self._bsn_no
@bsn_no.setter
def bsn_no(self, value):
self._bsn_no = value
@property
def contract_type(self):
return self._contract_type
@contract_type.setter
def contract_type(self, value):
self._contract_type = value
@property
def query_type(self):
return self._query_type
@query_type.setter
def query_type(self, value):
self._query_type = value
def to_alipay_dict(self):
params = dict()
if self.bsn_no:
if hasattr(self.bsn_no, 'to_alipay_dict'):
params['bsn_no'] = self.bsn_no.to_alipay_dict()
else:
params['bsn_no'] = self.bsn_no
if self.contract_type:
if hasattr(self.contract_type, 'to_alipay_dict'):
params['contract_type'] = self.contract_type.to_alipay_dict()
else:
params['contract_type'] = self.contract_type
if self.query_type:
if hasattr(self.query_type, 'to_alipay_dict'):
params['query_type'] = self.query_type.to_alipay_dict()
else:
params['query_type'] = self.query_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MybankCreditProdarrangementContracttextQueryModel()
if 'bsn_no' in d:
o.bsn_no = d['bsn_no']
if 'contract_type' in d:
o.contract_type = d['contract_type']
if 'query_type' in d:
o.query_type = d['query_type']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/MybankCreditProdarrangementContracttextQueryModel.py | MybankCreditProdarrangementContracttextQueryModel.py | py | 1,938 | python | en | code | 241 | github-code | 13 |
15602704920 | budget = float(input())
statists = int(input())
price_for_cloth_statists = float(input())
decor = budget * (10 / 100)
sum_for_cloth = statists * price_for_cloth_statists
if statists > 150:
percentage = sum_for_cloth * (10 / 100)
for_cloth = sum_for_cloth - percentage
money = for_cloth + decor
if budget >= money:
money2 = budget - money
money3 = format(money2, '.2f')
print("Action!")
print(f"Wingard starts filming with {money3} leva left.")
elif money > budget:
money4 = money - budget
money5 = format(money4, '.2f')
print("Not enough money!")
print(f"Wingard needs {money5} leva more.")
else:
money6 = sum_for_cloth + decor
if budget >= money6:
money7 = budget - money6
money10 = format(money7, '.2f')
print("Action!")
print(f"Wingard starts filming with {money10} leva left.")
elif money6 > budget:
money8 = money6 - budget
money9 = format(money8, '.2f')
print("Not enough money!")
print(f"Wingard needs {money9} leva more.")
| PowerCell12/Programming_Basics_Python | Conditional Statements/Exercise/05. Godzilla vs. Kong.py | 05. Godzilla vs. Kong.py | py | 1,094 | python | en | code | 0 | github-code | 13 |
17087076594 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.RecResultInfo import RecResultInfo
class AlipayOpenDataItemRecommendBatchqueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenDataItemRecommendBatchqueryResponse, self).__init__()
self._result_obj = None
@property
def result_obj(self):
return self._result_obj
@result_obj.setter
def result_obj(self, value):
if isinstance(value, list):
self._result_obj = list()
for i in value:
if isinstance(i, RecResultInfo):
self._result_obj.append(i)
else:
self._result_obj.append(RecResultInfo.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayOpenDataItemRecommendBatchqueryResponse, self).parse_response_content(response_content)
if 'result_obj' in response:
self.result_obj = response['result_obj']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayOpenDataItemRecommendBatchqueryResponse.py | AlipayOpenDataItemRecommendBatchqueryResponse.py | py | 1,088 | python | en | code | 241 | github-code | 13 |
73091704336 | import numpy as np
import soundfile as sf
from scipy import signal
import matplotlib.pyplot as plt
from scipy import array, zeros, signal
from scipy.fftpack import fft, ifft, convolve
#If using termux
import subprocess
import shlex
x,fs = sf.read('../data/Sound_Noise.wav')
y = np.zeros(len(x))
samp_freq = fs
order = 4
cutoff_freq = 4000.0
Wn = 2*cutoff_freq/samp_freq
b,a = signal.butter(order,Wn,'low')
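# Build the filter's impulse response h[n] from the IIR difference equation:
# h[n] = (b[n] - sum_{k>=1} a[k] * h[n-k]) / a[0], driven by a unit impulse.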
h = np.zeros(len(x))
h[0] = (b[0]/a[0])
h[1] = (1/a[0]) * (b[1]-a[1]*h[0])
h[2] = (1/a[0]) * (b[2]-a[2]*h[0]-a[1]*h[1])
h[3] = (1/a[0]) * (b[3]-a[1]*h[2]-a[3]*h[0]-a[2]*h[1])
h[4] = (1/a[0]) * (b[4]-a[1]*h[3]-a[2]*h[2]-a[4]*h[0]-a[3]*h[1])
for i in range(5,len(x)):
h[i] = (1/a[0])*(-a[1]*h[i-1]-a[2]*h[i-2]-a[3]*h[i-3]-a[4]*h[i-4])
#circular convolution in freq domain
def conv(x, h):
X = fft(x)
H = fft(h)
#computing convolution in frequency domain
Y = X * H
#calculating time domain representation of Y
y = ifft(Y)
return y
y = conv(x,h)
plt.plot(np.real(y))
plt.title('Output signal through convolution')
plt.xlabel('n')
plt.ylabel('y(n)')
#If using termux
plt.savefig('../figs/4_3.pdf')
plt.savefig('../figs/4_3.eps')
subprocess.run(shlex.split("termux-open ../figs/4_3.pdf"))
| hellblazer1/EE3025_A1 | codes/4_3.py | 4_3.py | py | 1,220 | python | en | code | 0 | github-code | 13 |
15722003623 | import numpy as np
from matplotlib import pyplot as plt
def find_max_correlation(sound_array, min_shift, max_shift, plot=False, correlation_step=1):
auto_correlation = []
for shift in np.arange(min_shift, max_shift, correlation_step):
auto_correlation.append(np.corrcoef(sound_array[:-shift], sound_array[shift:])[0, 1])
shift_indices = np.arange(min_shift, max_shift, correlation_step)
if plot:
plt.plot(shift_indices, auto_correlation)
plt.title("autocorrelation")
plt.show()
s = sorted(zip(auto_correlation, shift_indices))
auto_correlation, shift_indices = map(list, zip(*s))
return shift_indices[-1]
| ArturPrzybysz/autotune | src/autocorrelation.py | autocorrelation.py | py | 670 | python | en | code | 1 | github-code | 13 |
22417634206 | import time
"""
Recursive Example: calculate Fibonacci sequence.
遞廻函式例子: 求斐波那契數列
"""
def fibo(n):
"""Return the n-th element of Fibonacci sequence.
Fibonacci sequnce is an infinite list of positive numbers,
begining with the first two ones, and any subsequent number that equals to
the sum of the previous two numbers."""
# checking the input must be a positive integer
if type(n) != int:
raise TypeError("The input must be a positive integer")
elif n < 1:
raise ValueError("The input must be a positive integer")
# computing the n-th term
if n == 1:
return 1
elif n == 2:
return 1
else:
return fibo(n - 1) + fibo(n - 2)
from functools import lru_cache
"""
Caching strategy using the 'functools' module.
The LRU caching scheme in 'functools' removes
the least recently used frame when the cache is full,
and a new page is referenced which is not there in the cache.
There are generally two cases with LRU Cache,
1. Page hit: If the required page is found in the main memory,
it is a page hit.
2. Page Fault: If the required page is not found in the main
memory, page fault occurs.
When a page is referenced, the required page may be in the memory.
1. If it is in the memory, we need to detach the node of
the list and bring it to the front of the queue.
2. If the required page is not in memory,
we bring that in memory.
In other words,
1. we add a new node to the front of the queue
2. update the corresponding node address in the hash.
If the queue is full, i.e. all the frames are full,
we remove a node from the rear of the queue.
When insert into the queue, we add the new node to the front
of the queue.
"""
@lru_cache(maxsize=1000)
def fibonacci(n):
# checking the input must be a positive integer
if type(n) != int:
raise TypeError("The input must be a positive integer")
elif n < 1:
raise ValueError("The input must be a positive integer")
# computing the n-th term
if n == 1:
return 1
elif n == 2:
return 1
else:
return fibonacci(n - 1) + fibonacci(n - 2)
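
# A minimal illustration of the LRU eviction described above (assumes the
# standard functools behavior; not part of the original example). With
# maxsize=2, re-reading 1 makes 2 the least recently used entry, so
# computing square(3) evicts 2's cached result.
@lru_cache(maxsize=2)
def square(n):
    return n * n

square(1); square(2)   # cache now holds results for 1 and 2
square(1)              # hit: 1 becomes the most recently used
square(3)              # miss: evicts 2, the least recently used
# square.cache_info() -> CacheInfo(hits=1, misses=3, maxsize=2, currsize=2)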
# Main
if __name__ == '__main__':
max_loop = 30
# Time both functions
t1 = time.time()
for n in range(1, max_loop):
print(f'{n}:{fibo(n)}')
t2 = time.time()
cacheless = t2 - t1
    print(f'Cacheless Fibonacci function took {cacheless} seconds\n')
t3 = time.time()
for n in range(1, max_loop):
print(f'{n}:{fibonacci(n)}')
t4 = time.time()
cached = t4 - t3
    print(f'Fibonacci function with lru-cache took {cached} seconds\n')
# Comparison
print(
f'Comparison: Cacheless/Cached took {cacheless/cached:,.1f} times longer.\n')
# Fibonacci seq in a list
L = [fibonacci(n) for n in range(1, 11)]
print(f'Fibonacci seq in a list:\n===>{L}\n')
# Fibonacci seq in a tuple
T = (fibonacci(n) for n in range(1, 11))
print(f'Fibonacci seq in a tuple:\n===>{tuple(T)}\n')
# abnormal case
message = f'fibonacci("hello world")'
try:
print(message)
print(fibonacci("hello world"))
except:
print(f'===>Something is not right')
| mkaoy2k/PythonExample-Repo | fibo.py | fibo.py | py | 3,367 | python | en | code | 0 | github-code | 13 |
1980873096 | from torch.nn.modules.activation import LeakyReLU
from torch.nn.modules.batchnorm import BatchNorm1d
from .base import BaseVAE
from .types_ import *
import torch
from torch import nn
import torch.nn.functional as F
class BetaVAE(BaseVAE):
num_iter = 0
has_labels = False
def __init__(
self,
num_bands,
latent_dim,
hidden_dims: List,
beta: int = 5,
gamma: float = 1000.0,
max_capacity: int = 25,
Capacity_max_iter: int = 1e5,
loss_type: str = "B",
**kwargs
) -> None:
super(BetaVAE, self).__init__()
self.num_bands = num_bands
self.latent_dim = latent_dim
self.beta = beta
self.gamma = gamma
self.loss_type = loss_type
self.C_max = torch.Tensor([max_capacity])
self.C_stop_iter = Capacity_max_iter
self.hidden_dims = hidden_dims
self.lrelu_beta = 0.02
# Build encoder
layers = []
in_units = self.num_bands
activation = torch.nn.LeakyReLU(negative_slope=self.lrelu_beta)
for h_dim in self.hidden_dims:
layers.append(
nn.Sequential(
nn.Linear(in_features=in_units, out_features=h_dim),
# nn.BatchNorm1d(h_dim),
activation,
)
)
in_units = h_dim
self.encoder = nn.Sequential(*layers)
self.mu = nn.Linear(self.hidden_dims[-1], self.latent_dim)
self.var = nn.Linear(self.hidden_dims[-1], self.latent_dim)
# Build decoder
self.hidden_dims.reverse()
in_units = latent_dim
layers = []
for h_dim in self.hidden_dims:
layers.append(
nn.Sequential(
nn.Linear(in_features=in_units, out_features=h_dim),
# nn.BatchNorm1d(h_dim),
activation,
)
)
in_units = h_dim
self.decoder = nn.Sequential(*layers)
self.output_layer = nn.Sequential(
nn.Linear(in_units, self.num_bands), torch.nn.LeakyReLU(0.01)
)
def encode(self, inputs: Tensor, **kwargs) -> List[Tensor]:
x = self.encoder(inputs)
mu = self.mu(x)
var = self.var(x)
return [mu, var]
def decode(self, inputs: Tensor) -> Tensor:
x = self.decoder(inputs)
x = self.output_layer(x)
return x
def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
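        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I)
        # and sigma = exp(0.5 * logvar), so gradients can flow through mu/logvar.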
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def forward(self, inputs, **kwarg) -> Tensor:
mu, log_var = self.encode(inputs)
z = self.reparameterize(mu, log_var)
return [self.decode(z), inputs, mu, log_var]
def loss_function(self, *args, **kwargs) -> dict:
self.num_iter += 1
recons = args[0]
input = args[1]
mu = args[2]
log_var = args[3]
kld_weight = kwargs["M_N"]
recons_loss = F.mse_loss(recons, input)
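        # Closed-form KL divergence KL(N(mu, sigma^2) || N(0, I))
        # = -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2), averaged over the batch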
kld_loss = torch.mean(
-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0
)
if self.loss_type == "H": # https://openreview.net/forum?id=Sy2fzU9gl
loss = recons_loss + self.beta * kld_weight * kld_loss
elif self.loss_type == "B": # https://arxiv.org/pdf/1804.03599.pdf
self.C_max = self.C_max.to(input.device)
C = torch.clamp(
self.C_max / self.C_stop_iter * self.num_iter, 0, self.C_max.data[0]
)
loss = recons_loss + self.gamma * kld_weight * (kld_loss - C).abs()
else:
raise ValueError("Undefined loss type.")
return {
"loss": loss,
"Reconstruction_Loss": recons_loss,
"KLD": kld_loss.detach(),
}
def sample(self, num_samples: int, current_device: int, **kwargs) -> Tensor:
z = torch.randn(num_samples, self.latent_dim)
z = z.to(current_device)
samples = self.decode(z)
return samples
def generate(self, x: Tensor, **kwargs) -> Tensor:
return self.forward(x, **kwargs)[0]
| burknipalsson/vae_synthetic_hsi | models/beta_vae.py | beta_vae.py | py | 4,371 | python | en | code | 4 | github-code | 13 |
17464518472 | import json
from _datetime import datetime
from PyQt5 import QtWidgets
from common import config
from common.static_func import get_uuid1
from view.customer.ui.ui_return_visit_setting import Ui_MainWindow
from database.dao.customer import customer_handler
from database.dao.sale import sale_handler
class ReturnVisitSetting(QtWidgets.QDialog, Ui_MainWindow):
def __init__(self, next_visit_time, record_id, car_phone, car_id, car_user):
super(ReturnVisitSetting, self).__init__()
self.id = record_id
self.car_phone = car_phone
self.car_id = car_id
self.car_user = car_user
self.setupUi(self)
self.next_visit_time = next_visit_time
        self.info_label.setText(
            '''On <b>{}</b> you are scheduled to revisit customer <b>{}</b><br>Contact: <b>{}</b><br>Plate number: <b>{}</b>'''
            .format(next_visit_time, car_user, car_phone, car_id))
self.submit_set.clicked.connect(self.change_state)
self.do_set_next_date.stateChanged.connect(self.set_time)
def change_state(self):
remarks = self.remark.text().strip()
if not remarks:
            QtWidgets.QMessageBox.information(self.submit_set, "Notice", "Please enter a remark")
else:
today = datetime.now()
get_data = {}
order_no = sale_handler.get_order_no(today)
get_data["orderNo"] = order_no
get_data["createdTime"] = today
get_data["carUser"] = self.car_user
get_data["carId"] = self.car_id
get_data["carPhone"] = self.car_phone
car_user = get_data.get("carUser", '-')
user_id = get_data.get("userId", '-')
worker_id = get_data.get("workerId", "-")
pc_id = get_data.get("pcId", "-")
car_phone = get_data.get("carPhone", "-")
car_model = get_data.get("carModel", "-")
car_id = get_data.get("carId", "-")
pc_sign = get_data.get("pcSign", '-')
worker_name = get_data.get("workerName", '-')
order_check_id = get_uuid1()
order_id = get_uuid1()
save_data = {
'createdTime': get_data.get("createdTime").strftime("%Y-%m-%d %H:%M:%S"),
'userId': user_id,
'pcId': pc_id,
'pcSign': pc_sign,
'carId': car_id,
'workerName': worker_name,
'workerId': worker_id,
'carUser': car_user,
'carPhone': car_phone,
'carModel': car_model,
"orderNo": order_no,
"orderCheckId": order_check_id,
'code': config.get_local_register_code(),
'attribute': json.dumps({"回访备注": remarks}),
'project': get_data.get('project', '-'),
'id': order_id
}
sale_handler.add_sale_detail(save_data)
            # update the follow-up status: mark this record as visited
customer_handler.update_return_visit_state(self.id, '1')
            # if a second follow-up date was entered, insert a pending follow-up record
if self.do_set_next_date.isChecked():
                # follow-up scheduling
time_str = self.next_date.text()
time_list = time_str.split('/')
                # on Windows XP the date string is separated by "-" instead of "/"
if len(time_list) < 3:
time_list = time_str.split("-")
                # sometimes the year comes last (e.g. 03-25-2016), which would break the query, so normalize the order
if len(time_list[2]) == 4:
mon = time_list[0]
day = time_list[1]
time_list[0] = time_list[2]
time_list[1] = mon
time_list[2] = day
time_str = ""
for t in time_list:
if len(t) < 2:
t = "0" + t
time_str += t + "-"
time_str = time_str[:-1]
customer_handler.add_return_visit_data(time_str, car_phone, car_id, car_user, today)
self.close()
def set_time(self):
if self.do_set_next_date.isChecked():
self.next_date.setEnabled(True)
else:
self.next_date.setEnabled(False)
| zgj0607/Py-store | view/customer/return_visit_setting.py | return_visit_setting.py | py | 4,365 | python | en | code | 3 | github-code | 13 |
7314400745 | INF = 1 << 31
# 構造体の定義
class maxflow_edges:
def __init__(self, to: int, cap: int, rev: int):
self.to = to
self.cap = cap
self.rev = rev
def dfs(pos, goal, F, G, used):
if pos == goal:
return F
used[pos] = True
for v in G[pos]:
if v.cap > 0 and used[v.to] is False:
            # recursively take the minimum residual capacity v.cap along the path
            # if an unvisited path reaches the goal, F comes back as the flow
flow = dfs(v.to, goal, min(F, v.cap), G, used)
if flow:
v.cap -= flow
                # push the flow back onto the reverse edge (to -> v);
                # rev is the index of this edge's reverse edge inside G[v.to]
G[v.to][v.rev].cap += flow
return flow
return 0
def ford_fulkerson(N, s, t, edges):
G = [list() for _ in range(N + 1)]
for a, b, c in edges:
G[a].append(maxflow_edges(b, c, len(G[b])))
        G[b].append(maxflow_edges(a, 0, len(G[a]) - 1))  # -1 because the forward edge was just appended to G[a]
total_flow = 0
    # repeat until no augmenting path remains (F == 0)
while True:
used = [False] * (N + 1)
F = dfs(s, t, INF, G, used)
if F > 0:
total_flow += F
else:
break
return total_flow
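
# With DFS path-finding and integer capacities, Ford-Fulkerson runs in
# O(F * E) time, where F is the value of the maximum flow.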
N, M = map(int, input().split())
edges = [list(map(int, input().split())) for _ in range(M)]
ans = ford_fulkerson(N, 1, N, edges)
print(ans)
| sugimotoyuuki/kyopro | tessoku/9_8/maximum_flow.py | maximum_flow.py | py | 1,421 | python | en | code | 0 | github-code | 13 |
36724961412 | from core.constants import LOWER_STARTING_POSITION
from core.robot import get_robot_wrapper
from py_trees.behaviour import Behaviour
from core.logger import log, LogLevel
from py_trees.common import Status
from core import constants
from enum import Enum
"""
Teleoperate the arm.
Operations:
X-PLANE
- up -> forward
- down -> back
- left -> left
- right -> right
Z-PLANE
- w -> up
- s -> down
GRIPPER
- o -> open
- c -> close
Press ENTER to finish and move to the next state.
"""
class Keys(Enum):
NO_KEY = -1
UP = 315
LEFT = 314
RIGHT = 316
DOWN = 317
SPACE = 32
W = 87
A = 65
S = 83
D = 68
O = 79
C = 67
ENTER = 4
FIVE = 53
SIX = 54
SEVEN = 55
EIGHT = 56
NINE = 57
ZERO = 48
class Teleoperate(Behaviour):
def __init__(self, name=None):
self.hardcoded_coords = name
super(Teleoperate, self).__init__(name)
self.robot = get_robot_wrapper()
self.coords = constants.UPPER_STARTING_POSITION
def initialise(self):
log("Teleoperating Arm", LogLevel.INFO)
# Can put coords into the name field
if self.hardcoded_coords:
self.coords = list(map(lambda x: float(x), self.hardcoded_coords.split(",")))
# Otherwise base it on the target object
else:
obj, shelf = self.robot.get_target_object()
self.coords = LOWER_STARTING_POSITION if not obj else obj.getPosition()
self.starting = [self.coords[0], self.coords[1], self.coords[2]]
self.orientation = [0, 0, 1]
def update(self):
key = self.robot.keyboard.getKey()
log(key)
if key == Keys.NO_KEY.value:
return Status.RUNNING
# X PLANE
if key == Keys.RIGHT.value:
self.coords[1] -= constants.TELE_ADJUSTMENT
elif key == Keys.LEFT.value:
self.coords[1] += constants.TELE_ADJUSTMENT
elif key == Keys.UP.value:
self.coords[0] += constants.TELE_ADJUSTMENT
elif key == Keys.DOWN.value:
self.coords[0] -= constants.TELE_ADJUSTMENT
# Z PLANE
elif key == Keys.W.value:
self.coords[2] += constants.TELE_ADJUSTMENT
elif key == Keys.S.value:
self.coords[2] -= constants.TELE_ADJUSTMENT
elif key == Keys.ZERO.value:
log(f"Resetting teleoperation from {self.coords} to {self.starting}.")
self.coords = self.starting
# GRIPPER
elif key == Keys.O.value:
self.robot.parts["gripper_left_finger_joint"].setVelocity(
self.robot.parts["gripper_left_finger_joint"].getMaxVelocity())
self.robot.parts["gripper_right_finger_joint"].setVelocity(
self.robot.parts["gripper_right_finger_joint"].getMaxVelocity())
self.robot.parts["gripper_left_finger_joint"].setPosition(constants.GRIPPER_OPEN_POS)
self.robot.parts["gripper_right_finger_joint"].setPosition(constants.GRIPPER_OPEN_POS)
elif key == Keys.C.value:
self.robot.parts["gripper_left_finger_joint"].setVelocity(
self.robot.parts["gripper_left_finger_joint"].getMaxVelocity() / 2.0)
self.robot.parts["gripper_right_finger_joint"].setVelocity(
self.robot.parts["gripper_right_finger_joint"].getMaxVelocity() / 2.0)
self.robot.parts["gripper_left_finger_joint"].setPosition(0)
self.robot.parts["gripper_right_finger_joint"].setPosition(0)
elif key == Keys.ENTER.value:
return Status.SUCCESS
try:
self.robot.ik_handler.reach_arm_to_target_camera_coords(self.coords, target_orientation=self.orientation)
except Exception as e:
log(f"error {e}")
return Status.RUNNING
def terminate(self, new_status):
log("Finished moving robot arm to starting position", LogLevel.DEBUG)
| tannerleise/RoboticsFinal | final_project/controllers/grocery_shopper/behavior/teleoperate.py | teleoperate.py | py | 3,940 | python | en | code | 0 | github-code | 13 |
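A minimal sketch, assuming py_trees 2.x, of how a Behaviour like the one above is typically driven: wrap it in a BehaviourTree and tick until it reports SUCCESS. The CountDown behaviour is an illustrative stand-in for Teleoperate, which needs a live Webots robot.

import py_trees
from py_trees.behaviour import Behaviour
from py_trees.common import Status

class CountDown(Behaviour):
    def __init__(self, name, ticks=3):
        super().__init__(name)
        self.remaining = ticks

    def update(self):
        # RUNNING until the counter runs out, the way Teleoperate
        # returns RUNNING until ENTER is pressed
        self.remaining -= 1
        return Status.SUCCESS if self.remaining <= 0 else Status.RUNNING

tree = py_trees.trees.BehaviourTree(root=CountDown("countdown"))
while tree.root.status != Status.SUCCESS:
    tree.tick()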
17114312844 | """add_first_initial_column
Revision ID: 63f9353737ad
Revises: a68cb3e25cb2
Create Date: 2023-05-14 20:43:37.289649
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '63f9353737ad'
down_revision = 'a68cb3e25cb2'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('author', sa.Column('first_initial', sa.String(), nullable=True))
op.add_column('author_version', sa.Column('first_initial', sa.String(), autoincrement=False, nullable=True))
op.add_column('author_version', sa.Column('first_initial_mod', sa.Boolean(), server_default=sa.text('false'), nullable=False))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('author_version', 'first_initial_mod')
op.drop_column('author_version', 'first_initial')
op.drop_column('author', 'first_initial')
# ### end Alembic commands ###
| alliance-genome/agr_literature_service | alembic/versions/20230514_63f9353737ad_add_first_initial_column.py | 20230514_63f9353737ad_add_first_initial_column.py | py | 1,032 | python | en | code | 1 | github-code | 13 |
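Migrations like this are normally applied with the alembic CLI; as a hedged sketch, the equivalent programmatic calls look like the following (the alembic.ini path is an assumption about the project layout).

from alembic.config import Config
from alembic import command

cfg = Config("alembic.ini")           # assumed location of the project's config
command.upgrade(cfg, "63f9353737ad")  # apply up to this revision
# command.downgrade(cfg, "a68cb3e25cb2")  # revert to the parent revision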
4474091612 | #-*- coding:utf-8 -*-
from odoo import models, fields, api
from odoo.exceptions import ValidationError
class HrContractContribution(models.Model):
_name = "hr.contract.contribution"
_description = "Contract Pay Contribution"
name = fields.Char(string="Reference",
required=True)
code = fields.Char(string="Code")
partner_id = fields.Many2one(string="Partner",
comodel_name="res.partner",
required=True)
contract_id = fields.Many2one(string="Contract",
comodel_name="hr.contract",
required=True,
ondelete="cascade")
amount = fields.Float(string="Amount",
help="If % of Wage is checked, this is the ratio of the contribution based on the wage in the contract. Otherwise, this is a fixed amount.",
compute="_compute_amount",
store=True,
readonly=False,
default=0)
percentage_of_wage = fields.Boolean(string="% of Wage",
help="Check if the contribution amount is a percentage of the wage in the contract.")
employee_percent = fields.Float(string="Emp. %")
company_percent = fields.Float(string="Comp. %")
employee_amount = fields.Float(string="Emp. Amount",
compute="_compute_distributed_amount")
company_amount = fields.Float(string="Comp. Amount",
compute="_compute_distributed_amount")
table_id = fields.Many2one(string="Table",
comodel_name="hr.contribution.table")
@api.constrains("employee_percent", "company_percent")
def _check_percents(self):
for contrib in self:
if contrib.employee_percent < 0.0 or contrib.company_percent < 0.0:
raise ValidationError("Contribution percentage must be greater than 0.")
if (contrib.employee_percent + contrib.company_percent) != 100.0:
raise ValidationError("Employee and Company percentage must total to 100.")
@api.depends("amount", "employee_percent", "company_percent", "percentage_of_wage", "contract_id.wage")
def _compute_distributed_amount(self):
for contrib in self:
employee_amount, company_amount = contrib._get_distributed_amount()
contrib.employee_amount = employee_amount
contrib.company_amount = company_amount
def _get_distributed_amount(self, wage=False):
self.ensure_one()
wage = wage or self.contract_id.wage
amount = (wage * self.amount / 100.0) if self.percentage_of_wage else self._get_amount(wage=wage)
employee_amount = amount * self.employee_percent / 100.0
company_amount = amount * self.company_percent / 100.0
return employee_amount, company_amount
@api.depends("contract_id.wage", "table_id", "table_id.bracket_ids.lower_limit",
"table_id.bracket_ids.fixed_amount", "table_id.bracket_ids.percentage_amount")
def _compute_amount(self):
for contrib in self:
contrib.amount = contrib._get_amount()
def _get_amount(self, wage=False):
self.ensure_one()
wage = wage or self.contract_id.wage
amount = self.amount
if self.table_id and self.table_id.bracket_ids:
amount = 0
for bracket in self.table_id.bracket_ids[::-1]:
if wage > bracket.lower_limit:
amount = bracket.fixed_amount + \
bracket.percentage_amount / 100 * (wage - bracket.lower_limit)
break
return amount
@api.onchange("table_id")
def _onchange_table_id(self):
if self.table_id:
            self.percentage_of_wage = False
| LuisMalave2001/GarryTesting | hr_payroll_extends/models/hr_contract_contribution.py | hr_contract_contribution.py | py | 3,629 | python | en | code | 2 | github-code | 13 |
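A framework-free sketch of the bracket lookup in _get_amount above: scan the table from the highest lower_limit down and apply the first bracket the wage exceeds. The sample table values are made up.

def bracket_amount(wage, brackets):
    # brackets: (lower_limit, fixed_amount, percentage_amount) tuples,
    # sorted by ascending lower_limit as in the hr.contribution.table
    for lower, fixed, pct in reversed(brackets):
        if wage > lower:
            return fixed + pct / 100.0 * (wage - lower)
    return 0.0

table = [(0, 0.0, 5.0), (1000, 50.0, 10.0), (3000, 250.0, 15.0)]  # made-up sample
print(bracket_amount(2500, table))  # 50 + 10% of 1500 -> 200.0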
29542077785 | # CCC '14 J3 "Double Dice": both players start at 100 points; each round
# the player with the lower roll loses points equal to the higher roll,
# and ties change nothing.
ascore = 100
bscore = 100
rounds = int(input())
for _ in range(rounds):
    a, b = input().split()
    a = int(a)
    b = int(b)
    if a > b:
        bscore -= a  # B rolled lower: B loses A's roll
    elif b > a:
        ascore -= b  # A rolled lower: A loses B's roll
    else:
        continue  # tie: no change
print(ascore)
print(bscore)
| orion222/competitive-programming | python/CCC/CCC 14 J3 Double Dice (2).py | CCC 14 J3 Double Dice (2).py | py | 273 | python | en | code | 0 | github-code | 13 |
38309046110 | import numpy as np
# pre_labels: predicted labels [[labels_0], [labels_1], ...]
# orig_labels: original (ground-truth) labels of the same shape as pre_labels
# precision = tp / (tp + fp)
# all labels should be int
def gen_precision(pre_labels, orig_labels):
tp, fp = 0, 0
count_ins = len(pre_labels)
for i in range(count_ins):
count_cand = len(pre_labels[i])
#print (len(orig_labels[i]), len(pre_labels[i]) )
for j in range(count_cand):
if int( pre_labels[i][j] ) == 1:
if int( pre_labels[i][j] == orig_labels[i][j] ):
tp = tp + 1
else:
fp = fp + 1
precision = ( tp + 0.0 ) / ( tp + fp + 0.0)
return precision
#recall = tp / (tp + fn)
def gen_recall(pre_labels, orig_labels):
tp, fn = 0, 0
count_ins = len(pre_labels)
for i in range(count_ins):
count_cand = len(pre_labels[i])
for j in range(count_cand):
if int( orig_labels[i][j] ) == 1:
if int( pre_labels[i][j] == orig_labels[i][j] ):
tp = tp + 1
else:
fn = fn + 1
recall = ( tp + 0.0 ) / ( tp + fn + 0.0)
return recall
# accuracy = (tp + tn) / (tp + tn + fp + fn)
def gen_accuracy(pre_labels, orig_labels):
count = 0
t_count = 0
count_ins = len(pre_labels)
for i in range(count_ins):
count_cand = len(pre_labels[i])
for j in range(count_cand):
count = count + 1
if pre_labels[i][j] == orig_labels[i][j]:
t_count = t_count + 1
accuracy = ( t_count + 0.0 ) / count
return accuracy
| TrivialError/branchlearning | model_evaluation.py | model_evaluation.py | py | 1,645 | python | en | code | 3 | github-code | 13 |
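A quick hand-checked example of the three formulas above, on tiny nested 0/1 label lists of the shape the gen_* functions expect:

pred = [[1, 0, 1], [1, 1, 0]]
gold = [[1, 0, 0], [0, 1, 1]]
pairs = [(p, g) for P, G in zip(pred, gold) for p, g in zip(P, G)]
tp = sum(p == 1 and g == 1 for p, g in pairs)  # 2
fp = sum(p == 1 and g == 0 for p, g in pairs)  # 2
fn = sum(p == 0 and g == 1 for p, g in pairs)  # 1
print(tp / (tp + fp))                              # precision = 0.5
print(tp / (tp + fn))                              # recall    = 2/3
print(sum(p == g for p, g in pairs) / len(pairs))  # accuracy  = 0.5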
18402639839 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 7 18:44:35 2022
@author: jhazelde
"""
import torch
import argparse
import gradient_methods as gm
from torchvision import datasets, transforms
from matplotlib import pyplot as plt
from tqdm import tqdm
import os
print(f'Using GPU: {torch.cuda.get_device_name(0)}')
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
parser = argparse.ArgumentParser(description='Train MNIST BNN')
parser.add_argument('-b', '--batch-size', default=10, type=int)
parser.add_argument('-T', '--timesteps', default=1000, type=int,
help='Simulation timesteps')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
help='Initial learning rate')
parser.add_argument('--dt', default=0.01, type=float,
help='Timestep for numerical integration')
parser.add_argument('-H', '--hidden-dim', default=100, type=int,
help='Dimension of hidden neuron layer')
parser.add_argument('-S', '--grad-samples', default=1, type=int,
help='Number of samples to use for weak gradient')
parser.add_argument('--stddev', '--standard-deviation', default=0.1, type=float,
help='Standard deviation to use for sampling for weak gradient')
parser.add_argument('--grad-method', default='SmoothGrad', type=str,
help='Gradient method to use (right now one of AutoGrad, SmoothGrad, RegGrad)')
parser.add_argument('--epochs', default=-1, type=int,
help='Number of training epochs. Defaults to infinite.')
parser.add_argument('--epoch-size', default=2000, type=int,
help='Number of training samples per epoch.')
parser.add_argument('--use-snn', action='store_true',
                    help='Toggles SNN/BNN use.')
class Trainer:
# If grad_computer is None, use autodiff.
def __init__(self, model, optim = None, stddev = 0.0, T_sim = -1, grad_computer = None):
self.model = model
self.optim = optim
self.grad_computer = grad_computer
self.stddev = stddev
self.T_sim = T_sim
def eval_on_batch(self, batch):
if self.optim is not None:
self.optim.zero_grad()
with torch.set_grad_enabled(self.grad_computer is None):
return self.model(batch.cuda(), self.stddev, T_sim=self.T_sim)
def eval_losses(self, loss_fun, out, expected):
mean_out = torch.mean(out, dim=2)
target = expected.to(mean_out.device).unsqueeze(0).repeat((out.shape[0], 1, 1))
unreduced_loss = loss_fun(mean_out, target)
losses = torch.mean(unreduced_loss, dim=(1,2))
return losses
def eval_accuracy(self, out, expected):
mean_out = torch.mean(out, dim=2)
target = expected.to(mean_out.device).unsqueeze(0).repeat((out.shape[0], 1, 1))
correct = (torch.argmax(target, -1) == torch.argmax(mean_out, -1)).float()
return torch.mean(correct, 1)
def optim_step(self, losses):
# Compute gradients an step optimizer with computed gradient results.
if self.grad_computer is None:
losses[0].backward() # Use autodiff on first sampled loss.
else:
grads = self.grad_computer.compute_grad(losses, self.model.noises)
for i in range(len(self.model.Ws)):
self.model.Ws[i].grad = grads[i] # Manually copy gradient.
if self.optim is not None:
self.optim.step()
def load_mnist(train_batch_sz, test_batch_sz):
from spike_train_mnist import SpikeTrainMNIST
transform=transforms.Compose([
transforms.ToTensor()
])
train_mnist = datasets.MNIST(
'../data/mnist_torch/',
train=True, download=True, transform=transform,
)
test_mnist = datasets.MNIST(
'../data/mnist_torch/',
train=False, download=True, transform=transform,
)
train_dataset = SpikeTrainMNIST(train_mnist, 'train')
val_dataset = SpikeTrainMNIST(test_mnist, 'validation')
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=train_batch_sz, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=test_batch_sz, shuffle=False)
return train_loader, val_loader
def train_mnist(args):
from bnn import Noisy_Weights_BNN
from config import CFG
ident = f'N-{args.epoch_size}-lr-{args.lr}-S-{args.grad_samples}-stddev-{args.stddev}-T-{args.timesteps}-dt-{args.dt}-b-{args.batch_size}-method-{args.grad_method}'
print(f'Identifier: {ident}')
# CFG.lif_beta = 0.99
CFG.neuron_model = 'LIF'
CFG.n_samples_train = args.epoch_size
CFG.n_samples_val = 500
CFG.n_samples_test = 500
CFG.test_batch_sz = 250
CFG.plot = False
CFG.plot_all = False
CFG.dt = args.dt
CFG.sim_t = args.timesteps
CFG.hidden_layers = [args.hidden_dim]
loss_fun = torch.nn.MSELoss(reduction='none').to('cuda')
# Load MNIST.
train_loader, val_loader = load_mnist(args.batch_size, 250)
# Setup model.
model = Noisy_Weights_BNN(args.grad_samples).cuda()
# Setup trainer.
optim = torch.optim.Adam(model.parameters(), lr = args.lr)
grad_computer = None
if args.grad_method == 'SmoothGrad':
grad_computer = gm.SmoothGrad(model.Ws, args.stddev)
elif args.grad_method == 'RegGrad':
grad_computer = gm.RegGrad(model.Ws)
trainer = Trainer(model, optim, stddev = args.stddev, grad_computer = grad_computer)
# trainer.model.load_state_dict(torch.load('../data/model_best_110_N-4000-lr-0.01-S-100-stddev-0.15-T-1000-dt-0.01-b-10-method-RegGrad.pt'))
epochs = args.epochs
if epochs < 0:
epochs = int(1e10)
max_accuracy = 0.0
for e in range(epochs):
# Evaluate.
trainer.model.S = 1 # Only need one sample to get accuracy.
accuracy = 0.0
for batch, expected in tqdm(val_loader):
out = trainer.eval_on_batch(batch)
accuracy += trainer.eval_accuracy(out, expected)[0]
plt.figure(dpi=400)
plt.plot(trainer.model.layers[1].V.detach().cpu().numpy()[0, :, :])
plt.title(f'Sample voltage, epoch = {e}')
plt.show()
trainer.model.S = args.grad_samples # Reset for training.
print('validation accuracy: ', accuracy.item() / len(val_loader))
if max_accuracy < accuracy:
max_accuracy = accuracy
torch.save(trainer.model.state_dict(), f'../data/model_best_{e}_{ident}.pt')
with open(f'../data/accuracy_{e}_{ident}.txt', 'w') as fl:
print(accuracy.item() / len(val_loader), file=fl)
# Train.
loss_record, smooth_loss_record = [], []
for batch, expected in tqdm(train_loader):
out = trainer.eval_on_batch(batch)
losses = trainer.eval_losses(loss_fun, out, expected)
trainer.optim_step(losses)
            loss_record.append(losses[0].item())
smooth_loss_record.append(gm.compute_smoothed_loss(losses).item())
plt.figure(dpi=300)
plt.plot(loss_record)
plt.show()
def loss_landscape_mnist(args):
from bnn import Noisy_Weights_BNN
from config import CFG
ident = f'N-{args.epoch_size}-lr-{args.lr}-S-{args.grad_samples}-stddev-{args.stddev}-T-{args.timesteps}-dt-{args.dt}-b-{args.batch_size}-method-{args.grad_method}'
print(f'Identifier: {ident}')
CFG.n_samples_train = 1
CFG.n_samples_val = 1
CFG.n_samples_test = 500
CFG.test_batch_sz = 250
CFG.plot = False
CFG.plot_all = False
CFG.dt = args.dt
CFG.sim_t = args.timesteps
CFG.hidden_layers = [args.hidden_dim]
loss_fun = torch.nn.MSELoss(reduction='none').to('cuda')
# Load MNIST.
train_loader, val_loader = load_mnist(args.batch_size, CFG.n_samples_val)
for batch, expected in val_loader:
break
# Setup model.
model = Noisy_Weights_BNN(1).cuda()
# Setup trainer.
trainer = Trainer(model, None, stddev = args.stddev)
s0 = dict(trainer.model.state_dict())
s1 = dict(torch.load('../data//model_best_582_N-2000-lr-0.01-S-100-stddev-0.15-T-1000-dt-0.1-b-10-method-RegGrad.pt'))
cur_state = dict(s0)
loss_interp = []
interpolant = torch.linspace(0.0, 0.4, 100)
for interp in tqdm(interpolant):
for key in cur_state:
cur_state[key] = s0[key] + interp * (s1[key] - s0[key])
trainer.model.load_state_dict(cur_state)
out = trainer.eval_on_batch(batch)
losses = trainer.eval_losses(loss_fun, out, expected)
loss_interp.append(losses[0].cpu().item())
# plt.figure(dpi=400)
# plt.plot(trainer.model.layers[1].V.detach().cpu().numpy()[0, :, :])
# plt.show()
plt.plot(interpolant, loss_interp)
plt.show()
class Reservoir_Trainer:
# If grad_computer is None, use autodiff.
def __init__(self, reservoir, optim = None, S = 1, stddev = 0.0, grad_computer = None):
self.model = reservoir
self.model.W.set_params(S, stddev)
self.optim = optim
self.grad_computer = grad_computer
def eval_on_batch(self, batch):
if self.optim is not None:
self.optim.zero_grad()
self.model.W.noisify()
with torch.set_grad_enabled(self.grad_computer is None):
inp = batch.repeat(self.model.W.S, 1, 1).cuda()
out = self.model(inp)
return out.reshape((self.model.W.S, -1, out.shape[1], out.shape[2]))
def eval_losses(self, loss_fun, out, expected):
mean_out = torch.mean(out, dim=2)
target = expected.to(mean_out.device).unsqueeze(0).repeat((out.shape[0], 1, 1))
unreduced_loss = loss_fun(mean_out, target)
losses = torch.mean(unreduced_loss, dim=(1,2))
return losses
def eval_accuracy(self, out, expected):
mean_out = torch.mean(out, dim=2)
target = expected.to(mean_out.device).unsqueeze(0).repeat((out.shape[0], 1, 1))
correct = (torch.argmax(target, -1) == torch.argmax(mean_out, -1)).float()
return torch.mean(correct, 1)
# Compute gradients and step optimizer with computed gradient results.
def optim_step(self, losses):
if self.grad_computer is None:
losses[0].backward() # Use autodiff on first sampled loss.
else:
grads = self.grad_computer.compute_grad(losses, [self.model.W.noise])
self.model.W.W.grad = grads[0] # Manually copy gradient.
if self.optim is not None:
self.optim.step()
def reservoir_example(args):
from bnn import HH_Complete
from config import CFG
import numpy as np
CFG.dt = args.dt
CFG.sim_t = args.timesteps
res = HH_Complete(10).cuda()
z = torch.ones((1, CFG.sim_t, 10)).cuda()
for i in range(1):
res(z)
T = res.T.detach().cpu().numpy()
plt.figure(dpi=500)
plt.plot(T[0, :, :])
plt.show()
W = 1000
rates = torch.mean(res.T[:, -W:, :], 1)
rates = rates.detach().cpu().numpy()
plt.figure(dpi=500)
plt.plot(rates[0])
plt.show()
rates = np.zeros((T.shape[0], T.shape[1] - W, T.shape[2]))
for i in range(T.shape[1] - W):
rates[:, i, :] = np.mean(T[:, i:i+W, :])
plt.figure(dpi=500)
plt.plot(rates[0, :, :])
plt.show()
def fi_curve(args, model_str = '', comp_fn = lambda x: x):
from bnn import LIF_Complete, HH_Complete
from config import CFG
CFG.dt = args.dt
CFG.sim_t = args.timesteps
CFG.Iapp = 0.0
B = 100
L = 50
if args.use_snn:
model = LIF_Complete(L).cuda()
else:
model = HH_Complete(L).cuda()
if len(model_str) > 0:
model.load_state_dict(torch.load(model_str))
model.W.noisify()
W = 2000
print(W)
Iapp = torch.linspace(0.0, 2, B)
inp = torch.zeros((B, CFG.sim_t, L)).cuda()
for b in range(B):
inp[b, :, :] = Iapp[b]
inp[b, :, :-1] = 0.0
out = model(inp)
out = out[:, :, 0:1]
for i in [B // 5, B // 2]:
plt.plot(model.V[i, :, 0:1].detach().cpu(), linewidth = 1.0)
plt.plot(out[i, :, 0:1].detach().cpu(), linewidth = 0.5)
plt.title(f'Iapp = {Iapp[i].item():.1f}')
plt.show()
rate = out[:, -W:, :]
rate = torch.logical_and(rate[:, :-1, :] < 0.5, rate[:, 1:, :] >= 0.5).float()
rate = rate * (1000 / CFG.dt)
rate = torch.mean(rate, 1)
print(rate.shape)
FI = rate.detach().cpu()
start = 0
end = -1
print(FI.shape)
plt.plot(Iapp[start:end], FI[start:end, :])
plt.xlabel('Applied Current')
plt.ylabel('Firing Rate (Hz)')
loss = torch.mean(torch.abs(FI - comp_fn(Iapp).reshape(-1, 1)))
plt.title(f'Weights scale: {torch.mean(torch.abs(model.W.W.data))}; loss {loss}')
# plt.plot(Iapp, torch.mean(FI, 1), color='black', linestyle = 'dashed', linewidth=5, alpha=0.5)
# plt.plot(Iapp[start:end], comp_fn(Iapp)[start:end])
plt.show()
def train_reservoir_match(args):
from bnn import HH_Complete, LIF_Complete
from config import CFG
torch.manual_seed(0)
CFG.dt = args.dt
CFG.sim_t = args.timesteps
CFG.Iapp = 0.1
L = 50
model = LIF_Complete(L).cuda()
# model.W.noisify()
# optim = torch.optim.Adam([model.W.W], lr = args.lr)
# z = torch.ones((1, CFG.sim_t, 10)).cuda()
# out = model(z)
# print(out.shape)
# loss = torch.mean(out)
# loss.backward()
# exit()
optim = torch.optim.Adam([model.W.W], lr = args.lr)
grad_computer = None
if args.grad_method == 'SmoothGrad':
grad_computer = gm.SmoothGrad([model.W.W], args.stddev)
elif args.grad_method == 'RegGrad':
grad_computer = gm.RegGrad([model.W.W])
trainer = Reservoir_Trainer(model, optim, args.grad_samples, args.stddev, grad_computer)
loss_fun = torch.nn.MSELoss(reduction='none').to('cuda')
B = args.batch_size
W = 400
match_fn = lambda x: (1 - torch.cos(2 * torch.pi * x)) / 2.0
loss_record = []
for i in tqdm(range(1000)):
s = torch.rand(B).cuda()
z = torch.ones((B, CFG.sim_t, L)).cuda() * s.reshape((B,1,1))
z[:, :, :-1] = 0.0 # Only input to final neuron
match = match_fn(s)
expected = torch.ones((B, 1)).cuda() * match.reshape((B, 1))
out = trainer.eval_on_batch(z)
rate = out[:, :, -W:, 0:1]
# Discrete rate (more precise/interpretable).
# rate = torch.logical_and(rate[:, :, :-1, :] < 0.5, rate[:, :, 1:, :] >= 0.5).float()
# rate = rate * (1000 / CFG.dt)
# expected = expected * 1000
target = expected.reshape((1, -1, 1)).repeat(rate.shape[0], 1, 1)
losses = (torch.mean(rate, 2) - target)**2
losses = torch.mean(losses, (1, 2))
# losses = trainer.eval_losses(loss_fun, rate, expected)
trainer.optim_step(losses)
loss_record.append(losses[0].item())
print(torch.mean(expected).item(), torch.mean(rate).item())
if i % 50 == 0:
model_str = f'../data/reservoir{L}_match_{i}_cos_inout2.pt'
torch.save(model.state_dict(), model_str)
plt.plot([l**0.5 for l in loss_record])
plt.show()
plt.plot(out[0, 0, :, 0].cpu().detach())
plt.show()
fi_curve(args, model_str, match_fn)
def train_reservoir_mean_integrate(args):
from bnn import HH_Complete, LIF_Complete
from config import CFG
# torch.manual_seed(0)
CFG.dt = args.dt
CFG.sim_t = args.timesteps
CFG.Iapp = 0.0
L = 50
model = LIF_Complete(L).cuda()
# model.load_state_dict(torch.load('../data/reservoir50_mean_200_0.pt'))
# model.W.noisify()
# optim = torch.optim.Adam([model.W.W], lr = args.lr)
# z = torch.ones((1, CFG.sim_t, 10)).cuda()
# out = model(z)
# print(out.shape)
# loss = torch.mean(out)
# loss.backward()
# exit()
optim = torch.optim.Adam([model.W.W], lr = args.lr)
grad_computer = None
if args.grad_method == 'SmoothGrad':
grad_computer = gm.SmoothGrad([model.W.W], args.stddev)
elif args.grad_method == 'RegGrad':
grad_computer = gm.RegGrad([model.W.W])
trainer = Reservoir_Trainer(model, optim, args.grad_samples, args.stddev, grad_computer)
loss_fun = torch.nn.MSELoss(reduction='none').to('cuda')
B = args.batch_size
W = 30 # Very small window because we want to have some notion of memory
delay = 0
loss_record = []
for i in tqdm(range(1000)):
s = torch.rand(B).cuda()
mean = torch.ones((B, CFG.sim_t, L)).cuda() * s.reshape((B,1,1))
z = torch.normal(mean, 0.0)
if False: # Plot integral
integral = torch.zeros(CFG.sim_t)
integral[0] = z[0,0,0]
for p in range(1, CFG.sim_t):
integral[p] = integral[p-1] + z[0, p, 0]
integral /= CFG.sim_t
plt.subplot(2,1,1)
plt.plot(z[0, :, 0])
plt.xticks([])
plt.axhline(s[0], 0, CFG.sim_t)
plt.title(s[0].item())
plt.subplot(2,1,2)
plt.plot(integral)
plt.title(integral[-1].item())
plt.show()
z[:, :, :-1] = 0.0 # Only input to final neuron
if delay > 0:
z[:, -delay:, :] = 0.0 # Turn off for final interval
expected = torch.ones((B, 1)).cuda() * s.reshape((B, 1))
out = trainer.eval_on_batch(z)
rate = out[:, :, -W:, 0:1]
# Discrete rate (more precise/interpretable).
# rate = torch.logical_and(rate[:, :, :-1, :] < 0.5, rate[:, :, 1:, :] >= 0.5).float()
# rate = rate * (1000 / CFG.dt)
# expected = expected * 1000
target = expected.reshape((1, -1, 1)).repeat(rate.shape[0], 1, 1)
losses = (torch.mean(rate, 2) - target)**2
losses = torch.mean(losses, (1, 2))
# losses = trainer.eval_losses(loss_fun, rate, expected)
loss_record.append(losses[0].item())
print(torch.mean(torch.abs(torch.mean(rate, 2) - target)), losses[0].item())
if i % 50 == 0:
model_str = f'../data/reservoir{L}_mean_{i}_{delay}.pt'
torch.save(model.state_dict(), model_str)
plt.plot([l**0.5 for l in loss_record])
plt.show()
plt.subplot(2,1,1)
plt.plot(z.cpu().detach()[:, :, -1].transpose(0,1))
plt.subplot(2,1,2)
integral = torch.zeros(CFG.sim_t)
integral[0] = out[0, 0, 0, 0]
for i in range(1, CFG.sim_t):
integral[i] = out[0, 0, i, 0] + integral[i-1]
mn = 0 if i <= W else i - W
integral[i] = torch.mean(out[0, 0, mn:i, 0])
# integral /= CFG.sim_t
plt.plot(integral.cpu().detach())
# plt.plot(z[0, :, -1].cpu().detach())
plt.axhline(s[0].cpu().item(), 0, CFG.sim_t)
# plt.plot(out[0, :, :, 0].cpu().detach().transpose(0,1), linewidth = 0.9)
plt.show()
fi_curve(args, model_str)
trainer.optim_step(losses)
def plot_weight_change(model, A, B, L):
import glob
fls = glob.glob('../data/wang_RUN2_50_*.pt')
fls = [fls[i] for i in [0, 1, 2] + list(range(3, len(fls), 3))]
Ws = []
means = []
for fl in fls:
model.load_state_dict(torch.load(fl))
W = model.W.W.data.cpu().detach()
Ws.append(W)
mean_grid = torch.zeros((3,3))
inds = [0, 15, 30, L]
for i in range(3):
for j in range(3):
mean_grid[i, j] = torch.mean(W[inds[i]:inds[i+1], inds[j]:inds[j+1]])
means.append(mean_grid)
vmin = min([torch.min(mean) for mean in means])
vmax = max([torch.max(mean) for mean in means])
abs_max = max(abs(vmin), abs(vmax))
vmin = -abs_max; vmax = abs_max
plt.figure(figsize = (3,3))
for idx in range(len(fls)):
plt.subplot(3, 3, 1 + idx)
W = means[idx]
im = plt.imshow(W, cmap='PiYG', vmin = vmin, vmax = vmax)
plt.box(False); plt.xticks([]); plt.yticks([])
if idx == 0:
plt.xticks([0, 1, 2], ['A', 'B', 'ext'])
plt.gca().xaxis.tick_top()
plt.yticks([0, 1, 2], ['A', 'B', 'ext'])
# plt.axhline(14.5, 0, L, color = 'black')
# plt.axhline(29.5, 0, L, color = 'black')
# plt.axvline(14.5, 0, L, color = 'black')
# plt.axvline(29.5, 0, L, color = 'black')
fig = plt.gcf()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
plt.colorbar(im, cax=cbar_ax)
plt.show()
def wang_task_train(args, train = True, linear_mu = False):
from bnn import HH_Complete, LIF_Complete
from config import CFG
from torchviz import make_dot
# torch.manual_seed(0)
CFG.dt = args.dt
CFG.sim_t = args.timesteps
# CFG.Iapp = 0.5
# CFG.lif_beta = 0.99
L = 50
A = range(0, 15)
B = range(15, 30)
if args.use_snn:
model = LIF_Complete(L).cuda()
else:
model = HH_Complete(L).cuda()
# model.load_state_dict(torch.load('../data/wang_RUN2_50_950.pt'))
if not train:
plot_weight_change(model, A, B, L)
optim = torch.optim.Adam([model.W.W], lr = args.lr)
grad_computer = None
if args.grad_method == 'SmoothGrad':
grad_computer = gm.SmoothGrad([model.W.W], args.stddev)
elif args.grad_method == 'RegGrad':
grad_computer = gm.RegGrad([model.W.W])
trainer = Reservoir_Trainer(model, optim, args.grad_samples, args.stddev, grad_computer)
bsize = args.batch_size
std = 0.5
firing_window = CFG.sim_t // 10
select_rate, deny_rate = 0.8, 0.2 # We want rate to be equal to select_rate if we select this category and equal to deny_rate if not.
loss_fun = torch.nn.MSELoss(reduction='none').cuda()
loss_record = []
for i in tqdm(range(1000)):
mu_a, mu_b = torch.rand(bsize).cuda(), torch.rand(bsize).cuda()
if linear_mu:
mu_a, mu_b = torch.linspace(0, 1.0, bsize).cuda(), torch.zeros(bsize).cuda()
z = torch.normal(torch.zeros(bsize, CFG.sim_t, L), std).cuda()
z[:, :, A] += mu_a.reshape((bsize, 1, 1))
z[:, :, B] += mu_b.reshape((bsize, 1, 1))
out = trainer.eval_on_batch(z).reshape((-1, CFG.sim_t, L))
window = out[:, -firing_window:, :]
rA = torch.mean(window[:, :, A], (1, 2))
rB = torch.mean(window[:, :, B], (1, 2))
targetA = (mu_a > mu_b).float() * (select_rate - deny_rate) + deny_rate
targetB = (mu_a < mu_b).float() * (select_rate - deny_rate) + deny_rate
targetA = targetA.repeat(args.grad_samples)
targetB = targetB.repeat(args.grad_samples)
losses = 0.5 * (loss_fun(rA, targetA) + loss_fun(rB, targetB))
losses = torch.mean(losses.reshape((-1, bsize)), 1) # Mean over batches, not grad samples.
loss_record.append(losses[0].item())
accuracy = ((rA[:bsize] > rB[:bsize]) == (mu_a > mu_b)).float()
if train:
if i % 5 == 0:
print(f'accuracy: {accuracy.mean().item()}')
model_str = f'../data/wang_BNN_{L}_{i}.pt'
torch.save(model.state_dict(), model_str)
plt.plot(loss_record)
plt.show()
fi_curve(args, model_str)
trainer.optim_step(losses)
else:
print(f'accuracy: {accuracy.mean().item()}')
if linear_mu:
# Compute accuracy versus mean.
accuracy = accuracy.detach().cpu()
bins = torch.linspace(0.0, 1.0, 30)
hist = torch.zeros_like(bins)
stride = bsize // len(bins)
for k in range(len(bins)):
hist[k] = torch.mean(accuracy[stride*k:stride*(k+1)])
plt.plot(bins[:-1], hist[:-1], '-o')
                plt.xlabel(r'$|\mu_A - \mu_B|$')
plt.ylabel('Accuracy')
plt.show()
# Compute reaction time versus mean.
thresh = 0.7
timesteps = torch.argmax((torch.mean(out[:, :, A], 2) > thresh).float(), 1)
reaction_times = timesteps * CFG.dt
plt.plot(mu_a.cpu().detach(), reaction_times.cpu().detach(), '.')
plt.ylabel('Reaction time (ms)')
                plt.xlabel(r'$|\mu_A - \mu_B|$')
import numpy as np
fit = np.polyfit(mu_a.cpu().detach().numpy(), reaction_times.cpu().detach().numpy(), 4)
p = np.poly1d(fit)
plt.plot(mu_a.cpu().detach(), p(mu_a.cpu().detach().numpy()))
plt.show()
# Analyze neuron responses.
for wind in [1, firing_window]:
rate_all_A = torch.zeros((bsize, CFG.sim_t))
rate_all_B = torch.zeros((bsize, CFG.sim_t))
for i in range(CFG.sim_t):
mx = min(CFG.sim_t, wind + i)
rate_all_A[:, i] = torch.mean(out[:, i:mx, A], (1,2))
rate_all_B[:, i] = torch.mean(out[:, i:mx, B], (1,2))
outc = out.cpu().detach()
for b in range(1):
pop_matrix = outc[b, :, :31].transpose(0,1)
pop_matrix[A, :] = 1 - pop_matrix[A, :]
pop_matrix[B, :] += 1 # This is just a trick to make imshow color two pops differently.
plt.imshow(pop_matrix, cmap='bwr', aspect='auto', interpolation = 'none')
plt.show()
plt.title(f'$\mu_A =$ {mu_a[b].item():.2f}, $\mu_B =$ {mu_b[b].item():.2f}')
plt.plot(rate_all_A.detach().cpu()[b, :], color = 'blue')
plt.plot(rate_all_B.detach().cpu()[b, :], color = 'red')
plt.show()
return
if __name__ == '__main__':
args = parser.parse_args()
wang_task_train(args)
exit()
# fi_curve(args, '../data/reservoir50_mean_250.pt', lambda x : x**2)
wang_task_train(args)
exit()
#train_mnist(args)
| meeree/LossLandscapesBNN | src/trainer_v2.py | trainer_v2.py | py | 26,930 | python | en | code | 0 | github-code | 13 |
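The gm.SmoothGrad / gm.RegGrad internals are not shown in this record, so as a hedged sketch of the general idea behind such sample-based weak gradients: perturb the weights with Gaussian noise and estimate dL/dW as E[loss * noise] / sigma^2 (a Gaussian-smoothing / Stein estimator). This is an assumption about the technique, not the repository's implementation.

import torch

def smoothed_grad(loss_fn, W, stddev=0.1, samples=4096):
    # sample noisy copies of W and correlate the losses with the noise
    noises = torch.randn(samples, *W.shape) * stddev
    losses = torch.stack([loss_fn(W + n) for n in noises])
    # Gaussian-smoothing estimator: grad ~ E[loss * noise] / stddev**2
    return (losses.view(-1, *([1] * W.dim())) * noises).mean(0) / stddev**2

W = torch.zeros(3)
g = smoothed_grad(lambda w: (w - 1.0).pow(2).sum(), W, stddev=0.05, samples=20000)
print(g)  # noisy estimate of the analytic gradient 2*(W - 1) = [-2, -2, -2]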
19733164841 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 24 16:41:32 2021
@author: rachid2
"""
from sklearn.datasets import load_digits
from frnmf import FRNMF
import nimfa # from https://nimfa.biolab.si/
import tools
import numpy as np
#
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
#warnings.filterwarnings("ignore", category=RuntimeWarning)
# load data
data = load_digits()
X = data.data
y = data.target
# sacel data
X = tools.featureScaling(X, scaler_name = 'minmax')
# parameters
# r: rank
# max iteration: mi
r = len(np.unique(y)) # number of centers (classes)
mi = 500
# matrix factorization algorithms
# Feature Relationship Preservation NMF (FRNM)
frnmf = FRNMF(X.T, rank = r, lmbd =0.5, max_it = mi)
frnmf.compute_w_h()
Hfrnmf = frnmf.H
y_pred = np.argmax(Hfrnmf, axis=0)
tools.scoresList(y, y_pred, 'FRNMF')
# Basic NMF (NMF)
nmf = nimfa.Nmf(X.T, rank = r, max_iter = mi)
nmf_fit = nmf()
Hnmf = nmf_fit.coef()
y_pred = np.array(np.argmax(Hnmf, axis=0))[0]
tools.scoresList(y, y_pred, 'NMF')
#Probabilistic Sparse Matrix Factorization (PSNMF)
psmf = nimfa.Psmf(X.T, rank = r, max_iter=mi)
psmf_fit = psmf()
Hpsmf = psmf_fit.coef()
y_pred = np.array(np.argmax(Hpsmf, axis=0))[0]
tools.scoresList(y, y_pred, 'PSMF')
#Probabilistic Matrix Factorization (PMF)
pmf = nimfa.Pmf(X.T, rank = r, max_iter=mi)
pmf_fit = pmf()
Hpmf = pmf_fit.coef()
y_pred = np.array(np.argmax(Hpmf, axis=0))[0]
tools.scoresList(y, y_pred, 'PMF')
# Penalized Matrix Factorizartion for Constrained Clustering (PMFCC)
pmfcc = nimfa.Pmfcc(X.T, rank = r, max_iter=mi)
pmfcc_fit = pmfcc()
Hpmfcc = pmfcc_fit.coef()
y_pred = np.array(np.argmax(Hpmfcc, axis=0))[0]
tools.scoresList(y, y_pred, 'PMFCC')
#Spare NMF (SNMF)
snmf = nimfa.Snmf(X.T, seed="random_c", rank = r, max_iter=mi)
snmf_fit = snmf()
Hsnmf = snmf_fit.coef()
y_pred = np.array(np.argmax(Hsnmf, axis=0))[0]
tools.scoresList(y, y_pred, 'SNMF')
| Hedjrachid/FR-NMF | demo.py | demo.py | py | 1,954 | python | en | code | 0 | github-code | 13 |
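For comparison with the nimfa variants above, a minimal sketch of the same cluster-from-H recipe using scikit-learn's plain NMF on toy data:

import numpy as np
from sklearn.decomposition import NMF

rng = np.random.default_rng(0)
X = rng.random((50, 8))                      # toy nonnegative data, samples x features
model = NMF(n_components=3, init='nndsvda', max_iter=500, random_state=0)
W = model.fit_transform(X.T)                 # features x rank
H = model.components_                        # rank x samples
y_pred = np.argmax(H, axis=0)                # cluster = dominant component, as above
print(y_pred[:10])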
17043310294 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMsaasMediarecogVoiceMediaaudioUploadModel(object):
def __init__(self):
self._data = None
self._extinfo_a = None
self._extinfo_b = None
self._extinfo_c = None
self._extinfo_d = None
self._labeltime = None
self._vname = None
self._vtype = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def extinfo_a(self):
return self._extinfo_a
@extinfo_a.setter
def extinfo_a(self, value):
self._extinfo_a = value
@property
def extinfo_b(self):
return self._extinfo_b
@extinfo_b.setter
def extinfo_b(self, value):
self._extinfo_b = value
@property
def extinfo_c(self):
return self._extinfo_c
@extinfo_c.setter
def extinfo_c(self, value):
self._extinfo_c = value
@property
def extinfo_d(self):
return self._extinfo_d
@extinfo_d.setter
def extinfo_d(self, value):
self._extinfo_d = value
@property
def labeltime(self):
return self._labeltime
@labeltime.setter
def labeltime(self, value):
self._labeltime = value
@property
def vname(self):
return self._vname
@vname.setter
def vname(self, value):
self._vname = value
@property
def vtype(self):
return self._vtype
@vtype.setter
def vtype(self, value):
self._vtype = value
def to_alipay_dict(self):
params = dict()
if self.data:
if hasattr(self.data, 'to_alipay_dict'):
params['data'] = self.data.to_alipay_dict()
else:
params['data'] = self.data
if self.extinfo_a:
if hasattr(self.extinfo_a, 'to_alipay_dict'):
params['extinfo_a'] = self.extinfo_a.to_alipay_dict()
else:
params['extinfo_a'] = self.extinfo_a
if self.extinfo_b:
if hasattr(self.extinfo_b, 'to_alipay_dict'):
params['extinfo_b'] = self.extinfo_b.to_alipay_dict()
else:
params['extinfo_b'] = self.extinfo_b
if self.extinfo_c:
if hasattr(self.extinfo_c, 'to_alipay_dict'):
params['extinfo_c'] = self.extinfo_c.to_alipay_dict()
else:
params['extinfo_c'] = self.extinfo_c
if self.extinfo_d:
if hasattr(self.extinfo_d, 'to_alipay_dict'):
params['extinfo_d'] = self.extinfo_d.to_alipay_dict()
else:
params['extinfo_d'] = self.extinfo_d
if self.labeltime:
if hasattr(self.labeltime, 'to_alipay_dict'):
params['labeltime'] = self.labeltime.to_alipay_dict()
else:
params['labeltime'] = self.labeltime
if self.vname:
if hasattr(self.vname, 'to_alipay_dict'):
params['vname'] = self.vname.to_alipay_dict()
else:
params['vname'] = self.vname
if self.vtype:
if hasattr(self.vtype, 'to_alipay_dict'):
params['vtype'] = self.vtype.to_alipay_dict()
else:
params['vtype'] = self.vtype
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMsaasMediarecogVoiceMediaaudioUploadModel()
if 'data' in d:
o.data = d['data']
if 'extinfo_a' in d:
o.extinfo_a = d['extinfo_a']
if 'extinfo_b' in d:
o.extinfo_b = d['extinfo_b']
if 'extinfo_c' in d:
o.extinfo_c = d['extinfo_c']
if 'extinfo_d' in d:
o.extinfo_d = d['extinfo_d']
if 'labeltime' in d:
o.labeltime = d['labeltime']
if 'vname' in d:
o.vname = d['vname']
if 'vtype' in d:
o.vtype = d['vtype']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayMsaasMediarecogVoiceMediaaudioUploadModel.py | AlipayMsaasMediarecogVoiceMediaaudioUploadModel.py | py | 4,129 | python | te | code | 241 | github-code | 13 |
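A minimal round-trip through the dict helpers above; the field values are placeholders, and it assumes the class is imported as defined in this record:

m = AlipayMsaasMediarecogVoiceMediaaudioUploadModel()
m.vname = "sample.wav"   # placeholder values
m.vtype = "wav"
payload = m.to_alipay_dict()   # {'vname': 'sample.wav', 'vtype': 'wav'}
restored = AlipayMsaasMediarecogVoiceMediaaudioUploadModel.from_alipay_dict(payload)
assert restored.vname == m.vname and restored.vtype == m.vtype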