seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
34801867065 | import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np

# Load the dataset of investment strategies vs. share of respondents.
file_path = './data/Retail_Investors_Focus.xlsx'
data = pd.read_excel(file_path)

# Show basic information about the dataset and the first few rows
data_info = data.info()
first_rows = data.head()
data_info, first_rows

# Sort the whole frame by Percent in descending order.
# NOTE: the previous code sorted the two columns independently and wrote them
# back in place, which keeps those two columns mutually aligned but silently
# misaligns them with every other column in the frame; sort_values avoids that.
data = data.sort_values('Percent of Respondents', ascending=False).reset_index(drop=True)

# Set up the plot
sns.set_theme(style="whitegrid")
plt.figure(figsize=(10, 8))

# Draw the horizontal bar chart
sns.barplot(y=data['Investment Strategy'], x=data['Percent of Respondents'], orient='h', palette="rocket")

# Customize the plot
plt.title('What are Retail Investors\nInterested in Buying in 2023?', fontsize=16, fontweight='bold')
plt.xlabel('Percent of Respondents', fontsize=14)
plt.ylabel('')
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)

# Add percentages on the bars (bar order matches the sorted frame's row order).
for index, value in enumerate(data['Percent of Respondents']):
    plt.text(value, index, f' {value}%', va='center', fontsize=12, color='white', fontweight='bold')

# Save plot (must happen before plt.show() clears the current figure)
plt.savefig('./output/percent_retail_buying.png', bbox_inches='tight')

# Show the plot
plt.tight_layout()
plt.show() | monacosc1/makeover-monday | 2023/W41/retail_investors.py | retail_investors.py | py | 1,367 | python | en | code | 0 | github-code | 13 |
23603257269 | import numpy as np
import matplotlib.pyplot as plt
# Prefer the GPU backend when available, otherwise fall back to NumPy.
try:
    import cupy as cp
except ImportError:
    # NOTE: the original wrote `except ImportError or ModuleNotFoundError:`,
    # which evaluates the boolean expression first and catches only its result
    # (ImportError). It worked by accident because ModuleNotFoundError is a
    # subclass of ImportError; catching ImportError alone is the correct form.
    print('CuPy is not found, using NumPy backend...')
    cp = np
def draw_PSF_difference(inp_0, inp_1, diff, is_log=False, diff_clims=None, crop=None, colormap='viridis'):
    """Display two PSF images side by side together with their difference map.

    Panels 0 and 1 share color limits taken from the FIRST image; panel 2 shows
    `diff` on a symmetric diverging (RdYlBu) scale centered at zero.

    Parameters:
        inp_0, inp_1: 2D PSF arrays to compare.
        diff: precomputed difference image (this function does not compute it).
        is_log: if True, show log(data) in the first two panels.
        diff_clims: symmetric color limit for the difference panel;
            defaults to max(|diff|).
        crop: optional index/slice expression applied to both inputs.
        colormap: matplotlib colormap name for the PSF panels.
    """
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    fig, axs = plt.subplots(1, 3)
    fig.set_size_inches(15, 15)
    # Copy when not cropping so later operations never mutate the caller's arrays.
    if crop is None:
        data_0 = np.copy(inp_0)
        data_1 = np.copy(inp_1)
    else:
        data_0 = inp_0[crop]
        data_1 = inp_1[crop]
    # Color limits are derived from data_0 only, so both PSF panels share a scale.
    if is_log:
        vmin = np.nanmin(np.log(data_0))
        vmax = np.nanmax(np.log(data_0))
        im0 = axs[0].imshow(np.log(data_0), vmin=vmin, vmax=vmax, cmap=colormap)
    else:
        vmin = np.nanmin(data_0)
        vmax = np.nanmax(data_0)
        im0 = axs[0].imshow(data_0, vmin=vmin, vmax=vmax, cmap=colormap)
    divider = make_axes_locatable(axs[0])
    cax = divider.append_axes('right', size='10%', pad=0.05)
    axs[0].set_axis_off()
    fig.colorbar(im0, cax=cax, orientation='vertical')
    if is_log:
        im1 = axs[1].imshow(np.log(data_1), vmin=vmin, vmax=vmax, cmap=colormap)
    else:
        im1 = axs[1].imshow(data_1, vmin=vmin, vmax=vmax, cmap=colormap)
    divider = make_axes_locatable(axs[1])
    cax = divider.append_axes('right', size='10%', pad=0.05)
    axs[1].set_axis_off()
    fig.colorbar(im1, cax=cax, orientation='vertical')
    # Difference panel: symmetric limits around zero on a diverging colormap.
    if diff_clims is None:
        diff_clims = np.abs(diff).max()
    im2 = axs[2].imshow(diff, cmap=plt.get_cmap("RdYlBu"), vmin=-diff_clims, vmax=diff_clims)
    divider = make_axes_locatable(axs[2])
    cax = divider.append_axes('right', size='10%', pad=0.05)
    axs[2].set_axis_off()
    fig.colorbar(im2, cax=cax, orientation='vertical')
def mask_circle(N, r, center=(0, 0), centered=True):
    """Build an N x N int32 mask that is 1 strictly inside a circle of radius r.

    With `centered` the grid spans symmetric coordinates around zero (with a
    half-pixel shift for even N); otherwise plain 0..N-1 pixel coordinates.
    `center` offsets the circle as (row, col).
    """
    half_shift = 0.5 * (1 - N % 2)  # even grids have no exact center pixel
    if centered:
        coords = np.linspace(-N // 2 + N % 2 + half_shift, N // 2 - half_shift, N)
    else:
        coords = np.linspace(0, N - 1, N)
    xx, yy = np.meshgrid(coords - center[1], coords - center[0])
    inside = np.hypot(yy, xx) < r
    return np.where(inside, 1, 0).astype(np.int32)
# To make this function work, one must ensure that size of inp can be divided by N
def binning(inp, N):
    """Sum-bin a 2D array into non-overlapping N x N blocks.

    Returns an array of shape (inp.shape[0]//N, inp.shape[1]//N) where each
    entry is the sum of the corresponding N x N block of `inp`. N == 1 returns
    the input unchanged (same object).
    """
    if N == 1:
        return inp
    # Dispatch on the array's defining module. The previous check,
    # hasattr(inp, 'device'), wrongly selects the CuPy backend for NumPy >= 2.0
    # ndarrays, which also expose a `.device` attribute.
    xp = cp if type(inp).__module__.split('.')[0] == 'cupy' else np
    out = xp.dstack(xp.split(xp.dstack(xp.split(inp, inp.shape[0]//N, axis=0)), inp.shape[1]//N, axis=1))
    return out.sum(axis=(0, 1)).reshape([inp.shape[0]//N, inp.shape[1]//N]).T
def Gaussian2DTilted(amp=1.0, x_0=0.0, y_0=0.0, s_x=1.0, s_y=1.0, ang=0.0):
    """Sample a rotated 2D Gaussian on a fixed 15x15 integer grid.

    Returns None for degenerate widths (s_x or s_y below 1e-2).
    `ang` is the rotation angle in degrees.
    """
    if s_x < 1e-2 or s_y < 1e-2:
        return None
    grid_size = 15
    axis = np.arange(-grid_size // 2 + 1, grid_size // 2 + 1)
    xx, yy = np.meshgrid(axis, axis)
    theta = ang * np.pi / 180.0
    cos_sq = np.cos(theta) ** 2
    sin_sq = np.sin(theta) ** 2
    # Standard rotated-Gaussian quadratic-form coefficients.
    a = cos_sq / (2 * s_x ** 2) + sin_sq / (2 * s_y ** 2)
    b = np.sin(2 * theta) * (1.0 / (4 * s_y ** 2) - 1.0 / (4 * s_x ** 2))
    c = sin_sq / (2 * s_x ** 2) + cos_sq / (2 * s_y ** 2)
    dx = xx - x_0
    dy = yy - y_0
    return amp * np.exp(-(a * dx ** 2 + 2 * b * dx * dy + c * dy ** 2))
def magnitudeFromPSF(tel, photons, band, sampling_time=None):
    """Convert a photon count map into an apparent magnitude for `band`.

    The counts are spread uniformly over the pupil, normalized by the
    reflectivity, collecting area and exposure time, then compared against the
    band's photometric zero point. Defaults to the detector's sampling time.
    """
    if sampling_time is None:
        sampling_time = tel.det.sampling_time
    zero_point = tel.src.PhotometricParameters(band)[2]
    flux_map = tel.pupil * (photons / tel.pupil.sum())
    collecting_area = np.pi * (tel.D / 2) ** 2
    photon_rate = np.nansum(flux_map / tel.pupilReflectivity) / collecting_area / sampling_time
    return -2.5 * np.log10(368 * photon_rate / zero_point)
def TruePhotonsFromMag(tel, mag, band, sampling_time):  # [photons/aperture] !not per m2!
    """Photon count collected over the whole aperture for a given magnitude.

    Inverse of magnitudeFromPSF: scales the band zero point by the magnitude
    attenuation and by reflectivity * collecting area * exposure time.
    """
    aperture_factor = tel.pupilReflectivity * np.pi * (tel.D / 2) ** 2 * sampling_time
    zero_point = tel.src.PhotometricParameters(band)[2]
    return zero_point / 368 * 10 ** (-mag / 2.5) * aperture_factor
def NoisyPSF(tel, PSF, integrate=True):
    """Pass the PSF through the telescope's detector model with noise enabled.

    Thin wrapper around tel.det.getFrame; `integrate` is forwarded unchanged.
    """
    return tel.det.getFrame(PSF, noise=True, integrate=integrate)
def Nph_diff(m_1, m_2):
    """Photon-flux ratio corresponding to the magnitude difference m_2 - m_1.

    Defined as 10**(-(m_2 - m_1) / 2.5); replaces a lambda assignment (PEP 8
    E731) so tracebacks and introspection get a proper function name.
    """
    return 10 ** (-(m_2 - m_1) / 2.5)
mag_diff = lambda ph_diff: -2.5*np.log10(ph_diff) | EjjeSynho/LIFT | tools/misc.py | misc.py | py | 3,990 | python | en | code | 0 | github-code | 13 |
11001981670 | import subprocess
import threading
# Set by run_pocsuite_scan whenever a scan worker finishes (shared across threads).
scan_complete_event = threading.Event()
def run_pocsuite_scan(target, poc_file):
    """Run a pocsuite3 scan of `target` with the given POC file and print the result.

    Always sets scan_complete_event when done, whether the scan succeeded,
    failed, or could not be launched.
    """
    # Pass argv as a list with the default shell=False so that target/poc_file
    # cannot inject shell metacharacters (the previous f-string + shell=True
    # was a command-injection vector).
    pocsuite3_command = ["pocsuite", "-r", poc_file, "-u", target]
    print(f"Starting Pocsuite3 scan for {target} using POC file {poc_file}")
    try:
        result = subprocess.run(pocsuite3_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        if result.returncode == 0:
            print(f"Pocsuite3 scan for {target} using POC file {poc_file} completed successfully:")
            print(result.stdout)
        else:
            print(f"Pocsuite3 scan for {target} using POC file {poc_file} failed:")
            print(result.stderr)
    except Exception as e:
        # Covers FileNotFoundError when pocsuite is not installed, among others.
        print(f"Error running Pocsuite3 for {target}: {str(e)}")
    scan_complete_event.set()
def main():
    """Launch one scanning thread per (target, POC) pair and wait for all of them."""
    # NOTE(review): placeholder entries — replace with real targets/POC paths.
    targets_and_pocs = [
        {"target_url": "http://xxxx.com", "poc_file_path": "/pco"},
    ]
    threads = []
    for item in targets_and_pocs:
        target_url = item["target_url"]
        poc_file_path = item["poc_file_path"]
        thread = threading.Thread(target=run_pocsuite_scan, args=(target_url, poc_file_path))
        threads.append(thread)
        thread.start()
    # Block until every scan thread has finished.
    for thread in threads:
        thread.join()


if __name__ == "__main__":
    main()
| yuag/bgscan | pocsuite3/pocsuite3.py | pocsuite3.py | py | 1,308 | python | en | code | 9 | github-code | 13 |
2260399068 | import numpy as np
import cv2
import pyzbar.pyzbar as pyzbar
import csv
import pandas as pd
import time
import tkinter as tk
# Running tally per product: index i counts scans of barcode payload 'Item:<i+1>'.
item = [0,0,0,0,0,0,0,0,0,0]
def update_items_local(data):
    """Increment the tally for a scanned barcode payload of the form 'Item:<n>'.

    Only n in 1..len(item) updates the tally; any other payload leaves it
    unchanged. In every case the current counts are printed and a 3-second
    pause debounces repeated scans of the same code.
    """
    global item
    # Parse the item number once instead of a 10-branch if/elif chain.
    if data.startswith('Item:'):
        try:
            idx = int(data.split(':', 1)[1]) - 1
        except ValueError:
            idx = -1  # non-numeric suffix: ignore
        if 0 <= idx < len(item):
            item[idx] += 1
    print(item)
    print('RECOGNIZED')
    time.sleep(3)
def webcam():
    """Read frames from the default camera, decode barcodes, and tally items.

    Each decoded barcode is outlined on the frame and counted via
    update_items_local. Pressing 'q' appends the current tally to Items.csv
    and exits.
    """
    cap = cv2.VideoCapture(0)
    ret = True
    while(ret):
        # Capture frame-by-frame
        ret, frame = cap.read()
        # Capture the barcode
        decodedObjects = pyzbar.decode(frame)
        if len(decodedObjects) != 0:
            for decodedObject in decodedObjects:
                # Outline the detected barcode polygon on the frame.
                hull = decodedObject.polygon
                n = len(hull)
                for j in range(0, n):
                    cv2.line(frame, hull[j], hull[(j+1) % n], (0, 0, 255), 3)
                print((decodedObject.data).decode())
                data_decoded = decodedObject.data.decode()
                update_items_local(data_decoded)
        # Display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            # Persist the tally before quitting.
            csv_file = 'Items.csv'
            with open(csv_file, "a") as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(item)
            break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    webcam()
| adil-ammar/Smart-Inventory-Management | Updation/Trial.py | Trial.py | py | 1,972 | python | en | code | 0 | github-code | 13 |
13809080168 | from typing import List,Tuple
from simulacion import Simulacion,Metrica,crear_eventos_llegada,Evento,crear_eventos_salida
from tareas import Tarea,tareas_random,string_a_fecha
import bisect
import configuracion
from configuracion import Configuracion,print
import json
from math import ceil
from administradores import Administrador,crear_administrador,PefilProgramador
import matplotlib.pyplot as plt
from tqdm import tqdm_gui
import time
def cargar_tareas(path: str) -> List[Tarea]:
    """Load task specs from a JSON file, ordered by creation date.

    Returns None when no path is given.
    """
    if not path:
        return None
    with open(path) as file:
        tareas_specs = json.load(file)
    tareas_specs.sort(key=lambda spec: string_a_fecha(spec["fecha_creacion"]))
    return [Tarea(spec) for spec in tareas_specs]
def realizar_simulacion(tareas_file_path: str = None) -> Tuple[Simulacion, List]:
    """Build a Simulacion from a JSON task file (or random tasks) and run it.

    Returns the finished simulation together with its metric results.
    """
    lista_tareas = cargar_tareas(tareas_file_path)
    if not lista_tareas:
        # No file given (or it was empty): generate random tasks up to the end time.
        lista_tareas = tareas_random(configuracion.configuracion().tiempo_fin_simulacion)
    # One administrator pool per seniority level, sized from the configuration.
    admin_junior = crear_administrador(PefilProgramador.Junior, configuracion.configuracion().cantidad_juniors)
    admin_ssr = crear_administrador(PefilProgramador.Semisenior, configuracion.configuracion().cantidad_semiseniors)
    admin_sr = crear_administrador(PefilProgramador.Senior, configuracion.configuracion().cantidad_seniors)
    administradores = [admin_junior, admin_ssr, admin_sr]
    # Drop seniority levels that have no programmers assigned.
    administradores = list(filter(lambda a: a.programadores > 0, administradores))
    data = Simulacion(configuracion.configuracion(), lista_tareas, administradores)
    return simular(data)
def insertar_eventos(eventos: "List[Evento]", mas_eventos: "List[Evento]") -> "List[Evento]":
    """Insert each new event into `eventos`, keeping it sorted by `tiempo`.

    Ties are placed after existing events with the same timestamp
    (bisect_right semantics). Returns the same (mutated) `eventos` list.
    """
    # Maintain a parallel list of timestamps so each insertion is one bisect +
    # one insert. The previous version rebuilt the whole key list with
    # map(lambda ev: ev.tiempo, eventos) on every iteration, which made the
    # loop quadratic for no benefit.
    tiempos = [ev.tiempo for ev in eventos]
    for e in mas_eventos:
        pos = bisect.bisect_right(tiempos, e.tiempo)
        eventos.insert(pos, e)
        tiempos.insert(pos, e.tiempo)
    return eventos
def simular(simulacion: Simulacion) -> Tuple[Simulacion, List]:
    """Run the discrete-event loop until the end time or until no events remain.

    Returns the simulation and its metric results.
    """
    print("Comenzando simulacion ...")
    print(f"CANTIDAD DE TAREAS ANTES DE FILTRO: {len(simulacion.tareas)}", True)
    # Only tasks created within the simulated time range are processed.
    simulacion.tareas = list(filter(lambda t: t.tiempo_creacion < simulacion.tiempo_fin, simulacion.tareas))
    print(f"CANTIDAD DE TAREAS DESPUES DE FILTRO: {len(simulacion.tareas)}", True)
    eventos = crear_eventos_llegada(simulacion.tareas)
    procesados = 0
    total = len(eventos)
    while(simulacion.tiempo_sistema < simulacion.tiempo_fin):
        # for _ in tqdm_gui(range(2*total)):
        #     time.sleep(0.001)
        evento = eventos.pop(0) if len(eventos) > 0 else None
        if evento is None:
            break
        procesados += 1
        # Advance the clock to the event, let the simulation react, and merge
        # any newly generated events back into the queue in timestamp order.
        simulacion.tiempo_sistema = evento.tiempo
        nuevos_eventos = simulacion.resolver(evento)
        eventos = insertar_eventos(eventos, nuevos_eventos)
        if procesados % ceil(0.1*total) == 0:  # progress line every ~10% of the initial events
            print(f"PROCESADOS {procesados} EVENTOS")
    print(f"TIEMPO FINAL: {simulacion.tiempo_sistema}", True)
    print(f"PROCESADOS {procesados} EVENTOS", True)
    # with open("realizadas.json","w+") as f:
    #     json.dump([t.get_dict() for t in simulacion.tareas_finalizadas],f)
    # with open("sin_terminar.json","w+") as f:
    #     json.dump([t.get_dict() for t in simulacion.tareas_asignadas],f)
    # with open("tareas.json","w+") as f:
    #     json.dump([t.get_dict() for t in simulacion.tareas],f)
    return simulacion, simulacion.resultado_metricas()
if __name__ == "__main__":
    # Run the simulation with the configured data file, then print a summary
    # and generate one chart per collected metric.
    simulacion, resultados = realizar_simulacion(configuracion.configuracion().archivo_datos)
    # print(f"RESULTADOS: {resultados}")
    print(f"RESULTADOS: {simulacion.resumen()}")
    for m in simulacion.metricas:
m.generar_grafico() | alexiscaspell/task-simulator | app.py | app.py | py | 3,815 | python | es | code | 0 | github-code | 13 |
74190171217 | import sys
import cv2
import re
import os
def cutSkinHead(skinPath, headPath, size=(64, 64)):
    """Crop the face region from a Minecraft skin image and save it resized.

    Args:
        skinPath: path to the source skin image.
        headPath: destination path for the cropped head image.
        size: output (width, height) in pixels.

    Raises:
        ValueError: if the skin image cannot be read.
    """
    img = cv2.imread(skinPath)
    if img is None:
        # cv2.imread signals failure by returning None instead of raising,
        # which previously surfaced as a cryptic slicing error; fail loudly.
        raise ValueError(f"Could not read image: {skinPath}")
    img = img[8:16, 8:16]  # the head face occupies rows/cols 8..15 of the skin
    img = cv2.resize(img, size, interpolation=cv2.INTER_AREA)
    cv2.imwrite(headPath, img)
if __name__ == '__main__':
    # CLI: python main.py <skin.png> [head.png] [WxH]
    skinPath = sys.argv[1] if len(sys.argv) > 1 else None
    headPath = sys.argv[2] if len(sys.argv) > 2 else None
    size = sys.argv[3] if len(sys.argv) > 3 else None
    # Validate the skin path (required; must be an existing .png file).
    if skinPath is None:
        print("No skin path specified")
        sys.exit(1)
    if not os.path.exists(skinPath):
        print("Skin path does not exist")
        sys.exit(1)
    if not os.path.isfile(skinPath):
        print("Skin path is not a file")
        sys.exit(1)
    if not skinPath.endswith(".png"):
        print("Skin path should be .png")
        sys.exit(1)
    # Head path is optional; default to "<skin-name>.head.png".
    if headPath is None:
        print("No head path specified, using default.")
        headPath = os.path.basename(skinPath).replace(".png", ".") + "head.png"
    if not headPath.endswith(".png"):
        print("Head path should be .png, using default.")
        headPath = os.path.basename(skinPath).replace(".png", ".") + "head.png"
    # Size is optional "WxH"; fall back to 64x64 when missing or malformed.
    if size is None:
        print("No size specified, using default 64x64px")
        size = "64x64"
    if not len(re.findall(r'^\d+x\d+$', size)) > 0:
        print("Size must be in format like 64x64")
        print("Using default 64x64px")
        size = (64, 64)
    else:
        size = tuple(map(int, size.split('x')))
    try:
        cutSkinHead(skinPath, headPath, size)
        print("Done")
    except Exception as e:
        print(f'Unexpected error: {e}')
| Gura-Dev/skin-service | main.py | main.py | py | 1,643 | python | en | code | 0 | github-code | 13 |
38711658221 | print("You in cinema.Please enter your age ")
# Ticket-price loop: read ages until the user types "end".
while True:
    print("If you want to stop write end")
    answer = input("Write your age")
    if answer.lower() == "end":
        break
    # int() will raise on non-numeric junk; original behavior kept.
    age = int(answer)
    if age <= 3:
        print("For you we haven't cost")
    elif age < 12:
        # The original condition was `answer<12>3`, a chained comparison
        # meaning `answer < 12 and 12 > 3` — the `> 3` half was always True,
        # so the intended range 4..11 only worked by accident.
        print("You may buy for 10$")
    else:
        print("Cost of ticket 15$")
| VigularIgnat/python | mygr/Exam/pr2.py | pr2.py | py | 416 | python | en | code | 0 | github-code | 13 |
30936382696 | #coding=utf-8
import copy, numpy as np
np.random.seed(0)
# compute sigmoid nonlinearity
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^-x); works on scalars and arrays."""
    return 1.0 / (1.0 + np.exp(-x))
# convert output of sigmoid function to its derivative
def sigmoid_output_to_derivative(output):
    """Given y = sigmoid(x), return dy/dx = y * (1 - y)."""
    return output * (1.0 - output)
# training dataset generation
int2binary = {}  # lookup table: integer value -> its fixed-width bit vector
binary_dim = 8  # length of the binary numbers used (8 bits)
largest_number = pow(2, binary_dim)  # largest representable value (256)
binary = np.unpackbits(
    np.array([range(largest_number)], dtype=np.uint8).T, axis=1)
for i in range(largest_number):  # map every integer 0..255 to its bit vector
    int2binary[i] = binary[i]

# input variables
alpha = 0.1  # learning rate for the weight updates in backprop
input_dim = 2  # two input bits per time step (one from each addend)
hidden_dim = 16  # number of hidden (recurrent) units
output_dim = 1  # one predicted sum bit per time step

# initialize neural network weights, drawn uniformly from [-1, 1]
synapse_0 = 2*np.random.random((input_dim, hidden_dim)) - 1  # input -> hidden, 2x16
synapse_1 = 2*np.random.random((hidden_dim, output_dim)) - 1  # hidden -> output, 16x1
synapse_h = 2*np.random.random((hidden_dim, hidden_dim)) - 1  # hidden -> hidden (recurrent), 16x16

# gradient accumulators, same shapes as the corresponding weights, zeroed
synapse_0_update = np.zeros_like(synapse_0)
synapse_1_update = np.zeros_like(synapse_1)
synapse_h_update = np.zeros_like(synapse_h)
# training logic
for j in range(10000):  # number of training iterations (adjustable)
    # generate a simple addition problem (a + b = c)
    a_int = np.random.randint(largest_number/2)  # keep addends < 128 so the sum fits in 8 bits
    a = int2binary[a_int]  # binary encoding
    b_int = np.random.randint(largest_number/2)
    b = int2binary[b_int]
    # true answer
    c_int = a_int + b_int
    c = int2binary[c_int]
    # where we'll store our best guess (binary encoded)
    d = np.zeros_like(c)
    overallError = 0  # accumulated absolute error, for progress printing
    layer_2_deltas = list()  # per-step output deltas, consumed by backprop
    layer_1_values = list()  # hidden-state history across time steps
    layer_1_values.append(np.zeros(hidden_dim))  # initial hidden state is all zeros
    # forward pass: walk the bits from least significant (rightmost) to most significant
    for position in range(binary_dim):
        # current input bits of a and b, and the matching target sum bit
        X = np.array([[a[binary_dim - position - 1], b[binary_dim - position - 1]]])
        y = np.array([[c[binary_dim - position - 1]]]).T
        # hidden layer: new state from current input and previous hidden state
        layer_1 = sigmoid(np.dot(X, synapse_0) + np.dot(layer_1_values[-1], synapse_h))
        # output layer (predicted sum bit)
        layer_2 = sigmoid(np.dot(layer_1, synapse_1))
        # prediction error and its delta for backprop
        layer_2_error = y - layer_2
        layer_2_deltas.append((layer_2_error)*sigmoid_output_to_derivative(layer_2))
        overallError += np.abs(layer_2_error[0])
        # decode estimate so we can print it out
        d[binary_dim - position - 1] = np.round(layer_2[0][0])
        # store hidden layer (deep copy) so we can use it in the next timestep
        layer_1_values.append(copy.deepcopy(layer_1))
    future_layer_1_delta = np.zeros(hidden_dim)
    # backward pass: walk the bits in the opposite direction (most significant first)
    for position in range(binary_dim):
        X = np.array([[a[position], b[position]]])
        layer_1 = layer_1_values[-position-1]
        prev_layer_1 = layer_1_values[-position-2]
        # error at output layer
        layer_2_delta = layer_2_deltas[-position-1]
        # error at hidden layer: backpropagated from the future hidden state and the output
        layer_1_delta = (future_layer_1_delta.dot(synapse_h.T) +
                         layer_2_delta.dot(synapse_1.T)) * sigmoid_output_to_derivative(layer_1)
        # accumulate the weight updates for this problem
        synapse_1_update += np.atleast_2d(layer_1).T.dot(layer_2_delta)
        synapse_h_update += np.atleast_2d(prev_layer_1).T.dot(layer_1_delta)
        synapse_0_update += X.T.dot(layer_1_delta)
        future_layer_1_delta = layer_1_delta
    # apply the accumulated updates scaled by the learning rate, then reset them
    synapse_0 += synapse_0_update * alpha
    synapse_1 += synapse_1_update * alpha
    synapse_h += synapse_h_update * alpha
    synapse_0_update *= 0
    synapse_1_update *= 0
    synapse_h_update *= 0
    # print out progress every 1000 iterations
    if(j % 1000 == 0):
        print("Error:" + str(overallError))
        print("Pred:" + str(d))
        print("True:" + str(c))
        # convert the predicted bit vector back to an integer for display
        out = 0
        for index, x in enumerate(reversed(d)):
            out += x*pow(2, index)
        print(str(a_int) + " + " + str(b_int) + " = " + str(out))
        print("------------")
| kanuore/lstm-in-mnist | reference/exmple.py | exmple.py | py | 6,234 | python | en | code | 0 | github-code | 13 |
45622166196 | import scrapy
from dianyingPro.items import DianyingproItem
class DianyingSpider(scrapy.Spider):
    """Crawls dianyi.ng action-movie listing pages and scrapes each title's description."""
    name = "dianying"
    # allowed_domains = ["dianyi.ng"]
    start_urls = ["https://dianyi.ng/v/action.html "]
    # start_urls = ["https://www.hacg.sbs/wp/anime.html"]
    # Pagination template; page_num is the next listing page to request.
    url = 'https://dianyi.ng/v/action-%d.html'
    # url = 'https://www.hacg.sbs/wp/anime.html/page/%d'
    page_num = 2

    def detail_parse(self, response):
        """Parse a movie detail page and fill the description into the item passed via meta."""
        item = response.meta['item']
        dianying_desc = response.xpath('//*[@id="main"]/div/div[1]/div[3]/div[2]/div[8]/div/span/text()').extract_first()
        # dianying_desc = response.xpath('//*[@id="content"]/article/div[1]//text()').extract()
        # dianying_desc = ''.join(dianying_desc)  # join with empty separator
        item['dianying_desc'] = dianying_desc
        # print(dianying_desc)
        # print(item)
        yield item

    def parse(self, response):
        """Parse a listing page: yield one detail request per movie, then follow pagination."""
        # pass
        # item = DianyingproItem() --------------> creating the item outside the
        # loop made every yielded result carry only the last list entry
        # h1_list = response.xpath('//*[@id="content"]/article//h1')
        div_list = response.xpath('//*[@id="main"]/div[1]/div[2]/div[2]/div/div/div[2]')
        # print(h1_list)
        # for h in h1_list:
        for div in div_list:
            item = DianyingproItem()
            # dianying_name = h.xpath('./a/text()').extract_first()
            dianying_name = div.xpath('./a/text()').extract_first()
            # print(dianying_name)
            item['dianying_name'] = dianying_name
            dianying_url = 'https://dianyi.ng/' + div.xpath('.//a/@href').extract_first()
            # dianying_url = h.xpath('./a/@href').extract_first()
            # Hand the partially-filled item to the detail page via request meta.
            yield scrapy.Request(url=dianying_url, callback=self.detail_parse, meta={'item': item})
        # Crawl listing pages 2..3, then stop.
        if self.page_num <= 3:
            new_url = format(self.url % self.page_num)
            self.page_num += 1
yield scrapy.Request(url=new_url,callback=self.parse) | Mryaochen/python_code | dianyingPro/dianyingPro/spiders/dianying.py | dianying.py | py | 1,944 | python | en | code | 1 | github-code | 13 |
20429632741 | import collections
import collections.abc
import os.path as osp
from itertools import repeat

import torch.utils.data

from cogdl.data import Adjacency, Graph
from cogdl.utils import makedirs
from cogdl.utils import accuracy, cross_entropy_loss
def to_list(x):
    """Wrap `x` in a list unless it is already a non-string iterable.

    Strings are treated as scalars (wrapped), not iterated character by
    character. Non-string iterables are returned unchanged.
    """
    # collections.Iterable was a deprecated alias removed in Python 3.10;
    # the ABC lives in collections.abc.
    if not isinstance(x, collections.abc.Iterable) or isinstance(x, str):
        x = [x]
    return x
def files_exist(files):
    """Return True iff every path in `files` exists (vacuously True when empty)."""
    return all(osp.exists(f) for f in files)
class Dataset(torch.utils.data.Dataset):
    r"""Dataset base class for creating graph datasets.
    See `here <https://rusty1s.github.io/pycogdl/build/html/notes/
    create_dataset.html>`__ for the accompanying tutorial.
    Args:
        root (string): Root directory where the dataset should be saved.
        transform (callable, optional): A function/transform that takes in an
            :obj:`cogdl.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`cogdl.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`cogdl.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in the
            final dataset. (default: :obj:`None`)
    """

    @staticmethod
    def add_args(parser):
        """Add dataset-specific arguments to the parser."""
        pass

    @property
    def raw_file_names(self):
        r"""The name of the files to find in the :obj:`self.raw_dir` folder in
        order to skip the download."""
        raise NotImplementedError

    @property
    def processed_file_names(self):
        r"""The name of the files to find in the :obj:`self.processed_dir`
        folder in order to skip the processing."""
        raise NotImplementedError

    def download(self):
        r"""Downloads the dataset to the :obj:`self.raw_dir` folder."""
        raise NotImplementedError

    def process(self):
        r"""Processes the dataset to the :obj:`self.processed_dir` folder."""
        raise NotImplementedError

    def __len__(self):
        r"""The number of examples in the dataset."""
        return 1

    def get(self, idx):
        r"""Gets the data object at index :obj:`idx`."""
        raise NotImplementedError

    def __init__(self, root, transform=None, pre_transform=None, pre_filter=None):
        super(Dataset, self).__init__()
        self.root = osp.expanduser(osp.normpath(root))
        self.raw_dir = osp.join(self.root, "raw")
        self.processed_dir = osp.join(self.root, "processed")
        self.transform = transform
        self.pre_transform = pre_transform
        self.pre_filter = pre_filter
        # Fetch and preprocess eagerly; both are no-ops when the files already exist.
        self._download()
        self._process()

    @property
    def num_features(self):
        r"""Returns the number of features per node in the graph."""
        return self[0].num_features

    @property
    def raw_paths(self):
        r"""The filepaths to find in order to skip the download."""
        files = to_list(self.raw_file_names)
        return [osp.join(self.raw_dir, f) for f in files]

    @property
    def processed_paths(self):
        r"""The filepaths to find in the :obj:`self.processed_dir`
        folder in order to skip the processing."""
        files = to_list(self.processed_file_names)
        return [osp.join(self.processed_dir, f) for f in files]

    def _download(self):
        # Skip the download entirely when every raw file is already on disk.
        if files_exist(self.raw_paths):  # pragma: no cover
            return
        makedirs(self.raw_dir)
        self.download()

    def _process(self):
        # Skip processing when the processed artifacts already exist.
        if files_exist(self.processed_paths):  # pragma: no cover
            return
        print("Processing...")
        makedirs(self.processed_dir)
        self.process()
        print("Done!")

    def get_evaluator(self):
        # Default classification metric.
        return accuracy

    def get_loss_fn(self):
        # Default classification loss.
        return cross_entropy_loss

    def __getitem__(self, idx):  # pragma: no cover
        r"""Gets the data object at index :obj:`idx` and transforms it (in case
        a :obj:`self.transform` is given)."""
        data = self.get(idx)
        data = data if self.transform is None else self.transform(data)
        return data

    @property
    def num_classes(self):
        r"""The number of classes in the dataset."""
        y = self.data.y
        # 1-D y holds class indices; 2-D y is one-hot/multi-label, so use its width.
        return y.max().item() + 1 if y.dim() == 1 else y.size(1)

    def __repr__(self):  # pragma: no cover
        return "{}({})".format(self.__class__.__name__, len(self))
class MultiGraphDataset(Dataset):
    """Dataset holding many graphs, stored either as a plain list (``self.data``)
    or as one big collated Graph plus per-key slice offsets (``self.slices``)."""

    def __init__(self, root=None, transform=None, pre_transform=None, pre_filter=None):
        super(MultiGraphDataset, self).__init__(root, transform, pre_transform, pre_filter)
        self.data, self.slices = None, None

    @property
    def num_classes(self):
        r"""The number of classes in the dataset."""
        y = self.data.y
        # 1-D y holds class indices; 2-D y is one-hot/multi-label, so use its width.
        return y.max().item() + 1 if y.dim() == 1 else y.size(1)

    def len(self):
        # Every slice vector has len(dataset) + 1 boundary entries.
        for item in self.slices.values():
            return len(item) - 1
        return 0

    def _get(self, idx):
        # Rebuild graph `idx` from the collated storage using the slice offsets.
        data = self.data.__class__()
        if hasattr(self.data, "__num_nodes__"):
            data.num_nodes = self.data.__num_nodes__[idx]
        for key in self.data.__old_keys__():
            item, slices = self.data[key], self.slices[key]
            start, end = slices[idx].item(), slices[idx + 1].item()
            if torch.is_tensor(item):
                # Slice along the key's concatenation dimension only.
                s = list(repeat(slice(None), item.dim()))
                s[self.data.__cat_dim__(key, item)] = slice(start, end)
            elif start + 1 == end:
                s = slices[start]
            else:
                s = slice(start, end)
            data[key] = item[s]
        return data

    def get(self, idx):
        # Accepts ints, tensors, and sequences of indices.
        try:
            idx = int(idx)
        except Exception:
            idx = idx
        if torch.is_tensor(idx):
            idx = idx.numpy().tolist()
        if isinstance(idx, int):
            if self.slices is not None:
                return self._get(idx)
            return self.data[idx]
        elif len(idx) > 1:
            if self.slices is not None:
                return [self._get(int(i)) for i in idx]
            return [self.data[i] for i in idx]

    @staticmethod
    def from_data_list(data_list):
        # Collate a list of graphs into one batched Graph plus per-key slice
        # boundaries, shifting per-graph indices by the running cumsum so each
        # graph's references remain self-consistent in the batch.
        keys = [set(data.keys) for data in data_list]
        keys = list(set.union(*keys))
        assert "batch" not in keys
        batch = Graph()
        batch.__slices__ = {key: [0] for key in keys}
        for key in keys:
            batch[key] = []
        cumsum = {key: 0 for key in keys}
        batch.batch = []
        num_nodes_cum = [0]
        for i, data in enumerate(data_list):
            for key in data.keys:
                item = data[key]
                # Shift index-like tensors; boolean masks are never offset.
                if torch.is_tensor(item) and item.dtype != torch.bool:
                    item = item + cumsum[key]
                if torch.is_tensor(item):
                    size = item.size(data.cat_dim(key, data[key]))
                else:
                    size = 1
                batch.__slices__[key].append(size + batch.__slices__[key][-1])
                cumsum[key] = cumsum[key] + data.__inc__(key, item)
                batch[key].append(item)
                # if key in follow_batch:
                #     item = torch.full((size,), i, dtype=torch.long)
                #     batch["{}_batch".format(key)].append(item)
            # Track cumulative node counts and the graph-id of every node.
            num_nodes = data.num_nodes
            if num_nodes is not None:
                num_nodes_cum.append(num_nodes + num_nodes_cum[-1])
                item = torch.full((num_nodes,), i, dtype=torch.long)
                batch.batch.append(item)
        if num_nodes is None:
            batch.batch = None
        for key in batch.keys:
            item = batch[key][0]
            if torch.is_tensor(item):
                batch[key] = torch.cat(batch[key], dim=data_list[0].cat_dim(key, item))
            elif isinstance(item, int) or isinstance(item, float):
                batch[key] = torch.tensor(batch[key])
            elif isinstance(item, Adjacency):
                # Merge per-graph adjacency structures, offsetting row/col ids
                # (and row_ptr boundaries) by the cumulative node counts.
                target = Adjacency()
                for k in item.keys:
                    if k == "row" or k == "col":
                        _item = torch.cat(
                            [x[k] + num_nodes_cum[i] for i, x in enumerate(batch[key])], dim=item.cat_dim(k, None)
                        )
                    elif k == "row_ptr":
                        _item = torch.cat(
                            [x[k][:-1] + num_nodes_cum[i] for i, x in enumerate(batch[key][:-1])],
                            dim=item.cat_dim(k, None),
                        )
                        _item = torch.cat([_item, batch[key][-1] + num_nodes_cum[-1]], dim=item.cat_dim(k, None))
                    else:
                        _item = torch.cat([x[k] for i, x in enumerate(batch[key])], dim=item.cat_dim(k, None))
                    target[k] = _item
                batch[key] = target.to(item.device)
        return batch.contiguous()

    def __len__(self):
        return len(self.data)
| sultanalnahian/gg-principle-classifier | graphmethods/cogdl/cogdl/data/dataset.py | dataset.py | py | 9,302 | python | en | code | 0 | github-code | 13 |
25888710772 | from flask import Flask, request, jsonify
from manipulateK6 import testJarServer
app = Flask(__name__)
@app.route('/actTest/<testType>/<testApi>/<userCount>')
def testK6(testType, testApi, userCount):
    """Run a K6 load test against `testApi` and return its statistics as JSON."""
    print(testType)
    print(testApi)
    print(userCount)
    avg_time, per_requests, total_requests = testJarServer(testType, testApi, int(userCount))
    return jsonify({"avg": avg_time, "per": per_requests, "req": total_requests})
if __name__ == "__main__":
    # Listen on all interfaces; debug=True is for development only.
    app.run(host='0.0.0.0', port=8080, debug=True)
| offMomySon/Spring_performance_test | ForK6/clientForK6.py | clientForK6.py | py | 641 | python | en | code | 0 | github-code | 13 |
31236858489 | def homework_5(matrix, start, end, total): # 請同學記得把檔案名稱改成自己的學號(ex.1104813.py)
# 一個矩陣存取節點到節點間的路徑,一個矩陣存取節點中插入的中轉點。
path = [[-1]*total for _ in range(total)] #設計一個n*n矩陣來填入path
A = [[0]*total for _ in range(total) ] #設計一個n*n矩陣來填入初始數對應步數
for i in range(len(matrix)): #將題目給的數值及對應步數丟到矩陣A
if i>=len(matrix):
break
for j in range(total):
tmp_i = matrix[i][j]
tmp_j = matrix[i][j+1]
A[tmp_i-1][tmp_j-1] = matrix[i][j+2]
break
for i in range(total): #將A矩陣內不是對角線且數值為零的設為無限大
for j in range(total):
if i != j and A[i][j] == 0:
A[i][j] = float("inf")
for k in range(total): #算出最佳距離並將path更新
for i in range(total):
for j in range(total):
if A[i][k] + A[k][j] < A[i][j]:
A[i][j] = min(A[i][k] + A[k][j], A[i][j])
path[i][j] = k+1
lst = [start] #先將起始點嘉進list
getpath(start, end, path, lst) #走遞迴回傳走的順序
lst.append(end) #再將結束點加入list
ans_lst = ""
for x in lst: #將list換成str
ans_lst += str(x)
step = A[start-1][end-1]
if step == float("inf") or step == 0: #如果是0代表是起始點跟終點一樣,inf則代表沒有方法走到終點
step = -1
ans_lst = None
ans_lst = [step, ans_lst]
return ans_lst
def getpath(start, end, path, lst):
    """Append to `lst` (in order) the intermediate nodes on the path start->end.

    `path[i][j]` holds the recorded via-node between i+1 and j+1, or -1 when
    the edge is direct; endpoints themselves are not appended.
    """
    mid = path[start - 1][end - 1]
    if mid == -1:
        return
    getpath(start, mid, path, lst)
    lst.append(mid)
    getpath(mid, end, path, lst)
if __name__ == '__main__':
    # Example: 4 nodes with edges (u, v, w); shortest path from node 2 to node 4.
    matrix = [[1,2,1],[1,3,3],[2,1,2],[3,4,4]]
    start = 2; end = 4; total = 4
    print(homework_5(matrix, start, end, total))
| daniel880423/Member_System | file/hw5/1100421/hw5_s1100421_4.py | hw5_s1100421_4.py | py | 2,071 | python | en | code | 0 | github-code | 13 |
36150509226 | # import pdb; pdb.set_trace()
import argparse
import itertools
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import seaborn as sns
import pandas as pd
from seaborn_fig_2_grid import SeabornFig2Grid
from collections import defaultdict
plt.style.use('seaborn')
plt.rc('font', size=24)
plt.rc('figure', titlesize=36)
sns.set_style("whitegrid")
sns.set(font_scale=2)
# NOTE: to reproduce Figure 4, use `--fig grid`
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--target", type=str,
default="abs_mean_from_oracle")
parser.add_argument("--fig", type=str,
default="grid")
parser.add_argument("--text_seed", type=int, default=None)
parser.add_argument("--struc_seed", type=int, default=None)
args = parser.parse_args()
valid_targets = ['mean_from_oracle', 'abs_mean_from_oracle']
assert args.target in valid_targets, valid_targets
ylabel = "Causal Error"
xlabel = "Test Accuracy"
dfs = []
for dataset in ['trivial_0801', 'lda_0727', 'gpt2_0803']:
dataset_name = {"gpt2": "GPT2", "lda": "LDA", "trivial": "Trivial"}
dataset_name = dataset_name[dataset.split("_")[0]]
for method in ["Prop", "IPW", "ME"]:
infn = "acc_err_{}_{}_{}.csv".format(dataset, method.lower(), args.target)
df = pd.read_csv(infn)
df.columns = [col.strip() for col in df.columns]
df.rename({'target': ylabel, 'test_acc': xlabel}, inplace=True, axis='columns')
df = df.assign(dataset=[dataset_name for _ in range(df.shape[0])])
df = df.assign(method=[method for _ in range(df.shape[0])])
if args.text_seed is not None:
df = df[df["text_seed"] == args.text_seed]
if args.struc_seed is not None:
df = df[df["struc_seed"] == args.struc_seed]
dfs.append(df)
df = pd.concat(dfs, ignore_index=True)
methods = ["Prop", "IPW", "ME"]
datasets = ["Trivial", "LDA", "GPT2"]
markers = dict(zip(datasets, ["x", "+", "o", "*"]))
# colors = dict(zip(methods, sns.color_palette(palette="colorblind", n_colors=4)))
colors = sns.color_palette(palette="colorblind", n_colors=4)
if args.fig == 'grid':
fig = plt.figure(figsize=(16, 16))
gs = gridspec.GridSpec(3, 3)
xlim = (0.4, 1.0)
ylim = (0.0, 0.4)
idx = 0
for row, method in enumerate(methods):
for col, dataset in enumerate(datasets):
marker = 'o'
where = np.logical_and(df["method"] == method, df["dataset"] == dataset)
g = sns.JointGrid(hue='struc_seed', palette="pastel",
data=df[where], x=xlabel, y=ylabel, xlim=xlim, ylim=ylim)
g.plot_joint(sns.kdeplot, fill=False, alpha=0.8)
g.plot_marginals(sns.kdeplot, fill=True, bw_adjust=0.5)
g.plot_joint(sns.scatterplot, marker=marker, alpha=0.5)
ax = g.fig.axes[0]
ax.get_legend().remove()
where2 = np.all([df[where][xlabel] >= xlim[0], df[where][xlabel] <=xlim[1],
df[where][ylabel] >= ylim[0], df[where][ylabel] <= ylim[1]], axis=0)
print("{} x {} = {:.1f}".format(method, dataset, 100 * np.mean(where2)))
xticks = [0.4, 0.6, 0.8, 1.0]
ax.set_xticks(xticks)
if row == 2:
ax.set_xticklabels(map(str, xticks))
else:
ax.set_xticklabels([])
ax.set_xlabel('')
yticks = [0.0, 0.2, 0.4]
ax.set_yticks(yticks)
if col == 0:
ax.set_yticklabels(map(str, yticks))
else:
ax.set_yticklabels([])
ax.set_ylabel('')
# Row/Col labels from:
# https://stackoverflow.com/questions/25812255/row-and-column-headers-in-matplotlibs-subplots
if row == 0:
g.fig.axes[1].annotate(
dataset, xy=(0.5, 1), xytext=(0, 5), xycoords='axes fraction',
textcoords='offset points', size='large', ha='center', va='baseline')
if col == 0:
ax = g.fig.axes[0]
ax.annotate(
method, xy=(-0.4, 0.5), xytext=(-ax.yaxis.labelpad - 5, 0),
xycoords='axes fraction', textcoords='offset points',
size='large', ha='right', va='center')
SeabornFig2Grid(g, fig, gs[idx])
idx += 1
gs.tight_layout(fig)
gs.update(left=0.15, top=0.95)
# plt.show()
outfn = "test_grid{}.png".format(str(args.text_seed) if args.text_seed else "")
plt.savefig(outfn)
elif args.fig == 'lda_ipw':
# fig = plt.subplots(1, 1, figsize=(8, 8))
method, dataset = "IPW", "LDA"
xlim = (0.4, 1.0)
ylim = (0.0, 2.0)
where = np.logical_and(df["method"] == method, df["dataset"] == dataset)
g = sns.JointGrid(hue='struc_seed', palette="pastel",
data=df[where], x=xlabel, y=ylabel, xlim=xlim, ylim=ylim)
g.plot_joint(sns.kdeplot, fill=False, alpha=0.8)
g.plot_marginals(sns.kdeplot, fill=True, bw_adjust=0.5)
g.plot_joint(sns.scatterplot, marker='o', alpha=0.5)
ax = g.fig.axes[0]
yticks = [0.0, 0.4, 0.8, 1.2, 1.6, 2.0]
ax.set_yticks(yticks)
leg = ax.get_legend()
if leg is not None:
leg.remove()
where2 = np.all([df[where][xlabel] >= xlim[0], df[where][xlabel] <=xlim[1],
df[where][ylabel] >= ylim[0], df[where][ylabel] <= ylim[1]], axis=0)
print("{} x {} = {:.1f}".format(method, dataset, 100 * np.mean(where2)))
# plt.show()
outfn = "ipw_lda.png"
plt.savefig(outfn)
elif args.fig == 'mean':
fig, axs = plt.subplots(1, 3, sharey=True)
for col, dataset in enumerate(datasets):
for row, method in enumerate(methods):
color = colors[method]
where = np.logical_and(df["method"] == method, df["dataset"] == dataset)
step = 0.02
x = np.arange(0.5, 1.01, step)
y = []
# for cutoff in x:
# y.append(np.mean(df["Causal Error"][np.logical_and(where, df["Test Accuracy"] > cutoff)]))
for center in x:
y.append(np.mean(df["Causal Error"][np.all(
[where, df["Test Accuracy"] > center - step / 2,
df["Test Accuracy"] < center + step / 2], axis=0)]))
axs[col].set_ylim([0, 0.4])
axs[col].set_yticks([0, 0.1, 0.2, 0.3, 0.4])
axs[col].plot(x, y, color=color, label=method)
axs[col].set_title(dataset)
axs[col].set_xticks([0.5, 0.75, 1.0])
axs[col].set_xticklabels([.5, .75, 1.])
axs[0].legend()
plt.show()
elif args.fig == 'seed_mean':
fig, axs = plt.subplots(3, 3, sharex=True, sharey=True)
for col, dataset in enumerate(datasets):
for row, method in enumerate(methods):
# for row, method in enumerate(methods[:2]):
# for row, method in enumerate(methods[2:]):
for struc_seed in range(1, 5):
color = "C" + str(struc_seed)
where = np.all([df["method"] == method, df["dataset"] == dataset,
df["struc_seed"] == struc_seed], axis=0)
# for text_seed in range(1, 5):
# color = "C" + str(text_seed)
# where = np.all([df["method"] == method, df["dataset"] == dataset,
# df["text_seed"] == text_seed], axis=0)
step = 0.02
accs = df["Test Accuracy"][where]
x = np.arange(np.min(accs), np.max(accs), step)
means, mins, maxs, p025s, p975s = [], [], [], [], []
for center in x:
data = df["Causal Error"][np.all(
[where, df["Test Accuracy"] > center - step / 2,
df["Test Accuracy"] < center + step / 2], axis=0)]
if data.shape[0] > 0:
a, b = np.percentile(data, [2.5, 97.5])
p025s.append(a)
p975s.append(b)
means.append(np.mean(data))
mins.append(np.min(data))
maxs.append(np.max(data))
elif len(means) > 0:
p025s.append(p025s[-1])
p975s.append(p025s[-1])
means.append(means[-1])
mins.append(mins[-1])
maxs.append(maxs[-1])
else:
p025s.append(np.nan)
p975s.append(np.nan)
means.append(np.nan)
mins.append(np.nan)
maxs.append(np.nan)
axs[row, col].set_ylim([0, 0.4])
axs[row, col].set_yticks([0, 0.4])
axs[row, col].plot(x, means, color=color, alpha=0.8)
axs[row, col].fill_between(x, p025s, p975s, color=color, alpha=0.4)
# axs[col].set_ylim([0, 0.4])
# axs[col].set_yticks([0, 0.4])
# axs[col].plot(x, means, color=color, alpha=0.8)
# axs[col].fill_between(x, p025s, p975s, color=color, alpha=0.4)
# axs[row, col].fill_between(x, mins, maxs, color=color, alpha=0.2)
# axs[0, 0].legend()
plt.show()
if __name__ == "__main__":
main()
| sayuj-choudhari/Causal-Inference-SURF-2023 | acc_vs_err_jointplot.py | acc_vs_err_jointplot.py | py | 8,846 | python | en | code | 0 | github-code | 13 |
42702913631 | class crud():
    def __init__(self):
        # The os module is imported here and kept on the instance so every
        # method can reach it through self.os without a file-level import.
        import os
        self.os = os
        print('Cadastro')
#Função para ler os dados cadastrados
def ler(self):
if not self.os.path.exists('base_dados.txt'):
escrita = open('base_dados.txt', 'w')
escrita.write('')
escrita.close()
leitura = open('base_dados.txt', 'r')
retorno = ''
for linha in leitura:
retorno += linha.strip() + '\n'
leitura.close()
if len(retorno)>0:
return retorno
else:
return 'nenhum registro encontrado'
#Função para gerar os Id
def codigo_id(self):
self.ler()
leitura = open('base_dados.txt', 'r')
ultima_linha = ''
for linha in leitura:
ultima_linha = linha.strip()
if len(ultima_linha)<=0:
ultima_linha = 'ID: 0000000'
id = int(ultima_linha[ultima_linha.find('ID: ')+4:11])+1
id = str(id).rjust(7,'0')
leitura.close()
return id
#Método para a inserção de dados
def cadastrar(self, nome = '', idade=''):
id = self.codigo_id()
nome = str(nome).ljust(29, ' ')
idade = str(idade).rjust(3, '0')
texto = self.ler()
if texto == 'nenhum registro encontrado':
texto = ''
escrita = open('base_dados.txt', 'w')
linha = f'ID: {id} - Nome: {nome} - Idade: {idade} anos'
escrita.write(texto + linha)
escrita.close()
#Método para alteração de dados
def alterar(self, id = -1, nome = '', idade = ''):
if len(id)< 7:
id = str(id).rjust(7, '0')
nome = str(nome).ljust(25, ' ')
idade = str(idade).rjust(3, '0')
texto = self.ler()
linhas = texto.split('\n')
escrita = open('base_dados.txt', 'r')
texto = ''
for linha in linhas:
if linha.find('ID: {id}')>=0:
if len(nome.strip())<=0:
nome = linha[linha.find('NOME: ')+6:linha.find(' - IDADE')]
if '000' in idade:
idade = linha[linha.find('IDADE: ')+7:linha.find(' anos')]
texto += (f'ID: {id} - Nome: {nome} - Idade: {idade} anos\n')
else:
texto += linha +'\n'
escrita.write(texto.strip())
escrita.close()
#Método para exclusão de registros
def deletar(self, id= -1):
if len(str(id))< 7:
id = str(id).rjust(7, '0')
texto = self.ler()
linhas = texto.split('\n')
escrita = escrita = open('base_dados.txt', 'w')
texto = ''
for linha in linhas:
if linha.find(f'ID: {id}')>=0:
texto += ''
else:
texto += linha + '\n'
escrita.write(texto.strip())
escrita.close()
#Função para consultar os registros
def consultar(self, id=-1):
if len(id)<7:
id = str(id).rjust(7,'0')
if not self.os.path.exists('base_dados.txt'):
escrita = open('base_dados.txt', 'w')
escrita.write('')
leitura = open('base_dados.txt', 'r')
retorno = ''
for linha in leitura:
if linha.find(f'ID: {id}')>=0:
retorno = linha.strip()+ '\n'
leitura.close()
return retorno
#Metodo principal para executar o CRUD
    def executar(self):
        """Interactive console menu: loops until the user picks 0,
        dispatching each option to the matching CRUD method."""
        opcao = 1
        while opcao >0:
            print('\n(1) - LÊ \n(2) - CADASTRA \n(3) - ALTERA \n(4) - DELETA \n(5) - CONSULTA \n(0) - SAIR')
            # NOTE(review): int() raises ValueError on non-numeric input;
            # consider wrapping this read in try/except.
            opcao = int(input('Informe a sua opção: '))
            if opcao == 1:
                print('Dados\n'+ self.ler())
            elif opcao == 2:
                nome = str(input('Nome: '))
                idade = int(input('Idade: '))
                self.cadastrar(nome,idade)
                print('Registro cadastrado com sucesso')
            elif opcao == 3:
                # alterar/consultar receive int IDs; they are expected to
                # normalize with str() internally.
                id = int(input('ID: '))
                nome = str(input('Nome: '))
                idade = int(input('Idade: '))
                self.alterar(id,nome,idade)
                print('Dados Alterados com sucesso')
            elif opcao == 4:
                id = int(input('ID: '))
                self.deletar(id)
                print('Registro excluido com sucesso')
            elif opcao == 5:
                id = int(input('ID: '))
                linhas = self.consultar(id)
                print('Dados \n'+ linhas)
            else:
                print('Encerrado')
| luizsouza1993/Data_Science_Python | crud.py | crud.py | py | 4,795 | python | pt | code | 0 | github-code | 13 |
7092502557 | from typing import List
class Solution:
    def isMonotonic(self, nums: List[int]) -> bool:
        """Return True if nums is entirely non-decreasing or entirely
        non-increasing.

        Empty and single-element lists are monotonic. Replaces the
        previous string flags ('true'/'false') with real booleans and
        uses all(), which short-circuits on the first violation.
        """
        non_decreasing = all(nums[i] <= nums[i + 1] for i in range(len(nums) - 1))
        non_increasing = all(nums[i] >= nums[i + 1] for i in range(len(nums) - 1))
        return non_decreasing or non_increasing
# Ad-hoc driver: pick one sample list and check its monotonicity.
objeto = Solution()
#nums = [1,2,2,3]
#nums = [6,5,4,4]
nums = [1,3,2]
print(objeto.isMonotonic(nums)) | alexandreborgmann/leetcode | MonotonicArray.py | MonotonicArray.py | py | 660 | python | en | code | 0 | github-code | 13 |
31708786603 | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
#from haystack.views import SearchView
urlpatterns = patterns('',
# Examples:
url(r'^$', 'kudosapp.views.home', name='home'),
url(r'^directory/$', 'kudosapp.views.dir', name='dir'),
#url(r'^$', 'kudosapp.views.basic_search', name='search'),
url(r'^search/$', 'kudosapp.views.basic_search', name='search'),
# url(r'^$', SearchView(), name='haystack_search'),
# url(r'^kudos/', include('kudos.foo.urls')),
#Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
#Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
| exiao/TMKudos | kudos/kudos/urls.py | urls.py | py | 833 | python | en | code | 0 | github-code | 13 |
36333372835 | """
k 그룹으로 나눈다 -> k-1 개의 경계를 만들어야 한다.
가장 차이가 큰 숫자 사이에 경계를 만들면 최대이득이다.
=> 그 차이만큼 최종값에서 사라지고 0이 된다
"""
import sys
n, k = map(int, sys.stdin.readline().strip().split())
student = list(map(int, sys.stdin.readline().strip().split()))
# print(student)
# With k == n every student is alone, so the total cost is 0.
answer = 0
if n == k:
    print(answer)
    sys.exit(0)
# Initial value: cost of a single group = last height minus first.
# NOTE(review): this assumes the input heights arrive already sorted in
# ascending order - confirm against the problem statement.
answer = student[len(student)-1] - student[0]
diff = []
for i in range(len(student)-1):
    diff.append(student[i+1]-student[i])
# Cutting at the k-1 largest gaps removes exactly those gaps from the cost.
diff = sorted(diff)
diff.reverse()
for i in diff:
    if k == 1:
        break
    answer -= i
    k -= 1
print(answer)
31722774603 | import pickle
from collections import defaultdict
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.font_manager import FontProperties
matplotlib.rcParams['font.family'] = 'Microsoft JhengHei'
font = FontProperties()
font.set_size('xx-small')
colormap = 'tab20'
pdm = 'data/draw-data/poi_demand_match.pickle'
pdm = pickle.load(open(pdm, 'rb'))
src, dst = defaultdict(set), defaultdict(set)
for (d, p, t), v in pdm.items():
if d == 'src':
src[p].add((t, v))
elif d == 'dst':
dst[p].add((t, v))
src = {k: [i[1] for i in sorted(v, key=lambda a: a[0])] for k, v in src.items()}
dst = {k: [i[1] for i in sorted(v, key=lambda a: a[0])] for k, v in dst.items()}
cols = set(src.keys()).intersection(dst.keys())
src = pd.DataFrame(src, index=range(1, 24), columns=cols)
dst = pd.DataFrame(dst, index=range(1, 24), columns=cols)
def draw(name, data, ftype):
    """Plot `data` (a DataFrame) as `ftype` on a fresh figure, place the
    legend outside on the right, label the x-axis with `name`, and save
    the result to data/plot/<name>.png."""
    fig = plt.figure()
    axes = fig.gca()
    data.plot(kind=ftype, colormap=colormap, ax=axes)
    legend = plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
    plt.xlabel(name)
    plt.savefig(
        f'data/plot/{name}.png',
        bbox_extra_artists=(legend,),
        bbox_inches='tight',
    )
# draw('Pickup', src, 'area')
# draw('Dropout', dst, 'area')
for col in cols:
draw(col, pd.DataFrame({
'Pickup': src[col],
'Dropout': dst[col]
}), 'line')
| exiaohu/multi-mode-route-rec | scripts/draw_poi_demand_pic.py | draw_poi_demand_pic.py | py | 1,362 | python | en | code | 6 | github-code | 13 |
22544026648 | import sys
input = sys.stdin.readline
stack = []
n = int(input())
for _ in range(n):
x = input().strip()
if "push" in x:
stack.append(x.split()[1])
elif x == "size":
print(len(stack))
elif x == "empty":
if len(stack) != 0:
print("0")
else:
print("1")
elif x == "pop":
if len(stack) != 0:
print(stack.pop())
else:
print("-1")
elif x == "top":
if len(stack) != 0:
print(stack[-1])
else:
print("-1") | WeeYoungSeok/python_coding_study | class_2/class_2_10.py | class_2_10.py | py | 559 | python | en | code | 0 | github-code | 13 |
12617186898 |
class CreditCard:
    """A card record: its number and the funds currently available on it."""
    def __init__(self, card_no, balance):
        # card_no doubles as the lookup key in Customer.cards.
        self.card_no = card_no
        self.balance = balance
"""
Vinay_card_details : {
1001 : 2500,
2002 : 3000
}
"""
class InvalidCase(Exception):
    """Raised when a purchase cannot proceed: a mis-printed (negative)
    price, or a price above the card balance.

    Accepts either a single message string, or (price, balance). The
    previous version required both arguments, so the one-argument raise in
    Customer.purchase_items crashed with TypeError; it also built the
    message as a tuple instead of a string.
    """
    def __init__(self, price, balance=None):
        if balance is None:
            # Single-argument form: `price` is already a message.
            msg = str(price)
        else:
            msg = f"The actual price is : {price} but your balance is : {balance}"
        print("Message : ", msg)
        super().__init__(msg)
class WrongCard(Exception):
    """Raised when the given card number is not among the customer's cards."""
    pass
class Customer:
    """Holds a mapping {card_no: CreditCard} (aggregation, "has-a")."""
    def __init__(self, cards):
        self.cards = cards
    def purchase_items(self, card_no, price):
        """Validate a purchase attempt; raises instead of returning on
        failure (InvalidCase for price problems, WrongCard for an unknown
        card number)."""
        if price < 0 :
            # NOTE(review): this one-argument call does not match
            # InvalidCase.__init__(self, price, balance); as written it
            # raises TypeError, not InvalidCase. The exception class needs
            # a default for `balance` (or this call needs two arguments).
            raise InvalidCase("The price is printed incorrectly")
        if card_no not in self.cards:
            raise WrongCard("The given card details is not Valid")
        if price > self.cards[card_no].balance :
            raise InvalidCase(price, self.cards[card_no].balance)
            # raise InvalidCase("The balance is insufficient to purchase the items")
card1 = CreditCard(1001, 2500)
card2 = CreditCard(2002, 3000)
cards = {
card1.card_no : card1,
card2.card_no : card2
}
# Aggregstion : "has-A" relationship
customer = Customer(cards)
while(True):
card_no = int(input("Enter the Card no : "))
try:
customer.purchase_items(card_no, 3000)
print("The transaction has been done Successfully")
break
except InvalidCase as ic:
print("Inside Invalid case exeption")
print(str(ic))
except WrongCard as wc:
print("Inside Wrong Card case exeption")
print(str(wc))
except Exception as e :
print(e)
# print("Some error has been occured")
| Vinaykuresi/Python_Full_Stack_Aug_2022 | Python/OOPS/Exception_handling/custom_exception.py | custom_exception.py | py | 1,659 | python | en | code | 0 | github-code | 13 |
33935961447 | # import faulthandler
import logging
import os
import sys
import time
from pathlib import Path
from typing import Union
import networkx as nx
import numpy as np
from graph_tool import Graph
from network_dismantling import dismantler_wrapper
from network_dismantling.FINDER_ND.FINDER import FINDER
from network_dismantling._sorters import dismantling_method
local_dir = os.path.dirname(__file__) + os.sep
sys.path.append(local_dir)
model_file_path = local_dir + 'models/'
model_file_path = Path(model_file_path)
model_file_path = model_file_path.resolve()
if not model_file_path.exists():
raise FileNotFoundError(f"Model file path {model_file_path} does not exist")
elif not model_file_path.is_dir():
raise NotADirectoryError(f"Model file path {model_file_path} is not a directory")
dqn = FINDER()
def to_networkx(g):
from io import BytesIO
from networkx import read_graphml
print("Converting graph to NetworkX")
with BytesIO() as io_buffer:
g.save(io_buffer, fmt='graphml')
io_buffer.seek(0)
try:
gn = read_graphml(io_buffer, node_type=str)
except Exception as e:
raise e
# Map nodes to consecutive IDs to avoid issues with FINDER
mapping = {k: i for i, k in enumerate(gn.nodes)}
gn = nx.relabel_nodes(gn, mapping)
return gn
@dismantler_wrapper
def _finder_nd(network: Graph, reinsertion=True, strategy_id=0,
model_file_ckpt: Union[str, Path] = 'nrange_30_50_iter_78000.ckpt',
step_ratio=0.01, reinsert_step=0.001,
logger=logging.getLogger("dummy"), **kwargs
):
"""
Implements interface to FINDER ND (no cost).
This function merges the GetSolution and EvaluateSolution functions.
Note that the default parameters are the same as provided in the author's code.
:param network:
:param reinsertion:
:param model_file_ckpt:
:param strategy_id:
:param step_ratio:
:param reinsert_step:
:param kwargs:
:return:
"""
from graph_tool.all import remove_parallel_edges, remove_self_loops
remove_parallel_edges(network)
remove_self_loops(network)
# # Convert the network to NetworkX Graph
nx_graph = to_networkx(network)
print("Getting static ids")
static_id = nx.get_node_attributes(nx_graph, "static_id")
# GetSolution BEGIN
model_file = model_file_path / model_file_ckpt
print("Loading model")
print('The best model is :%s' % (model_file))
dqn.LoadModel(model_file)
print("Getting solution")
solution, solution_time = dqn.EvaluateRealData(g=nx_graph,
# model_file=model_file,
# data_test=data_test,
# save_dir=save_dir,
stepRatio=step_ratio,
)
# GetSolution END
# print("Done getting solution")
if reinsertion is True:
print("Reinserting nodes")
# EvaluateSolution BEGIN
t1 = time.time()
# strategyID: 0:no insert; 1:count; 2:rank; 3:multiply
# This function returns the solution after the reinsertion steps
reinsert_solution, Robustness, MaxCCList = dqn.EvaluateSol(g=nx_graph,
# data_test=data_test,
# sol_file=solution,
solution=solution,
strategyID=strategy_id,
reInsertStep=reinsert_step,
)
t2 = time.time()
solution_time = t2 - t1
solution = reinsert_solution
# EvaluateSolution END
# print("Done reinserting nodes")
output = np.zeros(network.num_vertices())
for n, p in zip(solution, list(reversed(range(1, len(solution))))):
# output[static_id[f"n{n}"]] = p
# assert static_id[n] == n, f"static_id[n] = {static_id[n]} != n = {n}"
output[static_id[n]] = p
# print("Done ordering nodes")
return output
method_info = {
"source": "https://github.com/FFrankyy/FINDER/tree/master/code/FINDER_ND",
}
@dismantling_method(name="FINDER ND",
short_name="FINDER",
includes_reinsertion=False,
plot_color="#9467bd",
**method_info)
def FINDER_ND(network, **kwargs):
return _finder_nd(network, reinsertion=True, **kwargs)
| NetworkDismantling/review | network_dismantling/FINDER_ND/python_interface.py | python_interface.py | py | 4,788 | python | en | code | 6 | github-code | 13 |
40016824701 | from mpi4py import MPI
if __name__ == '__main__':
N = 10
L = []
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nb_proc = comm.Get_size()
for i in range(int(N/nb_proc*rank), int(N/nb_proc*(rank+1))):
L.append(i)
data = comm.gather(L, root=0)
if rank == 0:
print(data)
| lechevaa/Test_NR440 | main.py | main.py | py | 318 | python | en | code | 0 | github-code | 13 |
72758493777 | CLASS_TO_COLOR = {
'f2f' : (0, 1, 0), # Green
'df' : (1, 0, 0), # Red
'fs' : (0, 1, 1), # Cyan
'icf' : (1, 0.6, 0), # Orange
'gann': (1, 0.7, 0.8), # Pink
'x2f' : (0, 0, 1) # Blue
}
CLASS_TO_LABEL = {
'real' : 'Real',
'df' : 'Deepfakes',
'f2f' : 'Face2Face',
'fs' : 'FaceSwap',
'icf' : 'ICface',
'gann' : 'GANnotation',
'x2f' : 'X2Face'
}
| jcbrockschmidt/face-forgery-detection | scripts/visualize/common.py | common.py | py | 426 | python | fr | code | 3 | github-code | 13 |
72865887057 | import os
import glob
import cdms2
import numpy as np
import numpy.ma as ma
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
plotTitle = {'fontsize': 11.5}
plotSideTitle = {'fontsize': 9.5}
panel = [(0.1691, 0.6810, 0.6465, 0.2258),
(0.1691, 0.3961, 0.6465, 0.2258),
(0.1691, 0.1112, 0.6465, 0.2258),
]
def add_cyclic(var):
lon = var.getLongitude()
print('type(lon): {}'.format(type(lon)))
return var(longitude=(lon[0], lon[0] + 360.0, 'coe'))
def get_ax_size(fig, ax):
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
width *= fig.dpi
height *= fig.dpi
return width, height
def plot_panel(n, fig, proj, var, title):
#var = add_cyclic(var)
lon = var.getLongitude()
lat = var.getLatitude()
var = ma.squeeze(var.asma())
# Contour levels
# Contour plot
ax = fig.add_axes(panel[n], projection=proj)
ax.set_global()
p1 = ax.contourf(lon, lat, var,
transform=ccrs.PlateCarree(),
extend='both',
)
ax.set_aspect('auto')
ax.coastlines(lw=0.3)
ax.set_title(title, fontdict=plotTitle)
ax.set_xticks([0, 60, 120, 180, 240, 300, 359.99], crs=ccrs.PlateCarree())
ax.set_yticks([-90, -60, -30, 0, 30, 60, 90], crs=ccrs.PlateCarree())
lon_formatter = LongitudeFormatter(
zero_direction_label=True, number_format='.0f')
lat_formatter = LatitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
ax.yaxis.set_major_formatter(lat_formatter)
ax.tick_params(labelsize=8.0, direction='out', width=1)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
# Color bar
cbax = fig.add_axes(
(panel[n][0] + 0.6635, panel[n][1] + 0.0215, 0.0326, 0.1792))
cbar = fig.colorbar(p1, cax=cbax)
w, h = get_ax_size(fig, cbax)
cbar.ax.tick_params(labelsize=9.0, length=0)
def plot(test, reference, diff, parameter, fnm):
# Create figure, projection
figsize = [8.5, 11]
dpi = 150
fig = plt.figure(figsize=figsize, dpi=dpi)
proj = ccrs.PlateCarree(central_longitude=180)
# First two panels
plot_panel(0, fig, proj, test, parameter.test_title)
plot_panel(1, fig, proj, reference, parameter.reference_title)
# Third panel
plot_panel(2, fig, proj, diff, parameter.diff_title)
# Figure title
fig.suptitle(parameter.main_title, x=0.5, y=0.96, fontsize=14)
# Save figure
print('Saving diff plot: {}'.format(fnm + '.png'))
plt.savefig(fnm + '.png')
def run(args):
variables = args.vars
output_dir = args.output_dir
start_yr = args.start_yrs
end_yr = args.end_yrs
###cdat_p = '/export/shaheen2/e3sm_diags_timeseries/cdat_climo_results/20180129.DECKv1b_piControl.ne30_oEC.edison_SON_climo.nc'
#cdat_p = '/export/shaheen2/e3sm_diags_timeseries/ncclimo_climo_results/20180129.DECKv1b_piControl.ne30_oEC.edison_SON_climo.nc'
###nco_p = '/export/shaheen2/e3sm_diags_timeseries/ncclimo_climo_results/20180129.DECKv1b_piControl.ne30_oEC.edison_SON_climo.nc'
cdat_paths = glob.glob(os.path.join(output_dir, 'cdat_climo_results', '*'))
# nco_paths = glob.glob(os.path.join(output_dir, 'ncclimo_climo_results', '*'))
nco_path_dir = os.path.join(output_dir, 'ncclimo_climo_results')
output_dir = os.path.join(output_dir, 'diff_results')
if not os.path.exists(output_dir):
os.mkdir(output_dir)
class Namespace:
pass
p = Namespace()
p.test_title = 'CDAT'
p.reference_title = 'ncclimo'
p.diff_title = 'CDAT - ncclimo'
for cdat_p in cdat_paths:
f = cdat_p.split('/')[-1]
nco_p = os.path.join(nco_path_dir, f)
if not os.path.exists(nco_p):
print('File not found, skipping plot for: {}'.format(nco_p))
continue
case_id = cdat_p.split('/')[-1]
season = case_id.split('_')[-2]
case_id = case_id.split('_')[0:-3]
case_id = '.'.join(case_id)
cdat_f = cdms2.open(cdat_p)
nco_f = cdms2.open(nco_p)
print(cdat_f.variables)
print(nco_f.variables)
for v in variables:
print('\ncdat file: {}'.format(cdat_p))
print('nco file: {}'.format(nco_p))
print('variable: {}'.format(v))
cdat_data = cdat_f(v)
nco_data = nco_f(v)
diff = cdat_data - nco_data
p.main_title = '{} {} {}, {} to {}'.format(case_id, season, v, start_yr, end_yr)
fnm = os.path.join(output_dir, 'diff_{}_{}_{}_{}_{}'.format(case_id, season, v, start_yr, end_yr))
plot(cdat_data, nco_data, diff, p, fnm)
| zshaheen/e3sm_time_series | diff.py | diff.py | py | 4,903 | python | en | code | 1 | github-code | 13 |
73261176018 | from flask import Blueprint, render_template, request, redirect, abort
from flask.helpers import url_for
from flask_login import login_required, logout_user, login_user
from models import User
from urllib.parse import urlparse, urljoin
from dotenv import load_dotenv
import atexit
from flask_login import login_required, current_user
from flask_login import LoginManager
from blogs_defn import addNewBlogPost, getAllBlogs, fetchBlog
from pprint import pprint
# from db2Api.users import createUser
blog = Blueprint('blog', __name__)
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and \
ref_url.netloc == test_url.netloc
@blog.route("/blogs")
def blogs():
blogs = getAllBlogs()
# print(blogs)
return render_template("blog/blog.html", blogs=blogs)
@blog.route("/add_blog", methods=['POST', 'GET'])
@login_required
def addBlog():
if current_user.category == 'Admin' and (request.method == 'POST'):
title = request.form.get('title', '')
description = request.form.get('description', '')
sub_title = request.form.get('sub-title', '')
sub_description = request.form.get('sub-description', '')
thumbnail = request.form.get('thumbnail', '')
# print(current_user.emailid, len(current_user.emailid), title, description, sub_title, sub_description, thumbnail)
addNewBlogPost(admin_emailid=current_user.emailid, title=title, description=description,
sub_title=sub_title, sub_description=sub_description, thumbnail=thumbnail)
return redirect(url_for('.blogs'))
elif current_user.category == 'Admin':
return render_template("blog/add_blog.html")
else:
return redirect(url_for('.blogs'))
@blog.route("/read_blog/<int:id>")
def readBlog(id):
blog = fetchBlog(id)
pprint(blog)
return render_template('blog/readblog.html', blog=blog)
| YashKandalkar/eco-mart | blogs.py | blogs.py | py | 2,005 | python | en | code | 1 | github-code | 13 |
27044781406 | import factory
from unittest import mock
from .models import Submission
from .provider import get_or_create_submission_result
class SubmissionFactory(factory.django.DjangoModelFactory):
class Meta:
model = Submission
def test_get_result_if_reply_was_evaluated():
reply = "test reply"
SubmissionFactory(
id=1, reply=reply, status=Submission.SubmissionStatusChoice.CORRECT
)
submission = get_or_create_submission_result(reply=reply)
assert submission.id == 1
assert submission.status == Submission.SubmissionStatusChoice.CORRECT
@mock.patch("solution_verification_provider.provider.post_submission")
@mock.patch("solution_verification_provider.provider.get_submission")
def test_get_exception_and_entity_in_db_if_reply_wasnt_evaluated_yet(
get_submission,
post_submission,
):
post_submission.return_value = [100, Submission.SubmissionStatusChoice.EVALUATION]
get_submission.return_value = [100, Submission.SubmissionStatusChoice.EVALUATION]
submissions_qty = Submission.objects.count()
get_or_create_submission_result(reply="test_reply")
assert Submission.objects.count() == submissions_qty + 1
assert post_submission.call_count == 1
assert get_submission.call_count == 0
# second submission shouldn't create entity in DB
get_or_create_submission_result(reply="test_reply")
assert Submission.objects.count() == submissions_qty + 1
assert post_submission.call_count == 1
assert get_submission.call_count == 1
@mock.patch(
"solution_verification_provider.provider.get_submission",
mock.MagicMock(return_value=[100, Submission.SubmissionStatusChoice.CORRECT]),
)
def test_get_result_if_reply_was_evaluated_between_calls():
reply = "test reply"
submission = SubmissionFactory(
id=100, reply=reply, status=Submission.SubmissionStatusChoice.EVALUATION
)
returned_submission = get_or_create_submission_result(reply=reply)
submission.refresh_from_db()
assert submission.id == returned_submission.id == 100
assert (
submission.status
== returned_submission.status
== Submission.SubmissionStatusChoice.CORRECT
)
| Akay7/solving-code-problems | backend/solution_verification_provider/tests.py | tests.py | py | 2,183 | python | en | code | 0 | github-code | 13 |
35959218043 | # Rhys Dunn - 2015
# Learning image vision/OpenCV
# import libraries
import numpy as np
import cv2
# load image
img = cv2.imread("money.jpg")
# prep image - blur and convert to grey scale
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(grey, (17, 17), 0)
# show blurred image and grey scaled image
cv2.imshow("grey scale", grey)
cv2.imshow("blurred", blurred)
cv2.waitKey(0)
# canny edge detector
outline = cv2.Canny(blurred, 30, 150)
# show canny edge detector
cv2.imshow("The edges", outline)
cv2.waitKey(0)
# find the contours
(cnts, _) = cv2.findContours(outline, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# draw contours: -1 will draw all contours
cv2.drawContours(img, cnts, -1, (0, 255, 0), 2)
cv2.imshow("Result", img)
cv2.waitKey(0)
# Print how many coins we found
print("I found %i coins" % len(cnts)) | kineticR/Image-coin-counter | coin counter.py | coin counter.py | py | 867 | python | en | code | 10 | github-code | 13 |
31494294262 | # Faça um programa para imprimir:
# 1
# 2 2
# 3 3 3
# .....
# n n n n n n ... n
# para um n informado pelo usuário. Use uma função que receba um valor n inteiro e imprima até a n-ésima linha.
#
while True:
try:
a = int(input('Número de valores: '))
break
except:
print('Comando Inválido')
for i in range(a):
print(f'{i+1} '*(i+1)) | GuilhermeMastelini/Exercicios_documentacao_Python | Funções/Lição 1.py | Lição 1.py | py | 413 | python | pt | code | 0 | github-code | 13 |
16129665243 | #!/usr/bin/python3
from PIL import Image, ImageOps
def add_border(input_image, output_image, border):
img = Image.open(input_image)
if isinstance(border, int) or isinstance(border, tuple):
bimg = ImageOps.expand(img, border=border)
else:
raise RuntimeError("Border is not an integer or tuple!")
bimg.save(output_image)
def add_color_border(input_image, output_image, border, color=0):
img = Image.open(input_image)
if isinstance(border, int) or isinstance(border, tuple):
bimg = ImageOps.expand(img, border=border, fill=color)
else:
msg = "Border is not an integer or tuple!"
raise RuntimeError(msg)
bimg.save(output_image)
if __name__ == "__main__":
add_border(
"images/butterfly.png", output_image="images/butterfly_border.png", border=100
)
add_color_border(
"images/butterfly.png",
output_image="images/butterfly_color_border.png",
border=100,
color="indianred",
)
| udhayprakash/PythonMaterial | python3/11_File_Operations/03_multimedia/a_image_files/13_applying_borders.py | 13_applying_borders.py | py | 1,005 | python | en | code | 7 | github-code | 13 |
18606006524 | from scenario_builder import Scenario
from scenario_builder.openbach_functions import StartJobInstance
from scenario_builder.helpers.service.dash import dash_client, dash_client_and_server
from scenario_builder.helpers.postprocessing.time_series import time_series_on_same_graph
from scenario_builder.helpers.postprocessing.histogram import cdf_on_same_graph
SCENARIO_NAME = 'service_video_dash'
SCENARIO_DESCRIPTION = """This scenario launches one DASH transfer.
It can then, optionally, plot the bit rate using time-series and CDF.
NB : the entities logic is the following :
- server sends DASH content
- client requests for and receives DASH content
"""
def video_dash_client_and_server(server_entity, client_entity, server_ip, duration, protocol, tornado_port, scenario_name=SCENARIO_NAME):
scenario = Scenario(scenario_name, SCENARIO_DESCRIPTION)
dash_client_and_server(scenario, server_entity, client_entity, server_ip, duration, protocol, tornado_port)
return scenario
def video_dash_client(client_entity, server_ip, duration, protocol, tornado_port, scenario_name=SCENARIO_NAME):
scenario = Scenario(scenario_name, SCENARIO_DESCRIPTION)
dash_client(scenario, client_entity, server_ip, duration, protocol, tornado_port)
return scenario
def build(server_entity, client_entity, server_ip, duration, protocol, tornado_port, launch_server=False, post_processing_entity=None, scenario_name=SCENARIO_NAME):
if launch_server:
scenario = video_dash_client_and_server(server_entity, client_entity, server_ip, duration, protocol, tornado_port, scenario_name)
else:
scenario = video_dash_client(client_entity, server_ip, duration, protocol, tornado_port, scenario_name)
if post_processing_entity is not None:
post_processed = list(scenario.extract_function_id('dashjs_client'))
legends = ['dash from {} to {}'.format(server_entity, client_entity)]
jobs = [function for function in scenario.openbach_functions if isinstance(function, StartJobInstance)]
time_series_on_same_graph(
scenario,
post_processing_entity,
post_processed,
[['bitrate']],
[['Rate (b/s)']],
[['Rate time series']],
[legends],
wait_finished=jobs,
wait_delay=5)
cdf_on_same_graph(
scenario,
post_processing_entity,
post_processed,
100,
[['bitrate']],
[['Rate (b/s)']],
[['Rate CDF']],
[legends],
wait_finished=jobs,
wait_delay=5)
return scenario
| CNES/openbach-extra | apis/scenario_builder/scenarios/service_video_dash.py | service_video_dash.py | py | 2,730 | python | en | code | 0 | github-code | 13 |
18998687347 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from dwave.system import DWaveSampler, EmbeddingComposite
import dwave.inspector as inspector
from mapping_qubits import *
def Merge(dict_1, dict_2):
result = dict_1 | dict_2
return result
# {(0, 0): 4, (1, 1): 3, (0, 1): 10}
def auto_embedding(matrix):
sampler_manual = DWaveSampler(solver={'topology__type': 'chimera'}) # pegasus
embedding = EmbeddingComposite(sampler_manual)
running = embedding.sample_qubo(matrix, num_reads=1000)
print(running)
inspector.show(running)
return running
def auto_embedding_chain(matrix, chain = 1):
sampler_manual = DWaveSampler(solver={'topology__type': 'chimera'})
embedding = EmbeddingComposite(sampler_manual)
running = embedding.sample_qubo(matrix, num_reads=1000, chain_strength = chain)
print(running)
inspector.show(running)
def manual_embedding(qubit_biases, coupler_strengths, shots = 2):
sampler_manual = DWaveSampler(solver={'topology__type': 'chimera'})
Q = {**qubit_biases, **coupler_strengths}
sampleset = sampler_manual.sample_qubo(Q, num_reads=shots)
print(sampleset)
inspector.show(sampleset)
set = np.array([1, 2, 3])
size = np.size(set)
suma = sum(set)
dimentions = int(np.floor(size/4)) # Cantidad de chimeras diagonales a usar (arrancando en cero)
qubo = np.outer(2*set, 2*set) - 2*suma*np.diag(2*set)
print(qubo[1, :])
auto_embedding_chain(qubo, 6)
variables = size - 1
shift = 0 # Arrancamos en el qubit 16*8
Q = dict()
C = dict()
'''
for i in range(0, variables + 1):
map = qubits_dict(i, variables, qubo, shift)
qubits = map[0]
chains = map[1]
Q = Merge(Q, qubits)
C = Merge(C, chains)
manual_embedding(Q, C, 1000)
# auto_embedding(qubo)
'''
'''
for i in range(0, variables + 1):
map = triangular_embedding(i, variables, qubo, shift)
qubits = map[0]
chains = map[1]
Q = Merge(Q, qubits)
C = Merge(C, chains)
manual_embedding(Q, C, 1000)
# auto_embedding_chain(qubo, chain = np.max(np.absolute(qubo)))
''' | fedeFuidio/quantum_embedding | quadratic_embedding.py | quadratic_embedding.py | py | 2,071 | python | en | code | 0 | github-code | 13 |
32798979231 | """Install Dallinger as a command line utility."""
import pathlib
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text(encoding="utf-8")
setup_args = dict(
name="dallinger",
packages=["dallinger", "dallinger_scripts"],
version="9.11.0a1",
description="Laboratory automation for the behavioral and social sciences",
long_description=README,
long_description_content_type="text/markdown",
url="http://github.com/Dallinger/Dallinger",
maintainer="Jordan Suchow",
maintainer_email="suchow@berkeley.edu",
license="MIT",
keywords=["science", "cultural evolution", "experiments", "psychology"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Framework :: Pytest",
],
include_package_data=True,
zip_safe=False,
entry_points={
"console_scripts": [
"dallinger = dallinger.command_line:dallinger",
"dallinger-housekeeper = dallinger.command_line:dallinger_housekeeper",
"dallinger_heroku_web = dallinger_scripts.web:main",
"dallinger_heroku_worker = dallinger_scripts.worker:main",
"dallinger_heroku_clock = dallinger_scripts.clock:main",
],
"dallinger.experiments": [],
"pytest11": ["pytest_dallinger = dallinger.pytest_dallinger"],
},
install_requires=[
"APScheduler",
"cached-property",
"boto3",
"build",
"click",
"faker",
"Flask-Sock",
"Flask",
"flask-crossdomain",
"flask-login",
"Flask-WTF",
"future",
"gevent",
"greenlet",
"gunicorn[gevent]",
"heroku3",
"ipython < 8.13",
"localconfig",
"numpy < 1.25",
"pandas < 2.1",
"pexpect",
"pip >= 20",
"pip-tools",
"psycopg2",
"psutil",
"pyopenssl",
"redis",
"requests",
"rq",
"selenium",
"six",
"SQLAlchemy < 2",
"sqlalchemy-postgres-copy",
"tabulate",
"tenacity",
"timeago",
"tzlocal",
"ua-parser",
"user-agents",
# Newer versions of Werkzeug are not compatible with the current
# Flask-Login release
"Werkzeug < 3.0.0",
],
extras_require={
"jupyter": [
"ipywidgets",
"jupyter",
"jupyter-server",
],
"data": [
"pandas",
"tablib[all]",
],
"dev": [
"alabaster",
"black",
"bump2version",
"coverage",
"coverage_pth",
"flake8",
"isort",
"mock",
"myst-parser",
"pre-commit",
"pycodestyle",
"pypandoc",
"pytest",
"pytest-rerunfailures",
"sphinx < 7.2",
"sphinx_rtd_theme",
"sphinxcontrib-applehelp <= 1.0.4",
"sphinxcontrib-devhelp <= 1.0.2",
"sphinxcontrib-htmlhelp <= 2.0.1",
"sphinxcontrib-qthelp <= 1.0.3",
"sphinxcontrib-serializinghtml <= 1.1.5",
"sphinxcontrib-spelling",
"tox",
],
"docker": ["docker", "paramiko", "sshtunnel"],
},
)
setup(**setup_args)
| Dallinger/Dallinger | setup.py | setup.py | py | 3,751 | python | en | code | 113 | github-code | 13 |
38577954561 | import logging
from logging.handlers import TimedRotatingFileHandler
def get_standard_logger(pgm_name):
logger = logging.getLogger('eyesone')
if logger.hasHandlers():
return logger
# 로그 레벨 설정
logger.setLevel(logging.DEBUG)
# 콘솔 출력 핸들러
stream_handler = logging.StreamHandler()
logger.addHandler(stream_handler)
# 파일 저장 핸들러
# 1시간마다 로그파일 교체
log_dir = './log/'
log_filename = 'eyesone_information.log'
file_handler = logging.handlers.TimedRotatingFileHandler(filename=log_dir+log_filename, when='h', interval=1)
logger.addHandler(file_handler)
# 로그 포멧 설정
formatter = logging.Formatter('[%(asctime)s](%(levelname)s)%(name)s: %(message)s')
stream_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
return logger | mrbluesky0123/eyesone-game | score_system/common/logger.py | logger.py | py | 918 | python | ko | code | 1 | github-code | 13 |
6263396224 | import cv2
windowName = "threshold image"
trackbarValue = "threshold scale"
scaleFactor = 0
maxScale = 255
imagePath = "CoinsB.png"
src = cv2.imread(imagePath, cv2.IMREAD_GRAYSCALE)
cv2.namedWindow(windowName,cv2.WINDOW_AUTOSIZE)
def threshold_image(*args):
global scaleFactor
scaleFactor = 0 + args[0]
# change the thresholding type and see how it changes the result!
th,dst = cv2.threshold(src,scaleFactor,maxScale,cv2.THRESH_BINARY)
cv2.imshow(windowName,dst)
cv2.createTrackbar(trackbarValue, windowName, scaleFactor, maxScale, threshold_image)
threshold_image(0)
while True:
c = cv2.waitKey(20)
if c==27:
break
cv2.destroyAllWindows() | chrismarti343/coin-recognition | taskbar.py | taskbar.py | py | 686 | python | en | code | 0 | github-code | 13 |
40322204265 | import numpy as np
from utils.OfflineDataLoader import OfflineDataLoader
from base.BaseRecommender import RecommenderSystem
from base.BaseRecommender_SM import RecommenderSystem_SM
from base.RecommenderUtils import check_matrix, to_okapi, to_tfidf
try:
from base.Cython.Similarity import Similarity
except ImportError:
print("Unable to load Cython Cosine_Similarity, reverting to Python")
from base.Similarity_old import Similarity_old
from base.Similarity.Compute_Similarity import Compute_Similarity
class ItemKNNCFRecommender(RecommenderSystem, RecommenderSystem_SM):
RECOMMENDER_NAME = "ItemKNNCFRecommender"
def __init__(self, URM_train, sparse_weights=True):
super(ItemKNNCFRecommender, self).__init__()
self.FEATURE_WEIGHTING_VALUES = ["BM25", "TF-IDF", "none"]
self.URM_train = check_matrix(URM_train, 'csr')
self.sparse_weights = sparse_weights
self.dataset = None
def __repr__(self):
representation = "Item KNN Collaborative Filtering "
return representation
def fit(self, topK=400, shrink=200, similarity='cosine',feature_weighting="BM25", normalize=True,save_model=False,best_parameters=False, offline=False,submission=False,location="submission",**similarity_args):
#similarity_args = {'tversky_alpha': 0.8047100184165605, 'tversky_beta': 1.9775806370926445}
#self.feature_weighting = feature_weighting
if offline:
m = OfflineDataLoader()
folder_path_icf, file_name_icf = m.get_model(self.RECOMMENDER_NAME,training=(not submission))
self.loadModel(folder_path=folder_path_icf,file_name=file_name_icf)
else:
if best_parameters:
m = OfflineDataLoader()
folder_path_icf, file_name_icf = m.get_parameter(self.RECOMMENDER_NAME)
self.loadModel(folder_path=folder_path_icf,file_name=file_name_icf)
#similarity_args = {'normalize': True, 'shrink': 0, 'similarity': 'tversky', 'topK': 20, 'tversky_alpha': 0.18872151621891953, 'tversky_beta': 1.99102432161935}
similarity_args = {'feature_weighting': 'BM25', 'normalize': True, 'shrink': 200, 'similarity': 'cosine', 'topK': 400}
if self.feature_weighting == "none":
pass
if self.feature_weighting == "BM25":
self.URM_train_copy = self.URM_train.astype(np.float32)
self.URM_train_copy = to_okapi(self.URM_train)
elif self.feature_weighting == "TF-IDF":
self.URM_train_copy = self.URM_train.astype(np.float32)
self.URM_train_copy = to_tfidf(self.URM_train)
similarity = Compute_Similarity(self.URM_train_copy, **similarity_args)
else:
self.topK = topK
self.shrink = shrink
self.feature_weighting = feature_weighting
if self.feature_weighting == "BM25":
self.URM_train_copy = self.URM_train.astype(np.float32)
self.URM_train_copy = to_okapi(self.URM_train)
elif self.feature_weighting == "TF-IDF":
self.URM_train_copy = self.URM_train.astype(np.float32)
self.URM_train_copy = to_tfidf(self.URM_train)
if self.feature_weighting == "none":
similarity = Compute_Similarity(self.URM_train, shrink=shrink, topK=topK, normalize=normalize,
similarity=similarity, **similarity_args)
else:
similarity = Compute_Similarity(self.URM_train_copy, shrink=shrink, topK=topK, normalize=normalize,
similarity=similarity, **similarity_args)
self.parameters = "sparse_weights= {0}, similarity= {1}, shrink= {2}, neighbourhood={3}, normalize={4}".format(
self.sparse_weights, similarity, shrink, topK, normalize)
if self.sparse_weights:
self.W_sparse = similarity.compute_similarity()
else:
self.W = similarity.compute_similarity()
self.W = self.W.toarray()
if save_model:
self.saveModel("saved_models/"+location+"/",file_name=self.RECOMMENDER_NAME+"_"+location+"_model")
| yigitozgumus/PolimiRecSys2018 | models/KNN/Item_KNN_CFRecommender.py | Item_KNN_CFRecommender.py | py | 4,404 | python | en | code | 0 | github-code | 13 |
41524497173 | Data = [
"Moris",
"Male",
"Japan",
"30-06-1998",
"moriskha@gmail.com",
"017896524",
]
Data2 = [
"Rojina",
"Female",
"Japan",
"30-06-1998",
"moriskha@gmail.com",
"017896524",
]
Gender = Data[1]
if Gender == "Male":
name = "Boy"
he = "He"
his = "his"
else:
name = "Girls"
he = "She"
his = "Her"
Ssentance = f" {Data[0]} is a good {name}. {he} lives in {Data[2]}.{his} " \
f"Birthday is {Data[3]}. {his} Phone NUmber is {Data[5]}. {he} have a mail that is {Data[4]}"
print(Ssentance) | raqib4you/Learn-Paython | class5/first.py | first.py | py | 615 | python | en | code | 1 | github-code | 13 |
15303394086 | from copy import deepcopy
from typing import List
from sortedcontainers import SortedList
class Solution:
def minAbsoluteDifference(self, nums: List[int], x: int) -> int:
if x == 0:
return 0
arr, best_dist = SortedList([]), float("inf")
for i in range(x, len(nums)):
arr.add(nums[i - x])
v = nums[i]
pos = arr.bisect_left(v)
if pos < len(arr):
best_dist = min(best_dist, abs(arr[pos] - v))
if pos > 0:
best_dist = min(best_dist, abs(arr[pos - 1] - v))
return best_dist
testcases = []
testcases.append(([4, 3, 2, 4], 2, 0))
testcases.append(([5, 3, 2, 10, 15], 1, 1))
testcases.append(([1, 2, 3, 4], 3, 3))
solution = Solution()
for testcase in testcases:
testcase_copy = deepcopy(testcase)
output = getattr(solution, dir(solution)[-1])(*testcase[:-1])
if output != testcase[-1]:
getattr(solution, dir(solution)[-1])(*testcase_copy[:-1])
assert (
False
), f"testcase: {testcase[:-1]}, expected: {testcase[-1]}, output: {output}"
| JosephLYH/leetcode | leetcode/2817.py | 2817.py | py | 1,124 | python | en | code | 0 | github-code | 13 |
17040776314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFincoreComplianceCaasMerchantlevelConsultModel(object):
def __init__(self):
self._amount = None
self._app_name = None
self._app_token = None
self._biz_type = None
self._event_code = None
self._merchant_name = None
self._merchant_pid = None
self._order_count = None
self._request_id = None
self._uscc = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def app_name(self):
return self._app_name
@app_name.setter
def app_name(self, value):
self._app_name = value
@property
def app_token(self):
return self._app_token
@app_token.setter
def app_token(self, value):
self._app_token = value
@property
def biz_type(self):
return self._biz_type
@biz_type.setter
def biz_type(self, value):
self._biz_type = value
@property
def event_code(self):
return self._event_code
@event_code.setter
def event_code(self, value):
self._event_code = value
@property
def merchant_name(self):
return self._merchant_name
@merchant_name.setter
def merchant_name(self, value):
self._merchant_name = value
@property
def merchant_pid(self):
return self._merchant_pid
@merchant_pid.setter
def merchant_pid(self, value):
self._merchant_pid = value
@property
def order_count(self):
return self._order_count
@order_count.setter
def order_count(self, value):
self._order_count = value
@property
def request_id(self):
return self._request_id
@request_id.setter
def request_id(self, value):
self._request_id = value
@property
def uscc(self):
return self._uscc
@uscc.setter
def uscc(self, value):
self._uscc = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.app_name:
if hasattr(self.app_name, 'to_alipay_dict'):
params['app_name'] = self.app_name.to_alipay_dict()
else:
params['app_name'] = self.app_name
if self.app_token:
if hasattr(self.app_token, 'to_alipay_dict'):
params['app_token'] = self.app_token.to_alipay_dict()
else:
params['app_token'] = self.app_token
if self.biz_type:
if hasattr(self.biz_type, 'to_alipay_dict'):
params['biz_type'] = self.biz_type.to_alipay_dict()
else:
params['biz_type'] = self.biz_type
if self.event_code:
if hasattr(self.event_code, 'to_alipay_dict'):
params['event_code'] = self.event_code.to_alipay_dict()
else:
params['event_code'] = self.event_code
if self.merchant_name:
if hasattr(self.merchant_name, 'to_alipay_dict'):
params['merchant_name'] = self.merchant_name.to_alipay_dict()
else:
params['merchant_name'] = self.merchant_name
if self.merchant_pid:
if hasattr(self.merchant_pid, 'to_alipay_dict'):
params['merchant_pid'] = self.merchant_pid.to_alipay_dict()
else:
params['merchant_pid'] = self.merchant_pid
if self.order_count:
if hasattr(self.order_count, 'to_alipay_dict'):
params['order_count'] = self.order_count.to_alipay_dict()
else:
params['order_count'] = self.order_count
if self.request_id:
if hasattr(self.request_id, 'to_alipay_dict'):
params['request_id'] = self.request_id.to_alipay_dict()
else:
params['request_id'] = self.request_id
if self.uscc:
if hasattr(self.uscc, 'to_alipay_dict'):
params['uscc'] = self.uscc.to_alipay_dict()
else:
params['uscc'] = self.uscc
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayFincoreComplianceCaasMerchantlevelConsultModel()
if 'amount' in d:
o.amount = d['amount']
if 'app_name' in d:
o.app_name = d['app_name']
if 'app_token' in d:
o.app_token = d['app_token']
if 'biz_type' in d:
o.biz_type = d['biz_type']
if 'event_code' in d:
o.event_code = d['event_code']
if 'merchant_name' in d:
o.merchant_name = d['merchant_name']
if 'merchant_pid' in d:
o.merchant_pid = d['merchant_pid']
if 'order_count' in d:
o.order_count = d['order_count']
if 'request_id' in d:
o.request_id = d['request_id']
if 'uscc' in d:
o.uscc = d['uscc']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayFincoreComplianceCaasMerchantlevelConsultModel.py | AlipayFincoreComplianceCaasMerchantlevelConsultModel.py | py | 5,317 | python | en | code | 241 | github-code | 13 |
4845201132 | from flask import Blueprint, jsonify, session, request
from flask_login import login_required
from app.models import Tag, db
import json
tag_routes = Blueprint('tags', __name__)
@tag_routes.route('/', methods=['POST'])
@login_required
def createTag(userid):
req_data = json.loads(request.data)
tag_name = req_data['name']
existing_tag = Tag.query.filter(Tag.title == tag_name).first()
if existing_tag is None:
new_tag = Tag(title=tag_name, user_id=userid)
db.session.add(new_tag)
db.session.commit()
return new_tag.to_dict()
else:
return existing_tag.to_dict()
@tag_routes.route('/<int:tagid>', methods=['DELETE'])
@login_required
def deleteTag(userid, tagid):
tag = Tag.query.filter(Tag.id == tagid).first()
if tag is None:
return {'id': 0}
db.session.delete(tag)
db.session.commit()
return {'id': tagid}
| mjshuff23/evernote-clone | app/api/tag_routes.py | tag_routes.py | py | 899 | python | en | code | 16 | github-code | 13 |
13573393212 | import socket
import random
client = socket.socket(socket.AF_INET , socket.SOCK_STREAM)
client.connect((socket.gethostname() , 8080))
l,h,n = [int(i) for i in client.recv(1024).decode('utf-8').split('\n')]
print("You are worker number : " + str(n))
print(f"received limits : {l} {h}")
while True:
msg = client.recv(1024).decode("utf-8")
#if msg == '\n':
r = random.randrange(l,h,1)
client.send(str(r).encode('utf-8'))
print(f"sent {r}")
if msg == 'end':
print("You have reached the end!")
client.close()
break
else:
print(msg)
| sirabas369/Socket-programming-and-Multi-threading | client.py | client.py | py | 591 | python | en | code | 0 | github-code | 13 |
74518162256 | import argparse
import csv
import simplejson
class DataTransformer(object):
def transform(self, input_file, output_file):
year_header_map = {}
output_dict = []
with open(input_file, 'rb') as in_csv_file:
csv_reader = csv.reader(in_csv_file, delimiter=',')
for row in csv_reader:
if int(csv_reader.line_num) == 1:
for col_index in range(1,len(row)):
year = row[col_index].split(" ")[0]
budget_type = " ".join(row[col_index].split(" (")[0].split(" ")[1:])
year_header_map[col_index] = {"year" : year, "budget_type" : budget_type}
else:
row_slug = "".join(row[1:-1])
print(row_slug)
if not row_slug or (not "." in row_slug and not float(row_slug)):
continue
indicator = row[0].strip()
indicator_dict = {"name": indicator, "series": []}
for col_index in range(1,len(row)):
budget_type = year_header_map[col_index]["budget_type"]
year = year_header_map[col_index]["year"]
data_entered = False
for budget_dict in indicator_dict["series"]:
if "key" in budget_dict and budget_dict["key"] == budget_type:
budget_dict["values"].append({"label" : year, "value" : float(row[col_index].strip())})
data_entered = True
if not data_entered:
indicator_dict["series"].append({"key" : budget_type, "values":[{"label" : year, "value" : float(row[col_index].strip())}]})
output_dict.append(indicator_dict)
output_json = simplejson.dumps(output_dict)
output_file_obj = open(output_file, "w")
output_file_obj.write(output_json)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Transforms input CSV file into JSON file")
parser.add_argument("input_file", help="Input CSV filepath")
parser.add_argument("output_file", help="Output JSON filepath")
args = parser.parse_args()
obj = DataTransformer()
if not args.input_file or not args.output_file:
print("Please pass input and output filepaths")
else:
obj.transform(args.input_file, args.output_file)
| cbgaindia/parsers | municipal_budget/csv_to_json.py | csv_to_json.py | py | 2,504 | python | en | code | 14 | github-code | 13 |
26020816672 | from bs4 import BeautifulSoup as bsoup
import requests as rq
import csv
url = "http://espn.go.com/mens-college-basketball/standings"
r = rq.get(url)
soup = bsoup(r.content)
trs = soup.find_all("table", class_=True)
with open("records.csv", "wb") as ofile:
f = csv.writer(ofile)
f.writerow(["Team","Record"])
for tr in trs:
tds = tr.find_all("td")[3:]
i = 0
check = "0-"
tencheck = "10-"
twentycheck = "20-"
thirtycheck = "30-"
while (i < len(tds)-2):
record = tds[i+2].get_text().encode("utf-8")
if record.find(check) == -1:
i = i + 3
else:
if record.find(tencheck) == -1 and record.find(twentycheck) == -1 and record.find(thirtycheck) == -1:
team = tds[i].get_text().encode("utf-8")
f.writerow([team, record])
i = i + 3
else:
i = i + 3 | adhan06/cbb-winless | Losers.py | Losers.py | py | 801 | python | en | code | 0 | github-code | 13 |
22074723955 | #!/usr/bin/env python3
"""
"""
from npoapi import Subtitles
import os
import json
def subtitles():
client = Subtitles().command_line_client(description="Set subtitles")
client.add_argument('mid|text', type=str, nargs=1, help='The mid for wich subtitles to get. Or form description')
client.add_argument('-S', '--search', action='store_true',
help="""The argument is interpreted as a text to search on""")
client.add_argument('language', type=str, nargs='?', default="nl", help='Language. Required when getting mid')
client.add_argument('type', type=str, nargs='?', default="CAPTION", help='', choices={"CAPTION", "TRANSLATION"})
args = client.parse_args()
mid_or_text = vars(args)['mid|text'][0]
language = args.language
search = args.search
if search:
form = mid_or_text
if not os.path.isfile(form) and not form.startswith("{") and not form.startswith("<"):
form = "{\"searches\": {\"text\": %s}}" % json.dumps(form)
print(client.search(form))
else:
mid = mid_or_text
print(client.get(mid, language, subtitle_type=args.type))
client.exit()
if __name__ == "__main__":
subtitles()
| npo-poms/pyapi | src/npoapi/bin/npo_subtitles.py | npo_subtitles.py | py | 1,217 | python | en | code | 0 | github-code | 13 |
73446225298 | # -*- coding: utf-8 -*-
__author__ = 'Ivan Cherednikov'
__email__ = 'ivch@nmbu.no'
class LCGRand:
def __init__(self, seed):
self.a = 16807
self.m = (2 ** 31) - 1
self.r = seed
def rand(self):
self.r = self.a*self.r % self.m
return self.r
class ListRand:
def __init__(self, numbers):
self.numbers = numbers
self.idx = 0
def rand(self):
if self.idx >= len(self.numbers): # hopefully this line is correct
raise RuntimeError
else:
self.idx += 1
return self.numbers[self.idx - 1]
if __name__ == "__main__":
lcg = LCGRand(241)
lrList = [4, 10, 5, 8, 6, 15]
lr = ListRand(lrList)
lcg_numb = []
lr_numb = []
for i in range(10):
lcg_numb.append(lcg.rand())
for i in range(len(lrList)):
lr_numb.append(lr.rand())
print(lcg_numb)
print(lr_numb)
| inoplanetka/INF200-2019-Exersices | src/ivan_cherednikov_ex/ex04/myrand.py | myrand.py | py | 921 | python | en | code | 0 | github-code | 13 |
24769767509 | from pygame.key import get_pressed
from pug.component import *
from pig import Scene
from pig.keyboard import keys
from pig.editor.agui import KeyDropdown
class Joystick_Button_To_Key( Component):
"""Convert joystick button presses to simulate keyboard key presses. This
component requires the Joystick_Input component. Note that strange effects can
occur if both the joystick and keyboard are used simultaneously.
To see the events being sent by the joystick, look at the console output of the
Joystick_Input component with test_mode set to True.
"""
# component_info
_set = 'pig'
_type = 'controls'
_class_list = [Scene]
# attributes:
_field_list = [
['joystick_id',"The joystick's ID number (event info: 'joy')"],
['button_0', KeyDropdown,
{'doc':'Key to simulate when button 0 is pressed'}],
['button_1', KeyDropdown,
{'doc':'Key to simulate when button 1 is pressed'}],
['button_2', KeyDropdown,
{'doc':'Key to simulate when button 2 is pressed'}],
['button_3', KeyDropdown,
{'doc':'Key to simulate when button 3 is pressed'}],
['button_4', KeyDropdown,
{'doc':'Key to simulate when button 4 is pressed'}],
['button_5', KeyDropdown,
{'doc':'Key to simulate when button 5 is pressed'}],
['button_6', KeyDropdown,
{'doc':'Key to simulate when button 6 is pressed'}],
['button_7', KeyDropdown,
{'doc':'Key to simulate when button 7 is pressed'}],
['button_8', KeyDropdown,
{'doc':'Key to simulate when button 8 is pressed'}],
['button_9', KeyDropdown,
{'doc':'Key to simulate when button 9 is pressed'}],
['button_10', KeyDropdown,
{'doc':'Key to simulate when button 10 is pressed'}],
['button_11', KeyDropdown,
{'doc':'Key to simulate when button 11 is pressed'}],
]
# defaults
joystick_id = 0
button_0 = "SPACE"
button_1 = None
button_2 = None
button_3 = None
button_4 = None
button_5 = None
button_6 = None
button_7 = None
button_8 = None
button_9 = None
button_10 = None
button_11 = None
def __init__( self, *a, **kw):
self.downbuttons = {}
Component.__init__( self, *a, **kw)
@component_method
def handle_joybuttondown( self, event):
if event.joy != self.joystick_id:
return
key = keys[getattr(self, "button_" + str(event.button))]
if key:
self.owner.do_key_callbacks( key)
@component_method
def handle_joybuttonup( self, event):
if event.joy != self.joystick_id:
return
key = keys[getattr(self, "button_" + str(event.button))]
if key:
self.owner.do_key_callbacks( key, keydict="KEYUP")
@component_method
def register_key_down( self, key, fn, *args, **kwargs):
"Check axis state when keys are registered"
# if we don't do this we can get key up messages with no corresponding
# key down
stick = self.owner.get_joystick(self.joystick_id)
if not stick:
return
n = 0
for button in [self.button_0, self.button_1, self.button_2,
self.button_3, self.button_4, self.button_5, self.button_6,
self.button_7, self.button_8, self.button_9, self.button_10,
self.button_11]:
if key == button and stick.get_button( n):
fn( *args, **kwargs)
return
n += 1
register_component( Joystick_Button_To_Key)
| sunsp1der/pug | pig/components/scene/Joystick_Button_To_Key.py | Joystick_Button_To_Key.py | py | 3,840 | python | en | code | 0 | github-code | 13 |
13238507045 | """
Class for FileToList
"""
class FileToList(object):
"""
FileToList is a helper class used to import text files and turn them into
lists, with each index in the list representing a single line from the
text file.
"""
@staticmethod
def to_list(file_path):
"""
Static method. Takes in a file path, and outputs a list of stings.
Each element in the list corresponds to a line in the file.
:param file_path: string file path
:return: A list of strings, with elements in the list corresponding
to lines in the file pointed to in file_path
"""
l = []
f = open(file_path, 'r')
for line in f:
l.append(line)
f.close()
return l
| samjabrahams/anchorhub | anchorhub/lib/filetolist.py | filetolist.py | py | 759 | python | en | code | 6 | github-code | 13 |
70696505939 | # Harry Potter has got the “n” number of apples. Harry has some students among whom he wants to distribute the apples. These “n” number of apples is provided to harry by his friends, and he can request for few more or a few less apples.
#
# You need to print whether a number is in range mn to mx, is a divisor of “n” or not.
# Input:
#
# Take input n, mn, and mx from the user.
#
# Output:
# Print whether the numbers between mn and mx are divisors of “n” or not. If mn=mx, show that this is not a range, and mn is equal to mx. Show the result for that number.
#
# Example:
# If n is 20 and mn=2 and mx=5
#
# 2 is a divisor of20
#
# 3 is not a divisor of 20
#
# …
#
# 5 is a divisor of 20
try:
n = int(input("Enter the number of apples\n"))
mn = int(input("Enter the minimum number of student\n"))
mx = int(input("Enter the maximum number of student\n"))
except ValueError:
print("No string pls")
exit()
if mn >= mx:
print("minimum can't be more or bigger than maximum")
for _ in range(mn, mx+1):
if n % _ == 0:
print(f"{_} is divisor of {n}")
else:
print(f"{_} is not divisor of {n}") | anant-harryfan/Python_basic_to_advance | PythonTuts/Python_Practise/Practise2.py | Practise2.py | py | 1,154 | python | en | code | 0 | github-code | 13 |
23240758168 | import torch
import math
from collections import deque
class Optim(torch.optim.Optimizer):
    """RAdam-style optimizer with a diffGrad friction term, gradient-noise
    resampling, and Lookahead slow weights.

    Per-parameter state: first/second moment EMAs, the previous gradient
    (for the diffGrad coefficient), a running gradient-variance estimate,
    and the mean/variance EMAs consumed by gradient_noise().
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.992, 0.9), eps=1e-7, k=4, alpha=0.5):
        """
        params : iterable of tensors / param groups to optimize
        lr     : base learning rate
        betas  : (moment1 decay, moment2 decay, extra beta -- currently unused)
        eps    : denominator fuzz term
        k      : Lookahead sync period (steps between slow-weight updates)
        alpha  : Lookahead interpolation factor
        """
        defaults = dict(lr=lr,
                        betas=betas,
                        eps=eps,
                        buffer=[[None, None, None, None] for _ in range(10)],
                        k=k,
                        alpha=alpha)
        super(Optim, self).__init__(params, defaults)
        # Lookahead step counter, one per parameter group.
        for group in self.param_groups:
            group['counter'] = 0
    def __setstate__(self, state):
        super(Optim, self).__setstate__(state)
    @torch.no_grad()
    def step(self):
        """Perform one optimization step (closures are not supported)."""
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    # Robustness fix: skip parameters that got no gradient.
                    continue
                grad = p.grad.data
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    state['previous_grad'] = torch.zeros_like(p.data)
                    state['exp_variance'] = torch.zeros_like(p.data)
                    state['end_gradient_mean'] = torch.zeros_like(p.data)
                    state['end_gradient_var'] = torch.zeros_like(p.data)
                if state['step'] != 0:
                    # NOTE(review): element-wise division by exp_avg can explode
                    # when the first moment is near zero -- confirm this term.
                    grad = grad - torch.sqrt(state['exp_variance']) / state['exp_avg']
                # Previous exponential moving averages.
                exp_avg, exp_avg_sq, previous_grad, exp_variance = state['exp_avg'], state['exp_avg_sq'], state['previous_grad'], state['exp_variance']
                beta1, beta2, beta3 = group['betas']  # beta3 is currently unused
                lr = group['lr']
                state['step'] += 1
                buffered = group['buffer'][int(state['step'] % 10)]
                exp_avg_prev = exp_avg
                exp_avg = torch.mul(exp_avg, beta1) + (1-beta1) * grad
                exp_avg_sq = torch.mul(exp_avg_sq, beta2) + (1-beta2) * (grad*grad)
                exp_variance = torch.mul(exp_variance, beta1) + (1-beta1) * (grad - exp_avg_prev) * (grad - exp_avg)
                state['exp_avg'] = exp_avg
                state['exp_avg_sq'] = exp_avg_sq
                state['exp_variance'] = exp_variance
                # diffGrad friction: damp the update where the gradient barely changed.
                diff_grad = torch.abs(previous_grad - grad)
                dfc = torch.div(1.0, (1.0 + torch.exp(-diff_grad)))
                state['previous_grad'] = grad
                exp_avg = exp_avg * dfc
                # RAdam rectification (cached per step modulo 10).
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2/(1-beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t
                    buffered[1] = N_sma
                    if N_sma >= 5:
                        step_size = (lr * math.sqrt((1-beta2_t)
                                     * (N_sma - 4)
                                     / (N_sma_max - 4)
                                     * (N_sma - 4)
                                     / (N_sma)
                                     * (N_sma_max)
                                     / (N_sma - 2)))
                    else:
                        step_size = lr / (1 - beta1 ** state['step'])
                    buffered[2] = step_size
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt() + group['eps']
                    gradients = exp_avg / denom
                    gradients, state = self.gradient_noise(gradients, state, beta1)
                    p.data = p.data + gradients * -step_size
                else:
                    # Variance not yet tractable: un-adapted (SGD-like) step.
                    exp_avg, state = self.gradient_noise(exp_avg, state, beta1)
                    p.data = p.data + (-step_size * exp_avg)
            # Lookahead: every k-th step, interpolate fast weights toward the slow copy.
            if group['counter'] == 0:
                for fast in group['params']:
                    k, alpha = group['k'], group['alpha']
                    # BUGFIX: key the state by the parameter object, not the string
                    # 'fast' -- the string key made every parameter share one slow buffer.
                    param_state = self.state[fast]
                    if 'slow_params' not in param_state:
                        param_state['slow_params'] = torch.clone(fast.data).detach()
                    slow = param_state['slow_params']
                    # BUGFIX: the original out-of-place mul().add() discarded its
                    # result; the interpolation must be done in place.
                    fast.data.mul_(alpha).add_(slow, alpha=1.0 - alpha)
                    slow.data.copy_(fast)
            group['counter'] = (group['counter'] + 1) % group['k']
    def gradient_noise(self, gradients, state, beta):
        """Resample the update from a Normal(EMA mean, EMA std) fit of itself."""
        end_gradient_mean = state['end_gradient_mean']
        end_gradient_var = state['end_gradient_var']
        end_gradient_mean_prev = end_gradient_mean
        end_gradient_mean = torch.mul(end_gradient_mean, beta) + (1-beta) * (gradients)
        end_gradient_var = torch.mul(end_gradient_var, beta) + \
            ((1-beta) * (gradients - end_gradient_mean_prev) * (gradients - end_gradient_mean))
        end_gradient_std = torch.sqrt(end_gradient_var)
        gradient_dist = torch.distributions.Normal(end_gradient_mean, end_gradient_std)
        gradients = gradient_dist.sample()
        state['end_gradient_var'] = end_gradient_var
        state['end_gradient_mean'] = end_gradient_mean
        return gradients, state
| reeshogue/Cozminimum | optim_v2.py | optim_v2.py | py | 4,300 | python | en | code | 0 | github-code | 13 |
43165814129 | import pytest
import requests
from connector import ConnectionService
import threading
import time
PORT = 5000
def getNodeList():
    """Return the known cluster members, our own address first."""
    peers = ["192.168.1.21", "192.168.1.17", "192.168.1.22"]
    return [_service._ip] + peers
# Module-level fixtures: one live ConnectionService plus the shared
# bully-algorithm state that the tests below mutate (and restore).
_service = ConnectionService()
print(_service._ip,flush=True)
print(_service._name,flush=True)
_nodes = getNodeList()
# _election: an election round is in flight; _coordinator: we won one.
_election = False
_coordinator = False
def answer():
    """Handle an ANSWER message: a higher node responded, so stop our election."""
    # BUGFIX: without this declaration the assignment below made _election a
    # local name, so reading it in the `if` raised UnboundLocalError and the
    # module flag was never cleared.
    global _election
    if (_election):
        _election = False
def compareIpHigher(one, two):
    """Return True when dotted-quad `two` is numerically higher than `one`."""
    first = bytes(int(octet) for octet in one.split('.'))
    second = bytes(int(octet) for octet in two.split('.'))
    return first < second
def checkNodeAvailable(ip, path=''):
    """Probe http://<ip>:PORT<path>; True only when it answers without error."""
    target = 'http://' + ip + ":" + str(PORT) + path
    try:
        response = requests.get(target)
        response.raise_for_status()
    except Exception:
        # Covers HTTP error statuses (raise_for_status) as well as
        # connection/timeout failures from requests.get itself.
        return False
    return True
def _announceCoordinator(nodes):
    """Declare this node the coordinator and broadcast that to `nodes`."""
    global _coordinator
    global _election
    global _service
    print("bully.announceCoordinator: ", nodes)
    _election = False
    _coordinator = True
    # announce coordinator to every remaining node
    # (idiom fix: iterate the list directly instead of range(len(...)))
    for node in nodes:
        _service.sendCoordinatorMessage(node)
def _electionTimeout():
    """Timer callback: if the election got no answer, take over as coordinator."""
    if not _election:
        return
    _nodes.remove(_service._ip)
    _announceCoordinator(_nodes)
def _sendElectionMessage():
    """Start a bully election: ping the highest-addressed peer, or claim leadership."""
    global _service
    global _election
    global _nodes
    global _coordinator
    print('sendElectionMessage', flush=True)
    _election = True
    _nodes.remove(_service._ip)
    # Candidates are all peers whose address outranks ours, lowest to highest.
    higher = [node for node in _nodes if compareIpHigher(_service._ip, node)]
    higher.sort(key=lambda ip: bytes(map(int, ip.split('.'))))
    print("higher:", higher, flush=True)
    if not higher:
        # Nobody outranks us: announce ourselves as coordinator right away.
        _announceCoordinator(_nodes)
    else:
        # Challenge the top-ranked node, then give it a short window to answer
        # before assuming it is down.
        _service.sendElectionMessage(higher[-1])
        threading.Timer(3, _electionTimeout).start()
# ------------------------ TESTS FUNCTIONS BELOW ------------------------ #
def test_election_timeout():
    """A pending election that times out must make this node coordinator."""
    global _coordinator
    global _election
    global _nodes
    _election = True
    _electionTimeout()
    assert _coordinator == True
    # Restore module state: clear the flag and re-add the ip that
    # _electionTimeout removed from _nodes.
    _election = False
    _nodes.append(_service._ip)
def test_compareIpHigher():
    """compareIpHigher orders dotted-quad addresses numerically."""
    ip_1 = "192.168.1.20"
    ip_2 = "192.168.1.21"
    # expected output:
    # True if ip_2 > ip_1
    assert compareIpHigher(ip_1,ip_2) == True
    # expected output:
    # False if ip_1 > ip_2
    assert compareIpHigher(ip_2,ip_1) == False
def test_checkNodeAvailable_service_up():
    """The local ConnectionService must report its own address reachable."""
    # expected output
    # True if Flask server is up and running
    assert _service.checkNodeAvailable(_service._ip) == True
def test_answer():
    """answer() must leave the module-level election flag cleared."""
    answer()
    assert _election == False
def test_sendElectionMessage():
    """Starting an election must raise the module-level election flag."""
    global _nodes
    _sendElectionMessage()
    # b._election should be True as election has been started
    assert _election == True
    # Restore _nodes: _sendElectionMessage removed our own ip from it.
    _nodes.append(_service._ip)
def test__checkNodeAvailable_service_down():
    """With no service object, probing must raise (AttributeError on None)."""
    global _service
    _service = None
    # expected output
    # Exception raised if there is no service manager
    with pytest.raises(Exception):
        _service.checkNodeAvailable(_service._ip)
| oludom/DPSproject3 | unit_test.py | unit_test.py | py | 3,565 | python | en | code | 0 | github-code | 13 |
29274420185 | from skimage.io import imread
from skimage.filters import threshold_otsu
import matplotlib.pyplot as plt
filename='video12.mp4'
import cv2
# Open the video file; switch to cv2.VideoCapture(0) to grab from a webcam.
cap = cv2.VideoCapture(filename)
count = 0
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # End of stream (or a read error): stop capturing.
        break
    cv2.imshow('window-name', frame)
    # Dump every frame as ./out/frameN.jpg (the out/ directory must exist).
    cv2.imwrite("./out/frame%d.jpg" % count, frame)
    count += 1
    # BUGFIX: cv2.waitKey() returns -1 (truthy) when no key is pressed, so the
    # original `if cv2.waitKey(10000):` aborted after the very first frame.
    # Now only a 'q' keypress stops the capture loop.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| rockysw/attendence1 | videocap.py | videocap.py | py | 515 | python | en | code | 0 | github-code | 13 |
268182460 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['SimHei'] # use SimHei so Chinese labels render correctly
plt.rcParams['axes.unicode_minus']=False # render the minus sign properly with a CJK font
data_train = pd.read_csv('../Datasets/titanic/train.csv')
data_test = pd.read_csv('../Datasets/titanic/test.csv')
# Inspect the raw data with pandas (left disabled):
# print(train_data.info())
'''
fig = plt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
plt.subplot2grid((2,3),(0,0)) # 在一张大图里分列几个小图
train_data.Survived.value_counts().plot(kind='bar')# 柱状图
plt.title("获救情况 (1为获救)") # 标题
plt.ylabel("人数")
plt.subplot2grid((2,3),(0,1))
train_data.Pclass.value_counts().plot(kind="bar")
plt.ylabel("人数")
plt.title("乘客等级分布")
plt.subplot2grid((2,3),(0,2))
plt.scatter(train_data.Survived, train_data.Age)
plt.ylabel("年龄") # 设定纵坐标名称
plt.grid(b=True, which='major', axis='y')
plt.title("按年龄看获救分布 (1为获救)")
plt.subplot2grid((2,3),(1,0), colspan=2)
train_data.Age[train_data.Pclass == 1].plot(kind='kde')
train_data.Age[train_data.Pclass == 2].plot(kind='kde')
train_data.Age[train_data.Pclass == 3].plot(kind='kde')
plt.xlabel("年龄")# plots an axis lable
plt.ylabel("密度")
plt.title("各等级的乘客年龄分布")
plt.legend(('头等舱', '2等舱','3等舱'),loc='best') # sets our legend for our graph.
plt.subplot2grid((2,3),(1,2))
train_data.Embarked.value_counts().plot(kind='bar')
plt.title("各登船口岸上船人数")
plt.ylabel("人数")
plt.show()
#看看各乘客等级的获救情况
fig = plt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
Survived_0 = train_data.Pclass[train_data.Survived == 0].value_counts()
Survived_1 = train_data.Pclass[train_data.Survived == 1].value_counts()
df=pd.DataFrame({'获救':Survived_1, '未获救':Survived_0})
df.plot(kind='bar', stacked=True)
plt.title("各乘客等级的获救情况")
plt.xlabel("乘客等级")
plt.ylabel("人数")
plt.show()
'''
# 看年龄
'''
Survived_Up = data_train.Survived[data_train.Age >= 25].value_counts()
Survived_Down = data_train.Survived[data_train.Age < 25].value_counts()
df=pd.DataFrame({u'高于25岁':Survived_Up, u'低于25岁':Survived_Down})
df.plot(kind='bar', stacked=True)
plt.title(u"按年龄看获救情况")
plt.xlabel(u"年龄")
plt.ylabel(u"人数")
plt.show()
'''
# 看看各性别的获救情况
'''
# fig = plt.figure()
# fig.set(alpha=0.2) # 设定图表颜色alpha参数
Survived_m = data_train.Survived[train_data.Sex == 'male'].value_counts()
Survived_f = data_train.Survived[train_data.Sex == 'female'].value_counts()
df=pd.DataFrame({u'男性':Survived_m, u'女性':Survived_f})
print(df)
df.plot(kind='bar', stacked=True)
plt.title(u"按性别看获救情况")
plt.xlabel(u"性别")
plt.ylabel(u"人数")
plt.show()
'''
# Survival broken down by cabin class AND sex: four bar charts on a shared y axis
# (female/high, female/low, male/high, male/low).
fig=plt.figure()
fig.set(alpha=0.65) # figure transparency; not important
plt.title(u"根据舱等级和性别的获救情况")
ax1=fig.add_subplot(141)
data_train.Survived[data_train.Sex == 'female'][data_train.Pclass != 3].value_counts().plot(kind='bar', label="female highclass", color='#FA2479')
# NOTE(review): ax1 labels the bars "获救, 未获救" while ax2-ax4 use the opposite
# order; value_counts() sorts by frequency, so at least one ordering is likely
# mislabeled -- verify against the data.
ax1.set_xticklabels([u"获救", u"未获救"], rotation=0)
ax1.legend([u"女性/高级舱"], loc='best')
ax2=fig.add_subplot(142, sharey=ax1)
data_train.Survived[data_train.Sex == 'female'][data_train.Pclass == 3].value_counts().plot(kind='bar', label='female, low class', color='pink')
ax2.set_xticklabels([u"未获救", u"获救"], rotation=0)
plt.legend([u"女性/低级舱"], loc='best')
ax3=fig.add_subplot(143, sharey=ax1)
data_train.Survived[data_train.Sex == 'male'][data_train.Pclass != 3].value_counts().plot(kind='bar', label='male, high class',color='lightblue')
ax3.set_xticklabels([u"未获救", u"获救"], rotation=0)
plt.legend([u"男性/高级舱"], loc='best')
ax4=fig.add_subplot(144, sharey=ax1)
data_train.Survived[data_train.Sex == 'male'][data_train.Pclass == 3].value_counts().plot(kind='bar', label='male low class', color='steelblue')
ax4.set_xticklabels([u"未获救", u"获救"], rotation=0)
plt.legend([u"男性/低级舱"], loc='best')
plt.show()
| JoyGin/DeepLearning_inHand | Kaggle/Titanic.py | Titanic.py | py | 4,279 | python | en | code | 1 | github-code | 13 |
21264361366 | #
# @lc app=leetcode id=74 lang=python3
#
# [74] Search a 2D Matrix
#
# @lc code=start
from typing import List
class Solution:
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """Binary-search a sorted matrix (each row ascending, each row's first
        element greater than the previous row's last) as one flat sorted list.

        Runs in O(log(m*n)) time and O(1) space. Returns True iff `target`
        is present. Robustness fix: an empty matrix (or empty first row) now
        returns False instead of raising IndexError.
        """
        if not matrix or not matrix[0]:
            return False
        rows, cols = len(matrix), len(matrix[0])
        lo, hi = 0, rows * cols - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            # Map the flat index back to a (row, col) pair.
            row, col = divmod(mid, cols)
            value = matrix[row][col]
            if value == target:
                return True
            # elif/else chain: the original evaluated all three comparisons
            # on every iteration.
            elif value < target:
                lo = mid + 1
            else:
                hi = mid - 1
        return False
# @lc code=end
# Ad-hoc smoke checks: each matrix/target pair overwrites the previous one,
# so only the last case ([[1],[3]] with target 2 -> False) is actually run.
matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]]
target = 3
matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]]
target = 13
matrix = [[1],[3]]
target = 2
rs = Solution().searchMatrix(matrix, target)
print(rs) | sundaycat/Leetcode-Practice | solution/74. search-a-2-d-matrix.py | 74. search-a-2-d-matrix.py | py | 1,069 | python | en | code | 0 | github-code | 13 |
74045578258 | '''
Psycopg2 – Insert dictionary as JSON
'''
# __________________________ PREP _____________________________
import psycopg2
from psycopg2.extras import Json
import pandas as pd
import json
import ast
import pprint
''' lcoal import '''
from Config import payload
print(payload)
def parse_config():
    """Pull the connection settings out of the Config payload.

    Returns the tuple (dbname, host, user, password, tablename).
    """
    return (payload['dbname'],
            payload['host'],
            payload['user'],
            payload['password'],
            payload['tablename'])
def get_connection():
    """Open a new psycopg2 connection from the module-level credentials."""
    credentials = {
        'dbname': dbname,
        'host': host,
        'user': user,
        'password': password,
    }
    return psycopg2.connect(**credentials)
def get_cursor(conn):
    # Thin wrapper: hand back a fresh cursor for the given connection.
    return conn.cursor()
def create_table(conn):
    """Create the daily_monitor table (text id plus a JSON-text column).

    Lets the database error propagate (e.g. when the table already exists);
    try_create_table() is the forgiving wrapper.
    """
    curr = conn.cursor()
    table = 'daily_monitor'
    # BUGFIX: the original statement had a trailing comma after the last
    # column ("json_col VARCHAR,)"), which is invalid SQL and made the
    # CREATE TABLE fail on every run.
    create_table = """CREATE TABLE {} (id VARCHAR PRIMARY KEY,
                    json_col VARCHAR)
                    ;""".format(table)
    curr.execute(create_table)
    conn.commit()
def try_create_table(conn):
    """Create the table if possible; print (but swallow) any failure, such as
    the table already existing."""
    try:
        create_table(conn)
    except Exception as error:
        # The original caught (Exception, psycopg2.DataError); DataError is
        # already a subclass of Exception, so the tuple was redundant.
        print(error)
conn = get_connection()
## __________________________________ METHOD 1 __________________________________
# '''
# Method 1:
# Setting up a PostgreSQL Database and Table:
# '''
# # __________________________ imports __________________________
# import json
# # __________________________ insert data ______________________
# def dict_to_json(value: dict):
# # CONVERT DICT TO A JSON STRING AND RETURN
# return json.dumps(value)
# def insert_value(id: str, json_col: str, conn):
# # CREATE A CURSOR USING THE CONNECTION OBJECT
# curr = conn.cursor()
# # EXECUTE THE INSERT QUERY
# curr.execute(f'''
# INSERT INTO
# json_table(id, json_col)
# VALUES
# ('{id}', '{json_col}')
# ''')
# # COMMIT THE ABOVE REQUESTS
# conn.commit()
# # CLOSE THE CONNECTION
# conn.close()
# def main():
# # CREATE A PSYCOPG2 CONNECTION
# conn = get_connection()
# #TRY CREATING TABLE
# try_create_table(conn)
# # CREATE A PYTHON DICT OBJECT FOR JSON COL
# dict_obj = {
# "name": "John Smith",
# "skill": "Python",
# "experience": 2
# }
# # CONVERT DICT OBJECT TO JSON STRING
# json_obj = dict_to_json(value=dict_obj)
# # INSERT VALUES IN THE DATABASE TABLE
# insert_value(id='JSON002', json_col=json_obj,
# conn=conn)
# if __name__ == '__main__':
# dbname, host, user, password = parse_config()
# main()
## |______________________________________________________________________________|
## __________________________________ METHOD 2 __________________________________
'''
Method 2:
Using the psycopg2 Json adaptation:
'''
from psycopg2.extras import Json
# # __________________________ insert data ____________________
def insert_data(curr, id, dict_obj):
    """Insert one (id, json) row into the configured table.

    SECURITY FIX: `id` is now passed as a bound parameter instead of being
    f-string-interpolated into the SQL, closing an injection hole and making
    ids containing quotes work.
    """
    print("_"*50)
    print()
    print()
    print(dict_obj)
    print()
    print()
    print("_"*50)
    _, _, _, _, tablename = parse_config()
    # The table name cannot be a bound parameter, but it comes from our own
    # config payload, not from user input.
    curr.execute(f'''
        INSERT INTO
            {tablename}(id, json_col)
        VALUES
            (%s, %s)
        ''', [id, Json(dict_obj)])
def insert():
    """Insert every record from manualInsertList() and commit once at the end."""
    conn = get_connection()
    curr = get_cursor(conn)
    records = manualInsertList()
    ids = [record['channel_username'] for record in records]
    print(f'currently inserting the channels: {ids}')
    # One INSERT per record; all of them share the single commit below.
    for record_id, record in zip(ids, records):
        insert_data(curr, record_id, record)
    curr.close()
    conn.commit()
    conn.close()
# ## |______________________________________________________________________________|
# ## _______________________________ FETCHING DATA _______________________________
def fetch_data(curr):
    """Fetch rows (all of them, or just the ids from searchList()) and print
    them both as parsed JSON and as a pandas DataFrame.

    SECURITY FIX: the id list is now bound as a query parameter (psycopg2
    adapts a Python tuple to an SQL list) instead of being spliced into the
    statement text.
    """
    search_list = searchList()
    _, _, _, _, tablename = parse_config()
    if search_list:
        # Keep the original human-readable "'a', 'b'" display string.
        formatted = (str(search_list)[1:-1])
        print(f'currently fetching the channels: {formatted}:')
        print("_"*100)
        curr.execute(f"SELECT * from {tablename} WHERE id in %s;", (tuple(search_list),))
    else:
        # Empty search list means "fetch everything"; the original third
        # branch was unreachable.
        curr.execute(f"SELECT * FROM {tablename};")
    data = curr.fetchall()
    # Parse every stored JSON column once, up front.
    parsed = [ast.literal_eval(row[1]) for row in data]
    df = pd.DataFrame(columns = ['channel_username','slug','channel_id','member_count',
        'online_count', 'link', 'possible_creation_date','total_messages', 'messages_today'])
    if parsed:
        # PERF: one concat for all rows instead of a quadratic per-row concat.
        df = pd.concat([df, pd.DataFrame(parsed)], axis = 0)
    print()
    print('FETCHED DATA AS JSON:')
    pprint.pprint(parsed)
    print()
    print('FETCHED DATA AS DATAFRAME:')
    print(df)
    print()
    print("_"*100)
def fetch():
    """Open a connection, run fetch_data(), then commit and clean up."""
    conn = get_connection()
    cursor = conn.cursor()
    fetch_data(cursor)
    cursor.close()
    conn.commit()
    conn.close()
## |______________________________________________________________________________|
## _________________________ PURGE TABLE ________________________
def purge_table(curr):
    """Interactively TRUNCATE the configured table after a y/n confirmation.

    Re-prompts on invalid input instead of recursing through purge(), which
    used to open a brand-new connection on every bad answer.
    """
    print("________ !!! WARNING !!! ________")
    print(f''' you are about to delete ALL the of the current data stored in {tablename},
    please make sure to make a backup before proceeding.''')
    print()
    while True:
        # .strip().lower() folds the duplicated 'y'/'Y' and 'n'/'N' branches.
        warning = input(f"are you sure you want to purge {tablename}? (y/n): ").strip().lower()
        if warning == 'y':
            print(f"purging: {tablename}")
            # Consistency fix: truncate the configured table rather than a
            # hard-coded 'daily_monitor'.
            curr.execute(f"truncate {tablename};")
            print(f"The data in {tablename} was purged")
            return
        if warning == 'n':
            print(f"Aborting the purge of {tablename}, the data will not be deleted.")
            return
        print('''invalied input, please type "y" for yes, or "n" for no,
        or press "ctrl+c" to close the program.''')
def purge():
    """Open a connection, run the interactive purge, then commit and clean up."""
    conn = get_connection()
    cursor = conn.cursor()
    purge_table(cursor)
    cursor.close()
    conn.commit()
    conn.close()
## |___________________________________________________________________|
def manualInsertList():
    """Records for manual inserts (used by insert()); edit the literals below
    to change what gets written."""
    records = [
        {
            'timestamp': '2022.04.09',
            'channel_title': 'Etherium Classic',
            'channel_username': 'ethclassic',
            'channel_id': 23456789,
            'member_count': 7000,
            'online_count': 700,
            'link': 'https//t.me/ethclassic',
            'possible_creation_date': '2016.09.13',
            'total_messages': 482002,
            'messages_today': 0,
        },
        {
            'timestamp': '2022.04.09',
            'channel_title': 'FTX something something',
            'channel_username': 'FTX_Official',
            'channel_id': 23456789,
            'member_count': 76000,
            'online_count': 7600,
            'link': 'https//t.me/ethclassic',
            'possible_creation_date': '2016.09.13',
            'total_messages': 690000,
            'messages_today': 0,
        },
    ]
    return records
def searchList():
    """Channel ids to restrict fetch() to; an empty list means "fetch all"."""
    # e.g. return ['RuffChain', 'UniMexNetwork'] to fetch only those channels.
    return []
# Entry point: load the connection settings into module globals (the helper
# functions above read these), then run exactly one of the three actions.
if __name__ == '__main__':
    dbname, host, user, password, tablename = parse_config()
    ''' choose which action you want to do next:'''
    fetch()
    # purge()
    # insert()
'''
QUICK USER MANUAL:
    Before usage, remember to update payload in the config file (Config.py), so that the code can access the database.
The database admin has three options on the controll panel:
1. purge()
2. insert()
3. fetch()
purge():
deletes ALL the data in the database, be WARNED.
insert():
..mainly used for testing and while building the code.
manually inserts data from a list of dictionaries named "manual_insert".
you'll find the list in the function right above "manualInsert()",
to change the list, change the values (on the right side of ":") inside the dict "{}",
remember to use commas.
remember to use "" or '' for texts and dates, and do NOT use "" or '' for numbers.
fetch():
simply fetches the data from the database and displays them for you as a dataframe.
By default it fetches all of the channels from the database,
        if the user wants to check out one or more specific channels;
go to the function "searchList()" above and replace the list "search_list"
from this (default):
search_list = []
to this (spesific search):
search_list = [ 'RuffChain', 'UniMexNetwork', ]
remember to use commas and "" or ''
HOW TO USE:
To use a function simply remove the "#" character before the code to activate it,
everything with "#" infront of it will be igrored by the machine. e.g.:
fetch()
# purge()
# insert()
now you have chosen the fetch option.
# fetch()
purge()
# insert()
now you have chosen the purge option.
''' | Borgerod/Telegram_surveillance | postgres_controll_panel.py | postgres_controll_panel.py | py | 10,813 | python | en | code | 0 | github-code | 13 |
24263221686 | from waflib import Configure, Errors, Utils
# TODO: make generic
CHECK_SYMBOL_EXISTS_FRAGMENT = '''
#include "build.h"
int main(int argc, char** argv)
{
(void)argv;
#ifndef %s
return ((int*)(&%s))[argc];
#else
(void)argc;
return 0;
#endif
}
'''
# generated(see comments in public/build.h)
# cat build.h | grep '^#undef XASH' | awk '{ print "'\''" $2 "'\''," }'
DEFINES = [
'XASH_64BIT',
'XASH_AMD64',
'XASH_ANDROID',
'XASH_APPLE',
'XASH_ARM',
'XASH_ARM_HARDFP',
'XASH_ARM_SOFTFP',
'XASH_ARMv4',
'XASH_ARMv5',
'XASH_ARMv6',
'XASH_ARMv7',
'XASH_ARMv8',
'XASH_BIG_ENDIAN',
'XASH_DOS4GW',
'XASH_E2K',
'XASH_EMSCRIPTEN',
'XASH_FREEBSD',
'XASH_HAIKU',
'XASH_IOS',
'XASH_IRIX',
'XASH_JS',
'XASH_LINUX',
'XASH_LINUX_UNKNOWN',
'XASH_LITTLE_ENDIAN',
'XASH_MIPS',
'XASH_MOBILE_PLATFORM',
'XASH_NETBSD',
'XASH_OPENBSD',
'XASH_POSIX',
'XASH_PPC',
'XASH_RISCV',
'XASH_RISCV_DOUBLEFP',
'XASH_RISCV_SINGLEFP',
'XASH_RISCV_SOFTFP',
'XASH_SERENITY',
'XASH_WIN32',
'XASH_X86',
'XASH_NSWITCH',
'XASH_PSVITA',
]
def configure(conf):
	"""Derive the per-platform library postfix and store it in conf.env.POSTFIX.

	Compiles small probe programs against public/build.h to discover which
	XASH_* OS/architecture macros are defined for the current toolchain, then
	maps them to the "_<os>_<arch>" suffix used in library file names.
	"""
	conf.env.stash()
	conf.start_msg('Determining library postfix')
	# One link-test per XASH_* macro; successful probes set conf.env.<macro>.
	tests = map(lambda x: {
		'fragment': CHECK_SYMBOL_EXISTS_FRAGMENT % (x, x),
		'includes': [conf.path.find_node('public/').abspath()],
		'define_name': x }, DEFINES )
	conf.multicheck(*tests, msg = '', mandatory = False, quiet = True)
	# engine/common/build.c
	if conf.env.XASH_ANDROID:
		buildos = "android"
	elif conf.env.XASH_LINUX_UNKNOWN:
		buildos = "linuxunkabi"
	elif conf.env.XASH_WIN32 or conf.env.XASH_LINUX or conf.env.XASH_APPLE:
		buildos = "" # no prefix for default OS
	elif conf.env.XASH_FREEBSD:
		buildos = "freebsd"
	elif conf.env.XASH_NETBSD:
		buildos = "netbsd"
	elif conf.env.XASH_OPENBSD:
		buildos = "openbsd"
	elif conf.env.XASH_EMSCRIPTEN:
		buildos = "emscripten"
	elif conf.env.XASH_DOS4GW:
		buildos = "dos4gw" # unused, just in case
	elif conf.env.XASH_HAIKU:
		buildos = "haiku"
	elif conf.env.XASH_SERENITY:
		buildos = "serenityos"
	elif conf.env.XASH_NSWITCH:
		buildos = "nswitch"
	elif conf.env.XASH_PSVITA:
		buildos = "psvita"
	elif conf.env.XASH_IRIX:
		buildos = "irix"
	else:
		conf.fatal("Place your operating system name in build.h and library_naming.py!\n"
			"If this is a mistake, try to fix conditions above and report a bug")
	if conf.env.XASH_AMD64:
		buildarch = "amd64"
	elif conf.env.XASH_X86:
		# x86 also gets no suffix on the three "default" desktop OSes.
		if conf.env.XASH_WIN32 or conf.env.XASH_LINUX or conf.env.XASH_APPLE:
			buildarch = ""
		else:
			buildarch = "i386"
	elif conf.env.XASH_ARM and conf.env.XASH_64BIT:
		buildarch = "arm64"
	elif conf.env.XASH_ARM:
		buildarch = "armv"
		if conf.env.XASH_ARMv8:
			buildarch += "8_32"
		elif conf.env.XASH_ARMv7:
			buildarch += "7"
		elif conf.env.XASH_ARMv6:
			buildarch += "6"
		elif conf.env.XASH_ARMv5:
			buildarch += "5"
		elif conf.env.XASH_ARMv4:
			buildarch += "4"
		else:
			raise conf.fatal('Unknown ARM')
		if conf.env.XASH_ARM_HARDFP:
			buildarch += "hf"
		else:
			buildarch += "l"
	elif conf.env.XASH_MIPS:
		buildarch = "mips"
		if conf.env.XASH_64BIT:
			buildarch += "64"
		if conf.env.XASH_LITTLE_ENDIAN:
			buildarch += "el"
	elif conf.env.XASH_RISCV:
		buildarch = "riscv"
		if conf.env.XASH_64BIT:
			buildarch += "64"
		else:
			buildarch += "32"
		# NOTE: XASH_RISCV_SOFTFP adds no suffix here -- presumably intentional.
		if conf.env.XASH_RISCV_DOUBLEFP:
			buildarch += "d"
		elif conf.env.XASH_RISCV_SINGLEFP:
			buildarch += "f"
	elif conf.env.XASH_JS:
		buildarch = "javascript"
	elif conf.env.XASH_E2K:
		buildarch = "e2k"
	elif conf.env.XASH_PPC:
		buildarch = "ppc"
		if conf.env.XASH_64BIT:
			buildarch += "64"
		if conf.env.XASH_LITTLE_ENDIAN:
			buildarch += "el"
	else:
		raise conf.fatal("Place your architecture name in build.h and library_naming.py!\n"
			"If this is a mistake, try to fix conditions above and report a bug")
	# Drop the probe results from conf.env; only POSTFIX should leak out.
	conf.env.revert()
	if buildos == 'android':
		# force disable for Android, as Android ports aren't distributed in normal way and doesn't follow library naming
		conf.env.POSTFIX = ''
	elif buildos != '' and buildarch != '':
		conf.env.POSTFIX = '_%s_%s' % (buildos,buildarch)
	elif buildarch != '':
		conf.env.POSTFIX = '_%s' % buildarch
	else:
		conf.env.POSTFIX = ''
	conf.end_msg(conf.env.POSTFIX)
| FWGS/hlsdk-portable | scripts/waifulib/library_naming.py | library_naming.py | py | 4,144 | python | en | code | 222 | github-code | 13 |
39659982749 | import random
class BankAccount:
    """
    A class to represent a bank account.

    Attributes
    ----------
    full_name : str
        first and last name of the account owner
    account_number : int
        randomly generated 8-digit number, unique per account
    routing_number : int
        9-digit number shared by all accounts (class attribute)
    balance : int or float
        current balance; starts at 0
    """

    # Same routing number for every account.
    routing_number = 987654323

    def __init__(self, full_name):
        """Create an account for `full_name` with a random 8-digit account
        number and a zero balance."""
        self.full_name = full_name
        self.account_number = random.randint(10000000, 99999999)
        self.balance = 0

    def deposit(self, amount):
        """Add `amount` to the balance and print a confirmation message."""
        self.balance += amount
        print(f"Amount Deposited: ${amount}")

    def withdraw(self, amount):
        """Subtract `amount` from the balance and print a confirmation.

        If `amount` exceeds the current balance, nothing is withdrawn; a $10
        overdraft fee is charged instead and "Insufficient funds." is printed.
        """
        overdraft_fee = 10
        if amount > self.balance:
            self.balance -= overdraft_fee
            print("Insufficient funds.")
        else:
            self.balance -= amount
            print(f"Amount Withdrawn: ${amount}")

    def get_balance(self):
        """Print a friendly balance message and return the current balance."""
        print(f"Hello, your current balance is: ${round(self.balance, 2)}")
        # BUGFIX: the docstring always promised a return value, but the
        # original method returned None.
        return self.balance

    def add_interest(self, monthly_rate=0.00083):
        """Apply one month of interest to the balance.

        `monthly_rate` generalizes the previously hard-coded constant; the
        default keeps the original behavior (1% annual ~= 0.083% monthly).
        """
        interest = self.balance * monthly_rate
        self.balance += interest

    def print_receipt(self):
        """Print a receipt with the owner's name, masked account number,
        routing number, and balance."""
        print(f"""{self.full_name}
        Account No.: ****{str(self.account_number)[-4:]}
        Routing No.: {self.routing_number}
        Balance: ${round(self.balance, 2)}""")
# Instantiate objects
matthew = BankAccount("Matthew Wei")
thomas = BankAccount("Thomas Cat")
gerald = BankAccount("Gerald Mouse")
# Matthew
print("----------------------")
matthew.deposit(10)
matthew.withdraw(5)
matthew.get_balance()
matthew.add_interest()
matthew.print_receipt()
# Thomas
print("-----------------------")
thomas.deposit(20)
thomas.withdraw(30)
thomas.get_balance()
thomas.add_interest()
thomas.print_receipt()
# Gerald
print("----------------------")
gerald.deposit(100)
gerald.withdraw(99)
gerald.get_balance()
gerald.add_interest()
gerald.print_receipt()
# Divider
print("----------------------")
| matthewwei35/CS-1.1_MW_Bank_Account | bank_account.py | bank_account.py | py | 4,585 | python | en | code | 1 | github-code | 13 |
6534991885 | from tkinter import *
from PIL import ImageTk, Image
import mysql.connector
import testthirdgame
# Fixed-size fullscreen-ish window for the "Game Rules" screen.
root = Tk()
root.geometry("1366x768")
root.configure(bg="#324AEE")
root.resizable(0,0)
frame=Frame(root,bg="#324AEE",height=768,width=1366)
frame.place(x=0,y=0)
# img2: wide banner; img/img1: normal and hover (yellow) button backgrounds.
img2 = Image.open("polygon1.png")
img2 = img2.resize((900, 600), Image.ANTIALIAS)
img2 = ImageTk.PhotoImage(img2)
img = Image.open("polygon1.png")
img = img.resize((405, 370), Image.ANTIALIAS)
img = ImageTk.PhotoImage(img)
img1 = Image.open("polygonyellow1.png")
img1 = img1.resize((405, 370), Image.ANTIALIAS)
img1 = ImageTk.PhotoImage(img1)
c=Canvas(frame,bg='#324AEE',width=1366,height=150,highlightthickness=0)
c.place(x=0,y=10)
c.create_image(650, 65, anchor=CENTER, image=img2)
reg=Label(frame,text='Game Rules',bg="#0A22B1",fg="white",font="Courier 30 bold")
reg.place(x=500,y=55)
def on_enter1(e):
    # Hover: swap the Start button to its yellow artwork.
    btn['image'] = img1
def on_leave1(e):
    # Hover end: restore the normal artwork.
    btn['image'] = img
def prr():
    # Start: tear down this window and launch the quiz.
    root.destroy()
    testthirdgame.game()
btn=Button(frame,text='Start',height=100,width=400,command=prr,font="Courier 17 bold",activebackground='#324AEE',
           bg="#324AEE",fg="white",image=img,compound="center",relief="groove",bd=0)
btn.place(x=750,y=600)
# Rule lines. NOTE(review): the name fff3 is reused for rules 4-6; only the
# last Label stays bound to it, though all of them remain placed on screen.
fff=Label(frame,bg='#324AEE',bd=0,fg="white",font="Courier 13 bold",text='1. There will be 11 questions')
fff.place(x=10,y=200)
fff1=Label(frame,bg='#324AEE',bd=0,fg="white",font="Courier 13 bold",text='2. They are arranged by increasing order of difficulty')
fff1.place(x=10,y=250)
fff2=Label(frame,bg='#324AEE',bd=0,fg="white",font="Courier 13 bold",text='3. Simplier questions first and are worth less')
fff2.place(x=10,y=300)
fff3=Label(frame,bg='#324AEE',bd=0,fg="white",font="Courier 13 bold",text='4. Answering the hardest 11th question, will make you a winner of ₹1 Crore!')
fff3.place(x=10,y=350)
fff3=Label(frame,bg='#324AEE',bd=0,fg="white",font="Courier 13 bold",text='5. There are no lifelines!!')
fff3.place(x=10,y=400)
fff3=Label(frame,bg='#324AEE',bd=0,fg="white",font="Courier 13 bold",text='6. Wrong answer will not make you lose any money so be chilled and relax')
fff3.place(x=10,y=450)
btn.bind("<Enter>", on_enter1)
btn.bind("<Leave>", on_leave1)
root.mainloop()
| anazr9/kbc | gamerules.py | gamerules.py | py | 2,258 | python | en | code | 0 | github-code | 13 |
15185501956 | from flask import Flask, render_template, jsonify
from flask_socketio import SocketIO
app = Flask(__name__)
# NOTE(review): hard-coded placeholder secret; set a real key via config/env
# before deploying.
app.config['SECRET_KEY']='secret!'
socketapp = SocketIO(app)
@app.route("/")
def index():
    # Serve the single-page client that opens the Socket.IO connection.
    return render_template("index.html")
@socketapp.on('message')
def handle_message(message):
    # Fired for every unnamed Socket.IO 'message' event from a client.
    print('received message: ' + message)
if __name__ == "__main__":
    print("starting")
    # Blocks until the server stops; the line below only prints on shutdown.
    socketapp.run(app)
    # app.run()
    print("postrunline")
| kirkdotcam/flasksocketexample | app.py | app.py | py | 452 | python | en | code | 0 | github-code | 13 |
38243444996 | # coding=utf-8
import re
import requests
__all__ = ('check_ver',)
session = requests.Session()
session.trust_env = False
url = 'https://raw.githubusercontent.com/animalize/ting_py/master/launcher.py'
def check_ver(current, full=True):
    """Compare `current` against the latest version number published on GitHub.

    full=True checks FULL_VERSION (bundled desktop build), full=False checks
    TING_VERSION. Returns a human-readable status string (in Chinese); never
    raises. FIX: the bare `except:` clauses (which also swallowed
    SystemExit/KeyboardInterrupt) were narrowed to `except Exception`.
    """
    try:
        r = session.get(url)
    except Exception:
        return '无法获取GitHub上的页面'
    try:
        r.encoding = 'utf-8'
        html = r.text
    except Exception:
        return '无法用utf-8解码“包含版本信息的网页”'
    # The launcher source defines e.g. "FULL_VERSION = 123"; grab the number.
    if full:
        p = r'FULL_VERSION\s*=\s*(\d+)'
    else:
        p = r'TING_VERSION\s*=\s*(\d+)'
    m = re.search(p, html)
    if not m:
        return '无法从“包含版本信息的网页”提取最新的版本号'
    ver = int(m.group(1))
    if ver > current:
        b = '当前版本%d,发现新版本%d\n' % (current, ver)
        if full:
            a = '\nTing桌面端集成版 检查更新\n'
            c = '到这里下载新版:https://github.com/animalize/ting_py/releases\n'
        else:
            a = '\nTing桌面端 检查更新\n'
            c = '到这里下载新版:https://github.com/animalize/ting_py\n'
        s = a + b + c
    else:
        s = '没有发现新版本\n'
    return s
33517935338 | from collections import OrderedDict
from typing import List, Dict
def classify_conversions(arrays: Dict[int, Dict[str, float]], conversion_classifications: List[dict], conversion_levels):
    """classify conversion by code and type using binned classes
    Args:
        arrays (dict): reach dictionaries with conversion type dictionaries to classify
        conversion_classifications (List(dict)): list of dicitionaries generated by conversion csv
        conversion_levels: level records; entries with a truthy "MaxValue" define the
            severity bins used below (a value falls in the first bin whose MaxValue
            it does not exceed — bins assumed pre-sorted ascending upstream)
    Returns:
        Dict: reach dictionary of conversion types and codes
    """
    # Severity bins (value <= MaxValue).
    bins = [v for v in conversion_levels if v["MaxValue"]]
    # OrderedDict([("Very Minor", 0.1),
    #             ("Minor", 0.25),
    #             ("Moderate", 0.5),
    #             ("Significant", 1.0)]) # value <= bin
    # Fallback code used whenever no classification record matches.
    unknown_code = next(item["ConversionID"] for item in conversion_classifications if item['ConversionType'] == 'NoChange' and item['ConversionLevel'] == 'Unknown')
    output = {}
    # Positive TypeValue = conversion away from riparian; negative = toward riparian.
    pos_classes = {value["ConversionType"]: value for value in conversion_classifications if int(value["TypeValue"]) > 0}
    neg_classes = {value["ConversionType"]: value for value in conversion_classifications if int(value["TypeValue"]) < 0}
    for reach_id, reach_values in arrays.items():
        pos_reach_values = {key: reach_values[key] for key in pos_classes.keys()}
        neg_reach_values = {key: reach_values[key] for key in neg_classes.keys()}
        reach_values["RiparianTotal"] = sum(list(neg_reach_values.values()))
        # Case 1: No Change
        # NOTE(review): threshold is 0.85 but the matched level is labelled
        # "NoChange (>90%)" — confirm which value is intended.
        if reach_values["NoChange"] >= 0.85: # no change is over .85
            reach_values["ConversionID"] = next((item["ConversionID"] for item in conversion_classifications if item["ConversionType"] == 'NoChange' and item["ConversionLevel"] == "NoChange (>90%)"), unknown_code)
            output[reach_id] = reach_values
        # Case 2: Conversion to Riparian (riparian gain exceeds every individual loss class)
        elif all([value < reach_values['RiparianTotal'] for value in pos_reach_values.values()]):
            # NOTE(review): if RiparianTotal exceeds the largest MaxValue no bin
            # matches and the reach is silently omitted from output — confirm intent.
            for b in bins:
                if reach_values["RiparianTotal"] <= b["MaxValue"]:
                    reach_values["ConversionID"] = next((item["ConversionID"] for item in conversion_classifications if item['LevelID'] == b["LevelID"] and item['ConversionType'] == 'Riparian'), unknown_code)
                    output[reach_id] = reach_values
                    break
        # Case 3: Conversion from Riparian — classify by the dominant loss class
        elif any([v > 0.0 for v in pos_reach_values.values()]):
            largest_class = max(pos_reach_values, key=pos_reach_values.get)
            for b in bins:
                if reach_values[largest_class] <= b['MaxValue']: # check
                    reach_values["ConversionID"] = next((item["ConversionID"] for item in conversion_classifications if item['LevelID'] == b["LevelID"] and item['ConversionType'] == largest_class), unknown_code)
                    output[reach_id] = reach_values
                    break
        else:
            # Nothing matched: record the reach with the Unknown code.
            reach_values["ConversionID"] = unknown_code
            output[reach_id] = reach_values
    return output
| Riverscapes/riverscapes-tools | packages/rvd/rvd/lib/classify_conversions.py | classify_conversions.py | py | 3,031 | python | en | code | 10 | github-code | 13 |
42076318398 | import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
class CNN(nn.Module):
    """Two-branch CNN for multi-task learning.

    Each task gets its own convolutional stack (``input1``/``conv_block1``
    and ``input2``/``conv_block2``) followed by its own linear head
    (``output1``/``output2``); ``forward`` returns one output tensor per task.
    """
    def __init__(self, args, num_layers, input_shape, channel_size, output_size1=100, output_size2=1000, bn_momentum=1e-3, dropout=0.):
        """Build both branches.

        :param args: config object; only ``args.mode`` is read here.
        :param num_layers: number of conv+pool stages per branch (each halves H and W).
        :param input_shape: (channels, height, width) of the input images.
        :param channel_size: number of feature channels in every conv layer.
        :param output_size1: output width of task-1 head.
        :param output_size2: output width of task-2 head.
        :param bn_momentum: BatchNorm momentum.
        :param dropout: dropout probability applied after each conv stage.
        """
        super(CNN, self).__init__()
        self.layers = num_layers
        # First conv of each branch maps input channels -> channel_size.
        self.conv1 = nn.Conv2d(input_shape[0], channel_size, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(input_shape[0], channel_size, kernel_size=3, stride=1, padding=1)
        # Remaining num_layers-1 convs keep channel_size unchanged.
        self.conv_layers1 = nn.ModuleList([
            nn.Conv2d(channel_size, channel_size, kernel_size=3, stride=1, padding=1) \
            for _ in range(num_layers-1)])
        self.conv_layers2 = nn.ModuleList([
            nn.Conv2d(channel_size, channel_size, kernel_size=3, stride=1, padding=1) \
            for _ in range(num_layers-1)])
        if args.mode == 'mt_filter':
            # 'mt_filter' mode recreates all conv layers and offsets their
            # weights/biases by torch.Tensor(1).
            # NOTE(review): torch.Tensor(1) is an *uninitialized* 1-element
            # tensor, so this perturbation is nondeterministic — confirm this
            # is the intended "filter" initialization.
            self.conv1 = nn.Conv2d(input_shape[0], channel_size, kernel_size=3, stride=1, padding=1)
            self.conv1.weight = nn.Parameter(self.conv1.weight + torch.Tensor(1), requires_grad=True)
            self.conv1.bias = nn.Parameter(self.conv1.bias + torch.Tensor(1), requires_grad=True)
            self.conv2 = nn.Conv2d(input_shape[0], channel_size, kernel_size=3, stride=1, padding=1)
            self.conv2.weight = nn.Parameter(self.conv2.weight + torch.Tensor(1), requires_grad=True)
            self.conv2.bias = nn.Parameter(self.conv2.bias + torch.Tensor(1), requires_grad=True)
            self.conv_layers1 = nn.ModuleList([nn.Conv2d(channel_size, channel_size, kernel_size=3, stride=1, padding=1) for _ in range(num_layers-1)])
            for i in range(num_layers-1):
                self.conv_layers1[i].weight = nn.Parameter(self.conv_layers1[i].weight + torch.Tensor(1), requires_grad=True)
                self.conv_layers1[i].bias = nn.Parameter(self.conv_layers1[i].bias + torch.Tensor(1), requires_grad=True)
            self.conv_layers2 = nn.ModuleList([nn.Conv2d(channel_size, channel_size, kernel_size=3, stride=1, padding=1) for _ in range(num_layers-1)])
            for i in range(num_layers-1):
                self.conv_layers2[i].weight = nn.Parameter(self.conv_layers2[i].weight + torch.Tensor(1), requires_grad=True)
                self.conv_layers2[i].bias = nn.Parameter(self.conv_layers2[i].bias + torch.Tensor(1), requires_grad=True)
        # Stage 1 of each branch: conv -> BN -> ReLU -> dropout -> 2x2 max-pool.
        self.input1 = nn.Sequential(
            self.conv1,
            nn.BatchNorm2d(num_features=channel_size, momentum=bn_momentum),
            nn.ReLU(True),
            nn.Dropout(p=dropout),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.input2 = nn.Sequential(
            self.conv2,
            nn.BatchNorm2d(num_features=channel_size, momentum=bn_momentum),
            nn.ReLU(True),
            nn.Dropout(p=dropout),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Later stages reuse the same conv/BN/ReLU/dropout/pool pattern.
        if self.layers >= 2:
            self.conv_block1 = nn.ModuleList([
                nn.Sequential(
                    self.conv_layers1[i],
                    nn.BatchNorm2d(num_features=channel_size, momentum=bn_momentum),
                    nn.ReLU(True),
                    nn.Dropout(p=dropout),
                    nn.MaxPool2d(kernel_size=2, stride=2)
                ) for i in range(num_layers-1)
            ])
            self.conv_block2 = nn.ModuleList([
                nn.Sequential(
                    self.conv_layers2[i],
                    nn.BatchNorm2d(num_features=channel_size, momentum=bn_momentum),
                    nn.ReLU(True),
                    nn.Dropout(p=dropout),
                    nn.MaxPool2d(kernel_size=2, stride=2)
                ) for i in range(num_layers-1)
            ])
        # Each pooling stage halves the spatial size; compute flattened width
        # for the linear heads.
        height = input_shape[1]
        width = input_shape[2]
        for _ in range(num_layers):
            height = height // 2
            width = width // 2
        self.output1 = nn.Linear(height*width*channel_size, output_size1)
        self.output2 = nn.Linear(height*width*channel_size, output_size2)
        self.softmax = nn.Softmax(dim=1)  # kept but unused in forward (softmax lines are commented out)
        # for m in self.modules():
        #     if isinstance(m, nn.Linear):
        #         m.weight.data = nn.init.xavier_uniform_(m.weight.data, gain=nn.init.calculate_gain('tanh'))
        #         m.bias.data = nn.init.constant_(m.bias.data, 0.0)
    def forward(self, x1, x2):
        """Run each input through its own branch; returns (out1, out2) raw scores."""
        x1 = self.input1(x1)
        if self.layers >= 2:
            for conv_block1 in self.conv_block1:
                x1 = conv_block1(x1)
        x2 = self.input2(x2)
        if self.layers >= 2:
            for conv_block2 in self.conv_block2:
                x2 = conv_block2(x2)
        # Flatten the spatial maps before the linear heads.
        x1 = x1.view(x1.size(0), -1)
        x2 = x2.view(x2.size(0), -1)
        out1 = self.output1(x1)
        # out1 = self.softmax(out1)
        out2 = self.output2(x2)
        # out2 = self.softmax(out2)
        return out1, out2
def build_mt_cnn(args, num_layers, input_shape, channel_size, output_size1, output_size2, bn_momentum, dropout):
    """Factory helper: build a two-branch multi-task CNN.

    All arguments are forwarded unchanged to :class:`CNN`.
    """
    return CNN(
        args,
        num_layers,
        input_shape,
        channel_size,
        output_size1=output_size1,
        output_size2=output_size2,
        bn_momentum=bn_momentum,
        dropout=dropout,
    )
| sangminwoo/Cost-Out-Multitask-Learning | lib/models/multi_task/cnn_multitask.py | cnn_multitask.py | py | 5,499 | python | en | code | 1 | github-code | 13 |
7023662768 | # -*- coding: utf-8 -*-
import numpy as np
import time
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from scipy import stats
from sklearn.datasets import fetch_california_housing
from sklearn import neighbors

# k-NN regression on the California-housing dataset with timing and metrics.
# Load the dataset and hold out 40% of it for evaluation.
x_train, y_train = fetch_california_housing(return_X_y=True)
x_train, x_test, y_train, y_test = train_test_split(
    x_train, y_train, test_size=0.40)

# conversion to float32
x_train = np.array(x_train, np.float32)
y_train = np.array(y_train, np.float32)

# z-score normalization for better results
# NOTE(review): each split is normalized with its own mean/std; the usual
# practice is to fit the scaler on the training split only — confirm intent.
x_train = stats.zscore(x_train)
y_train = stats.zscore(y_train)
x_test = stats.zscore(x_test)
y_test = stats.zscore(y_test)

# Time only the fit + predict phase.  (An earlier, unused timer that was
# overwritten by this line has been removed.)
start_time = time.time()

# using the knn algorithm for regression
model = neighbors.KNeighborsRegressor(n_neighbors=5)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)

# average distance between the observed data values and the predicted data values
rmse = mean_squared_error(y_test, y_pred, squared=False)
r2 = r2_score(y_test, y_pred)
print('rmse:', rmse)
print('r2:', r2)
print("--- %s seconds ---" % (time.time() - start_time))
| chaido-porlou/NeuralNetworks | RBF/knn_regression.py | knn_regression.py | py | 1,253 | python | en | code | 0 | github-code | 13 |
34363539638 | import logging
from godzillops import Chat
def main(config):
    """Run an interactive read-eval-print loop against a godzillops Chat bot.

    :param config: configuration passed straight through to ``Chat``.
    """
    gz_chat = Chat(config)
    try:
        _input = ""
        while True:
            _input = input("> ")
            responses = gz_chat.respond(_input)
            try:
                for response in responses:
                    if isinstance(response, str):
                        print(response)
            # Previously a bare ``except:`` — narrowed so KeyboardInterrupt /
            # SystemExit are not swallowed and can reach the outer handler.
            except Exception:
                logging.exception("Error generated responding to < {} >.".format(_input))
                print("An error occurred - check the logs. Reinitializing GZ.")
                # Rebuild the bot so a corrupted internal state can't persist.
                gz_chat = Chat(config)
    except (EOFError, KeyboardInterrupt):
        print("Exiting...")
| deybhayden/tokyo | platforms/text.py | text.py | py | 671 | python | en | code | 0 | github-code | 13 |
19988300684 | from django.conf.urls import url
from StoriesApp import views
urlpatterns = [
    # Story collection (list/create).
    url(r'^stories/$', views.AllStoriesView.as_view()),
    # Single story addressed by UUID.
    url(r'^stories/(?P<storie_uuid>[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})/$',
        views.ConcreteStorieView.as_view()),
    url(r'^api/new_storie/$', views.new_storie, name='new_storie'),
    # NOTE(review): the next two patterns are both r'^$'; only the first
    # ('done') can ever match, so the 'nope' route is unreachable — confirm.
    url(r'^$' , views.done, name = 'done'),
    url(r'^$' , views.nope, name = 'nope'),
]
| Linonse/RSOI-lab2-workers | Stories/StoriesApp/urls.py | urls.py | py | 449 | python | en | code | 0 | github-code | 13 |
13142188905 | import os
import csv
import cv2
import numpy as np
import random
import math
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Conv2D, Dropout, Cropping2D
import sklearn
from sklearn.utils import shuffle
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
# Image augumentation by adding random brightness.
def random_brightness(image):
    """
    Randomly adjust brightness of the image.

    Converts the RGB image to HLS, scales the lightness channel by a random
    factor in [0.8, 1.2], and converts back to RGB.
    """
    # OpenCV HLS channel order is (Hue, Lightness, Saturation), so lightness
    # is index 1.  The original code scaled index 2, which is saturation,
    # not brightness.
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    ratio = 1.0 + 0.4 * (np.random.rand() - 0.5)
    hls[:, :, 1] = hls[:, :, 1] * ratio
    return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB)
# Defining image loading function.
def get_driving_log(path):
    """Read ``driving_log.csv`` from *path* and return its rows.

    :param path: directory containing the CSV file, including the trailing
        path separator.
    :return: list of rows, each row a list of string fields.
    """
    with open(path + 'driving_log.csv') as csvfile:
        return list(csv.reader(csvfile))
del_angle = 0.001 # drop images whose steering angle is below 0.001, i.e. straight-line driving data.
del_rate = 0.5 # probability of dropping such a straight-line sample.
def generator(path, samples, batch_size=32):
    """Infinite batch generator for Keras fit_generator.

    Each CSV row yields center/left/right images (with a +/-0.27 steering
    offset for the side cameras), random brightness augmentation, and a
    horizontally flipped copy with negated angle.

    :param path: dataset root containing the ``IMG/`` directory.
    :param samples: rows from driving_log.csv.
    :param batch_size: rows per batch (each row can yield up to 6 images).
    """
    num_samples = len(samples)
    while 1: # Loop forever so the generator never terminates
        samples = sklearn.utils.shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                # Columns 0/1/2 hold the center/left/right image paths.
                for image_nb in [0, 1, 2]:
                    name = path + 'IMG/'+ batch_sample[image_nb].split('/')[-1]
                    # cv2 loads BGR; convert to RGB before augmentation.
                    image = cv2.cvtColor(cv2.imread(name), cv2.COLOR_BGR2RGB)
                    image = random_brightness(image)
                    # added +/- 0.27 angle offset for left/right images
                    shift_dict = {0: 0, 1: 0.27, 2: -0.27}
                    angle = float(batch_sample[3]) + shift_dict.get(image_nb, "error")
                    # NOTE(review): this also discards *all* negative angles
                    # with 50% probability, not just near-zero ones — the
                    # comment suggests abs(angle) < del_angle was intended.
                    if angle < del_angle: # to ignore zero steering angle data
                        if np.random.random() < del_rate:
                            continue
                    # Image augmentation: flipped copy with negated steering angle.
                    images.append(np.fliplr(image))
                    angles.append(-angle)
                    images.append(image)
                    angles.append(angle)
            X_train = np.array(images)
            y_train = np.array(angles)
            yield sklearn.utils.shuffle(X_train, y_train)
# ------ Training/Validation data loading -------
path = './data_Udacity_2/'
samples = get_driving_log(path)
# 80/20 split of the CSV rows into training and validation sets.
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
# compile and train the model using the generator function
train_generator = generator(path, train_samples, batch_size=32)
validation_generator = generator(path, validation_samples, batch_size=32)
# Resize images as required for network.
def resize_im(x):
    """Resize a batch of image tensors to 66x160 (the NVIDIA model's input size).

    keras' bundled tensorflow is imported inside the function — presumably so
    the Lambda layer can resolve it when a saved model is reloaded; confirm.
    """
    from keras.backend import tf
    return tf.image.resize_images(x, (66, 160))
# NVIDIA end-to-end driving model (Bojarski et al. architecture).
model = Sequential()
model.add(Cropping2D(cropping=((70,20), (0,0)), input_shape=(160,320,3))) # Cropping images to delete sky and car hood area.
model.add(Lambda(lambda x: x / 127.5 - 1)) # normalize pixels to [-1, 1]
model.add(Lambda(resize_im)) # resizing images.
# 5x5 filters with elu activation for introducing nonlinearity.
model.add(Conv2D(24, 5, 5, activation='elu', subsample=(2, 2)))
model.add(Conv2D(36, 5, 5, activation='elu', subsample=(2, 2)))
model.add(Conv2D(48, 5, 5, activation='elu', subsample=(2, 2)))
model.add(Conv2D(64, 3, 3, activation='elu'))
model.add(Conv2D(64, 3, 3, activation='elu'))
model.add(Dropout(0.5)) # dropout with rate 0.5 (fraction of units dropped) to reduce overfitting.
model.add(Flatten())
model.add(Dense(100, activation='elu'))
model.add(Dense(50, activation='elu'))
model.add(Dense(10, activation='elu'))
model.add(Dense(1)) # single steering-angle output
model.summary()
# Checkpoint keeps the best model (by validation loss) across epochs.
# NOTE(review): save_best_only='true' is a string; it works because any
# non-empty string is truthy, but the boolean True was probably intended.
checkpoint = ModelCheckpoint('model-{epoch:03d}.h5', monitor='val_loss', verbose=0, save_best_only='true' , mode='auto')
# Compile model with mean squared error loss and the adam optimizer.
model.compile(loss='mse', optimizer='adam')
model.fit_generator(train_generator, samples_per_epoch = len(train_samples), validation_data=validation_generator, nb_val_samples=len(validation_samples), nb_epoch=5, callbacks=[checkpoint], verbose=1)
model.save('model.h5')
# Explicitly end tensorflow session
from keras import backend as K
K.clear_session()
| ajdhole/Udacity-Behavioral-Cloning-P3 | model.py | model.py | py | 4,708 | python | en | code | 0 | github-code | 13 |
71340824658 | import requests
from bs4 import BeautifulSoup
src_url = 'https://vjudge.net/'
def fetch(url):
    """GET *url* with a desktop-Chrome User-Agent and return the response."""
    user_agent = ('Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36')
    return requests.get(url, headers={'User-Agent': user_agent})
def parse(resp):
    """Extract OJ logo image paths and judge-site links from the vjudge home page.

    :param resp: requests Response for the vjudge front page.
    :return: (image_paths, site_urls) as two parallel lists.
    """
    soup = BeautifulSoup(resp.text, 'lxml')
    content = soup.find('div', attrs={'class': 'col-md-8 push-md-4'})
    oj_url, oj_img = [], []
    for anchor in content.find_all('a'):
        img_tag = anchor.find('img')
        # A couple of anchors in this panel carry no logo image; skip them.
        if img_tag:
            oj_url.append(anchor['href'])
            oj_img.append(img_tag['src'])
    return oj_img, oj_url
def main():
    """Scrape vjudge: write all judge links to oj.txt and save each logo as <n>.jpg."""
    front_page = fetch(src_url)
    img_paths, site_urls = parse(front_page)
    with open('oj.txt', 'w') as fp:
        for site in site_urls:
            fp.write(site + '\n')
    # Logos are served relative to the site root; save them as 0.jpg, 1.jpg, ...
    for idx, img_path in enumerate(img_paths):
        image_resp = fetch(src_url + img_path)
        with open(u'{name}.jpg'.format(name=idx), 'wb') as fp:
            fp.write(image_resp.content)
if __name__ == '__main__':
main()
| civp/spider-zoo | sites.py | sites.py | py | 1,129 | python | en | code | 1 | github-code | 13 |
34177523344 | import cv2
import numpy as np
from torch.nn import functional as F
class VideoRecorder(object):
    """Accumulates per-camera frames (image + heatmap + attention overlays)
    and writes them out as AVI videos once enough steps have been recorded.

    NOTE(review): ``output_filename`` is stored but never used — the video
    names in create_videos() are hardcoded per camera; confirm intent.
    """
    def __init__(self, num_img, output_filename="output.avi"):
        # frames: camera index -> list of overlay frames; img_frames holds
        # the raw (denormalized) images for the *_img_only videos.
        self.frames = {}
        self.img_frames = {}
        self.num_steps = (
            100  # TODO: Don't finish on hardcode value, trigger end from outside.
        )
        self.output_filename = output_filename
        self.num_img = num_img
    def record(
        self,
        img=None,
        attn=None,
        num_lang_tokens=None,
        num_pat_img=None,
        num_heads=None,
        heatmap=None,
    ):
        """Append one step of frames for every (batch, camera) pair.

        assumes img is (batch, num_img, channels, H, W) with RGB in channels
        3:6 normalized to [-1, 1] — TODO confirm against the caller.
        """
        bs = img.shape[0]
        for i in range(bs):
            for j in range(self.num_img):
                single_img = img[i, j, 3:6].cpu().numpy()
                # Denormalize img from [-1, 1] to [0, 255].
                single_img = ((single_img + 1.0) * 255.0 / 2.0).astype(np.uint8)
                single_img = np.transpose(single_img, (1, 2, 0))
                single_hm = heatmap[i, j].cpu().numpy()
                single_attn = attn[
                    -1
                ]  # Last attention layer - [bs * num_img * num_heads, num_pat_img * num_pat_img + num_lang_tokens, num_pat_img * num_pat_img + num_lang_tokens]
                # Rows for this (batch, camera) pair are contiguous per head.
                batch_size = i * num_heads * self.num_img
                start_index = batch_size + j * num_heads
                single_attn_mean = single_attn[
                    start_index : start_index + num_heads
                ].mean(
                    dim=0
                )  # [num_pat_img * num_pat_img + num_lang_tokens, num_pat_img * num_pat_img + num_lang_tokens]
                # Drop the language tokens, keeping patch-to-patch attention only.
                single_attn_mean = single_attn_mean[
                    num_lang_tokens:, num_lang_tokens:
                ]  # [num_pat_img * num_pat_img, num_pat_img * num_pat_img]
                sum_attn = single_attn_mean.sum(dim=0)  # [num_pat_img * num_pat_img]
                # Reshape the attention weights to a 2D spatial structure
                single_attn_map = sum_attn.reshape(
                    num_pat_img, num_pat_img
                )  # [num_pat_img, num_pat_img]
                single_attn_map = single_attn_map.unsqueeze(0).unsqueeze(0)
                h, w, _ = single_img.shape
                # Upsample the patch-level attention to image resolution.
                upscaled_attention_map = (
                    F.interpolate(single_attn_map, size=(h, w), mode="bilinear")
                    .squeeze()
                    .cpu()
                    .numpy()
                )
                overlay = self.create_overlay(
                    single_img, single_hm, upscaled_attention_map
                )
                if j not in self.frames:
                    self.frames[j] = []
                    self.img_frames[j] = []
                self.frames[j].append(overlay)
                self.img_frames[j].append(single_img)
        # If the number of steps is reached, create the video
        if all(len(frames) >= self.num_steps for frames in self.frames.values()):
            self.create_videos()
            self.frames = {}  # Clear the frames
            self.img_frames = {}
    def create_overlay(self, img, heatmap, attn):
        """Blend image (60%), colorized heatmap (40%), then attention (40%) into one frame."""
        heatmap = cv2.normalize(heatmap, None, 0, 255, cv2.NORM_MINMAX)
        heatmap_colored = cv2.applyColorMap(
            heatmap.astype(np.uint8), cv2.COLORMAP_JET
        ).astype(np.uint8)
        attn = cv2.normalize(attn, None, 0, 255, cv2.NORM_MINMAX)
        attention_colored = cv2.applyColorMap(
            attn.astype(np.uint8), cv2.COLORMAP_HOT
        ).astype(np.uint8)
        overlay1 = cv2.addWeighted(img, 0.6, heatmap_colored, 0.4, 0)
        overlay_final = cv2.addWeighted(overlay1, 0.6, attention_colored, 0.4, 0)
        return overlay_final
    def create_videos(self):
        """Write one overlay video and one image-only video per camera."""
        for camera_idx, frames in self.frames.items():
            if frames:
                self.create_video(frames, f"video_camera_{camera_idx}.avi")
        for camera_idx, img_frames in self.img_frames.items():
            if img_frames:
                self.create_video(img_frames, f"video_camera_{camera_idx}_img_only.avi")
    def create_video(self, frames, output_filename):
        """Encode *frames* (all same size) into an XVID AVI at 20 fps."""
        height, width, _ = frames[0].shape
        fourcc = cv2.VideoWriter_fourcc(*"XVID")
        video_out = cv2.VideoWriter(output_filename, fourcc, 20.0, (width, height))
        for frame in frames:
            video_out.write(frame)
        video_out.release()
    def save_image(self, frame, output_filename):
        """Write a single frame to disk as an image file."""
        cv2.imwrite(output_filename, frame)
| LuisLechugaRuiz/general_manipulation | general_manipulation/utils/video_recorder.py | video_recorder.py | py | 4,448 | python | en | code | 0 | github-code | 13 |
73644966416 | import glob, os
# https://gis.stackexchange.com/questions/227271/where-are-the-temporary-output-layers-from-qgis-processing-algorithms-stored
# https://gis.stackexchange.com/questions/169090/how-to-open-and-save-qgis-datasource-with-to-lower-case-fields
# Define path to directory of your csv files
# path_to_csv = "T:/ideo_bfc/DONNEES/PLATEFORME/ENTRANTE/SRIT/"
# Raw string is required here: in a plain literal the leading "\00" is
# parsed as a NUL escape character, silently corrupting the path.
path_to_csv = r"G:\00_data_ref\sdis\Telechargement_1521757164_6810\6c7f678e-aecb-46bf-a1b0-7785867cec2b_1521757164_7975"

# Set current directory to path of csv files
os.chdir(path_to_csv)

# Find each .csv file and load it as a delimited-text vector layer
# (WGS84 points built from the Longitude/Latitude columns).
for fname in glob.glob("*.csv"):
    # os.path.join supplies the separator that the original concatenation
    # (path_to_csv + fname) was missing.
    uri = "file:///" + os.path.join(path_to_csv, fname) + \
          "?delimiter=%s&crs=epsg:4326&xField=%s&yField=%s" % (",", "Longitude", "Latitude")
    name = fname.replace('.csv', '')
    lyr = QgsVectorLayer(uri, name, 'delimitedtext')
    QgsMapLayerRegistry.instance().addMapLayer(lyr)

# https://gis.stackexchange.com/questions/131104/exporting-several-files-at-same-time-in-qgis
# Save every loaded layer as an ESRI shapefile next to the source csv files.
for vLayer in iface.mapCanvas().layers():
    QgsVectorFileWriter.writeAsVectorFormat(vLayer,
        os.path.join(path_to_csv, vLayer.name() + ".shp"), "utf-8",
        vLayer.crs(), "ESRI Shapefile")
| jerbou/Python_Qgis_stuff | multi_csv.py | multi_csv.py | py | 1,233 | python | en | code | 0 | github-code | 13 |
12619882362 | from .. import db
from . import BaseModel
from sqlalchemy import Column, Date, ForeignKey, Integer, JSON, String, Float, Boolean, BigInteger
from sqlalchemy.orm import relationship, backref
class BlockchainNetwork(BaseModel):
    """SQLAlchemy model for an EVM blockchain network (mainnet, testnets, ...)."""
    __tablename__ = 'blockchain_network'
    name = Column(String(1000), nullable=False, unique=True)
    short_name = Column(String(1000), nullable=False, unique=True)
    # EIP-155 chain id and legacy network id; both unique per network.
    chain_id = Column(Integer, nullable=False, unique=True)
    network_id = Column(Integer, nullable=False, unique=True)
    # Whether this application supports the network (off by default).
    supported = Column(Boolean, default=False)
    # Native currency symbol (e.g. "ETH").
    currency = Column(String(30), nullable=False)
    # Base URL of the network's block explorer, if any.
    block_explorer = Column(String(500))
    def __str__(self):
        return self.name
class NFTContract(BaseModel):
    """SQLAlchemy model for an NFT collection contract on some network.

    NOTE(review): the table is named 'collection' even though the class is
    NFTContract — presumably historical; confirm before renaming.
    """
    __tablename__ = 'collection'
    name = Column(String(3000))
    symbol = Column(String(1000))
    contract_address = Column(String(50), nullable=False)
    # Token standard, e.g. ERC721 / ERC1155.
    contract_type = Column(String(50), nullable=False)
    image = Column(String(1000))
    verified = Column(Boolean, default=False)
    # How many times this collection has been looked up (starts at 1).
    search_count = Column(BigInteger, default=1)
    network_id = Column(Integer, ForeignKey('blockchain_network.id'))
    network = relationship('BlockchainNetwork', backref='nft_contract')
    def __str__(self):
        return self.contract_address
| ljrahn/digi-markets | services/server/src/models/web3.py | web3.py | py | 1,283 | python | en | code | 0 | github-code | 13 |
71913541779 | # description : makes text file of random people (first name, last name, job position, favorite color)
# author : Cédric-Antoine Ouellet
# github : www.github.com/cedricouellet
# website : cedricao.tk
import random
import os
from time import sleep
# putting OOP into practice
class Person():
    '''A template for a person with a first name, last name, job title and favorite color'''

    # Word pools the generator samples from.  ``with`` blocks ensure the
    # file handles are closed (the original left them open).
    with open('first_names.txt', 'r') as _f:
        FIRST_NAMES = _f.readlines()
    with open('last_names.txt', 'r') as _f:
        LAST_NAMES = _f.readlines()
    with open('jobs.txt', 'r') as _f:
        JOBS = _f.readlines()
    with open('colors.txt', 'r') as _f:
        COLORS = _f.readlines()
    del _f  # don't keep the file object around as a class attribute

    def __init__(self, first_name, last_name, job_title, favorite_color):
        self.firstName = first_name
        self.lastName = last_name
        self.job = job_title
        self.color = favorite_color

    @staticmethod
    def make_person():
        '''Summary: Used by method `generate_list` to make a list of people.
        Assigns a random `firstName`, `lastName`, `job` and `color`.
        Params: `none`.
        Output: A `Person` with attributes.
        '''
        firstName = random.choice(Person.FIRST_NAMES)
        lastName = random.choice(Person.LAST_NAMES)
        job = random.choice(Person.JOBS)
        color = random.choice(Person.COLORS)
        return Person(firstName, lastName, job, color)

    @staticmethod
    def generate_list(amount, name):
        '''Summary: Generates a `.txt` file of people (first name, last name,
        job title, favorite color), one per line, appending if the file exists.
        Params:
            (int) amount : number of people to be created.
            (string) name : name of the file (without `.txt`).
        Output: `.txt` file.
        '''
        people = [Person.make_person() for _ in range(amount)]
        # 'a' appends to an existing file; 'x' creates a new one.  The
        # ``with`` block guarantees the handle is closed (the original
        # never closed it).
        mode = 'a' if os.path.exists(name + '.txt') else 'x'
        with open(name + '.txt', mode) as f:
            for p in people:
                # readlines() keeps each sampled word's trailing newline;
                # strip it before writing the combined record.
                f.write('{} {}, {}, {}\n'.format(p.firstName.rstrip('\n'),
                                                 p.lastName.rstrip('\n'),
                                                 p.job.rstrip('\n'),
                                                 p.color.rstrip('\n')))
def main():
    """Demo entry point for the person generator."""
    # Test: creates a file named "people_test_list" with 15 people
    Person.generate_list(amount=15, name='people_test_list')
if __name__ == "__main__":
    main()
| cedricouellet/py-random-person-generator | main.py | main.py | py | 3,017 | python | en | code | 0 | github-code | 13 |
40852798881 | import glob
import cv2
import shutil, os
import numpy as np
from matplotlib import pyplot as plt
from random import randint
def blending(pathRgb, pathSmoke):
    """Alpha-blend a smoke image (with alpha channel) over a background image.

    :param pathRgb: path to the background image (loaded with cv2, i.e. BGR
        channel order despite the variable names — note for reviewers).
    :param pathSmoke: path to an RGBA/BGRA smoke image; channel 3 is alpha.
    :return: the blended frame, downscaled by 1.5x, as uint8.
    """
    src = cv2.imread(pathRgb)
    # IMREAD_UNCHANGED keeps the alpha channel.
    smoke = cv2.imread(pathSmoke, cv2.IMREAD_UNCHANGED)
    height, width, depth = src.shape
    # print(smoke.shape)
    # Stretch the smoke layer to the background's size.
    smoke = cv2.resize(smoke, (width, height))
    # print(smoke.shape)
    smokeRgb = smoke[:, :, 0:3]
    smokeAlpha = smoke[:, :, 3]
    src = src.astype(float)
    smokeRgb = smokeRgb.astype(float)
    # Normalize alpha to [0, 1] and replicate it across the 3 color channels.
    smokeAlpha = smokeAlpha.astype(float) / 255.0
    smokeAlpha = cv2.merge([smokeAlpha, smokeAlpha, smokeAlpha])
    # print(smokeAlpha.shape, ' ' , src.shape)
    # Standard "over" compositing: out = (1-a)*background + a*smoke.
    src = cv2.multiply(1.0 - smokeAlpha, src)
    smokeRgb = cv2.multiply(smokeAlpha, smokeRgb)
    ret = cv2.add(src, smokeRgb)
    ret = ret.astype(np.uint8)
    # Shrink the composited frame by a factor of 1.5.
    height, width, depth = ret.shape
    width = round(width / 1.5)
    height = round(height / 1.5)
    ret = cv2.resize(ret, (width, height))
    return(ret)
# Source directories: smoke sprites (PNG with alpha) and SUN RGB-D scenes.
smokeImg = '/home/citlab/SmokeData/smokepatent/'
envirImg = '/home/citlab/SmokeData/SUNRGBD-cleanup/SUNRGBD/trainval/rgb/'
fileSmoke = [f for f in glob.glob(smokeImg + "**/*.png", recursive=True)]
fileEnvir = [f for f in glob.glob(envirImg + "**/*.jpg", recursive=True)]
# shutil.copy(fileSmoke[0], '/home/citlab/SmokeData/data/train/neg')
numSmoke = len(fileSmoke)
# Preview 10 composites: environment images are consumed 4 at a time
# (id1/id3 were for negatives, id2/id4 for train/val positives).
for num in range(10):
    print(num)
    id1 = num * 4
    id2 = id1 + 1
    id3 = id2 + 1
    id4 = id3 + 1
    # Pick a random smoke sprite for each positive sample.
    x1 = randint(0, numSmoke - 1)
    x2 = randint(0, numSmoke - 1)
    # shutil.copy(fileEnvir[id1], '/home/citlab/SmokeData/data/train/neg')
    # shutil.copy(fileEnvir[id3], '/home/citlab/SmokeData/data/val/neg')
    name1 = '/home/citlab/SmokeData/data/train/pos/' + str(num) + '.png'
    name2 = '/home/citlab/SmokeData/data/val/pos/' + str(num) + '.png'
    src1 = blending(fileEnvir[id2], fileSmoke[x1])
    src2 = blending(fileEnvir[id4], fileSmoke[x2])
    # cv2.imwrite(name1, blending(fileEnvir[id2], fileSmoke[x1]))
    # cv2.imwrite(name2, blending(fileEnvir[id4], fileSmoke[x2]))
    # cv2.imshow("tmp1 ", src1)
    # cv2.imshow("tmp2", src2)
    # if (cv2.waitKey() == 'q') :
    #     break
    # plt.title('RGB image')
    # Only the train composite is displayed; writes above are disabled.
    plt.imshow(src1)
    plt.show()
    # print(fileEnvir[id2], ' ', fileSmoke[x1])
    # print(fileEnvir[id4], ' ', fileSmoke[x2])
# cv2.imwrite('/home/citlab/tmp.png', blending(fileEnvir[0], fileSmoke[0]))
73758952979 | # Run this file in valgrind with:
# PYTHONMALLOC=malloc valgrind --tool=memcheck --leak-check=yes --show-leak-kinds=definite --track-origins=yes --num-callers=12 --suppressions=valgrind-python.supp python3 test_memleak_granulator.py
# There should not be any definitely lost bytes.
from pyo import *
import random
s = Server(audio="manual").boot().start()
sr = int(s.getSamplingRate())
t1 = DataTable(sr, init=[random.uniform(-1,1) for i in range(sr)])
t2 = DataTable(sr, init=[random.uniform(-1,1) for i in range(sr)])
env1 = WinTable()
env2 = WinTable(3)
a = Granulator(t1, env1)
a.table = t2
a.env = env2
a.pitch = Sig(0.5)
a.pos = Sig(0.5)
a.dur = Sig(0.1)
a.grains = 4
a.grains = 12
b = Looper(t1)
b.table = t2
b.pitch = Sig(1.5)
b.start = Sig(0.5)
b.dur = Sig(0.5)
b.xfade = Sig(10.5)
b.xfadeshape = 2
c = Granule(t1, env1)
c.table = t2
c.env = env2
c.dens = Sig(50.5)
c.pitch = Sig(0.5)
c.pos = Sig(0.5)
c.dur = Sig(0.7)
d = Particle(t1, env1, chnls=2)
d.table = t2
d.env = env2
d.dens = Sig(50.5)
d.pitch = Sig(0.5)
d.pos = Sig(0.5)
d.dur = Sig(0.7)
d.dev = Sig(0.5)
d.pan = Sig(0.5)
e = Particle2(t1, env1, chnls=2)
e.table = t2
e.env = env2
e.dens = Sig(50.5)
e.pitch = Sig(0.5)
e.pos = Sig(0.5)
e.dur = Sig(0.7)
e.dev = Sig(0.5)
e.pan = Sig(0.5)
e.filterfreq = Sig(2000)
e.filterq = Sig(10.5)
e.filtertype = Sig(0.5)
s.process()
s.process()
s.stop()
s.shutdown()
| belangeo/pyo | tests/valgrind/test_memleak_granulator.py | test_memleak_granulator.py | py | 1,387 | python | en | code | 1,221 | github-code | 13 |
7704566220 | from swarmops.Optimize import SingleRun
from swarmops import tools
########################################################################
class LUS(SingleRun):
    """
    Perform a single optimization run using Local Unimodal Sampling (LUS).
    In practice, you would typically perform multiple optimization runs using
    the MultiRun-class. The reason is that LUS is a heuristic optimizer so
    there is no guarantee that an acceptable solution is found in any single
    run. It is more likely that an acceptable solution is found if you perform
    multiple optimization runs.
    """
    # Name of this optimizer.
    name = "LUS"
    name_full = "Local Unimodal Sampling"
    # Number of control parameters for LUS. Used by MetaFitness-class.
    num_parameters = 1
    # Lower boundaries for the control parameters of LUS. Used by MetaFitness-class.
    parameters_lower_bound = [0.1]
    # Upper boundaries for the control parameters of LUS. Used by MetaFitness-class.
    parameters_upper_bound = [100.0]
    @staticmethod
    def parameters_dict(parameters):
        """
        Create and return a dict from a list of LUS parameters.
        This is useful for printing the named parameters.
        :param parameters: List with LUS parameters assumed to be in the correct order.
        :return: Dict with LUS parameters.
        """
        return {'gamma': parameters[0]}
    @staticmethod
    def parameters_list(gamma):
        """
        Create a list with LUS parameters in the correct order.
        :param gamma: Gamma-parameter (see paper reference for explanation).
        :return: List with LUS parameters.
        """
        return [gamma]
    # Default parameters for LUS which will be used if no other parameters are specified.
    parameters_default = [3.0]
    def __init__(self, problem, parameters=parameters_default, parallel=False, *args, **kwargs):
        """
        Create object instance and perform a single optimization run using LUS.
        :param problem:
            The problem to be optimized. Instance of Problem-class.
        :param parameters:
            Control parameters for LUS.
        :param parallel: False. LUS cannot run in parallel except through MultiRun.
        :return:
            Object instance. Get the optimization results from the object's variables.
            - best is the best-found solution.
            - best_fitness is the associated fitness of the best-found solution.
            - fitness_trace is an instance of the FitnessTrace-class.
        """
        # Copy arguments to instance variables.
        self.problem = problem
        # Unpack control parameters.
        gamma = parameters[0]
        # Derived control parameter: the sampling range d is halved after
        # every gamma * dim consecutive failed samples, because
        # (0.5 ** (1/(gamma*dim))) ** (gamma*dim) == 0.5.
        self.decrease_factor = 0.5 ** (1.0 / (gamma * problem.dim))
        # Initialize parent-class which also starts the optimization run.
        SingleRun.__init__(self, *args, **kwargs)
    def _optimize(self):
        """
        Perform a single optimization run.
        This function is called by the parent-class.
        """
        # Convenience variable for fitness function.
        f = self.problem.fitness
        # Convenience variables for search-space boundaries.
        lower_init = self.problem.lower_init
        upper_init = self.problem.upper_init
        lower_bound = self.problem.lower_bound
        upper_bound = self.problem.upper_bound
        # Initialize the range-vector to full search-space.
        d = upper_bound - lower_bound
        # Search-space dimensionality.
        # NOTE(review): dim is assigned but never used below — confirm
        # whether it can be removed.
        dim = self.problem.dim
        # Initialize x with random position in search-space.
        x = tools.rand_array(lower=lower_init, upper=upper_init)
        # Compute fitness of initial position.
        fitness = f(x)
        # Update the best-known fitness and position.
        # The parent-class is used for this.
        self._update_best(fitness=fitness, x=x)
        # Perform optimization iterations until the maximum number
        # of fitness evaluations has been performed.
        # Count starts at one because we have already calculated fitness once above.
        evaluations = 1
        while evaluations < self.max_evaluations:
            # Sample new position y from the bounded surroundings
            # of the current position x.
            y = tools.sample_bounded(x=x, d=d, lower=lower_bound, upper=upper_bound)
            # Compute new fitness.  The current fitness is passed as a limit
            # so the problem may abort evaluation early (pre-emptive fitness).
            new_fitness = f(y, limit=fitness)
            # If improvement to fitness.
            if new_fitness < fitness:
                # Update fitness and position.
                fitness = new_fitness
                x = y
                # Update the best-known fitness and position.
                # The parent-class is used for this.
                self._update_best(fitness=fitness, x=x)
            else:
                # Otherwise decrease the search-range.
                d *= self.decrease_factor
            # Call parent-class to print status etc. during optimization.
            self._iteration(evaluations)
            # Increment counter.
            evaluations += 1
########################################################################
| Hvass-Labs/swarmops | swarmops/LUS.py | LUS.py | py | 5,244 | python | en | code | 70 | github-code | 13 |
9087543830 | #https://www.acmicpc.net/problem/20438
# BOJ 20438 "Attendance Check" (greedy + prefix sums)
#import sys
#input = sys.stdin.readline  # uncomment for faster input on large tests
# n students (indices 3..n+2 are used), k sleepers, q attendance codes, m queries.
# NOTE: k and q are read but unused; the counts are implied by the input lines.
n, k, q, m = map(int, input().split())
students = [0]*(n+3)
sleep = set(map(int, input().split()))
attend = list(map(int, input().split()))
attendPossible = set()
# Every multiple of each awake caller's code is marked present,
# except students who are asleep.
for code in attend:
    if code in sleep:
        continue
    for ncode in range(code, n+3, code):
        if ncode not in sleep:
            attendPossible.add(ncode)
# Prefix sums of "present" flags so every range query is O(1).
for i in range(3, n+3):
    students[i] += students[i-1]
    if i in attendPossible:
        students[i] += 1
# For each interval [s, e]: answer = interval length minus present count.
for _ in range(m):
    s, e = map(int, input().split())
    print((e-s+1)-(students[e]-students[s-1]))
# Prefix sums are required to meet the time limit in Python.
6589445746 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
' Simple ORM using metaclass '
# Base descriptor: maps one class attribute to one database column.
class Field(object):
    def __init__(self, name, column_type):
        # Column name and its SQL type, e.g. ('id', 'bigint').
        self.name = name
        self.column_type = column_type
    def __str__(self):
        # e.g. '<StringField:username>' for subclasses.
        return '<{}:{}>'.format(self.__class__.__name__, self.name)
# String column, stored as varchar(100).
class StringField(Field):
    def __init__(self, name):
        # Py3 zero-argument super() replaces the redundant explicit form;
        # the file already uses Python-3-only syntax (metaclass kwarg).
        super().__init__(name, 'varchar(100)')
# Integer column, stored as bigint.
class IntegerField(Field):
    def __init__(self, name):
        # Py3 zero-argument super() replaces the redundant explicit form.
        super().__init__(name, 'bigint')
# Metaclass: collects all Field attributes into __mappings__ and stores the
# table name (assumed identical to the class name) in __table__.
class ModelMetaclass(type):
    def __new__(cls, name, bases, attrs):
        # The abstract base class 'Model' gets no mapping machinery.
        if name == 'Model':
            return type.__new__(cls, name, bases, attrs)
        print('Found model: %s' % name)
        # Gather every Field-typed class attribute: attribute name -> Field.
        mappings = {key: val for key, val in attrs.items() if isinstance(val, Field)}
        for key, val in mappings.items():
            print('Found mapping: %s ==> %s' % (key, val))
        # Strip the Field descriptors so attribute access on instances falls
        # through to the dict storage instead of hitting class attributes.
        for key in mappings:
            attrs.pop(key)
        attrs['__mappings__'] = mappings
        attrs['__table__'] = name
        return type.__new__(cls, name, bases, attrs)
# Base class for ORM models: a dict subclass whose attribute access is
# forwarded to the underlying dict storage.
class Model(dict, metaclass=ModelMetaclass):
    def __init__(self, **kw):
        super().__init__(**kw)
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            # Report the concrete subclass name (e.g. 'User'); the previous
            # hard-coded 'Model' misattributed errors raised on subclasses.
            raise AttributeError(r"'%s' object has no attribute '%s'" % (type(self).__name__, key))
    def __setattr__(self, key, value):
        # Subclassing dict lets attribute writes go straight into the dict.
        self[key] = value
    def save(self):
        """Build and print the parameterized INSERT statement plus its args."""
        fields = []
        params = []
        args = []
        for k, v in self.__mappings__.items():
            fields.append(v.name)
            params.append('?')
            args.append(getattr(self, k, None))
        sql = 'insert into %s (%s) values (%s)' % (self.__table__, ','.join(fields), ','.join(params))
        print('SQL: %s' % sql)
        print('ARGS: %s' % str(args))
# testing code:
class User(Model):
    # Column declarations; ModelMetaclass moves them into __mappings__.
    id = IntegerField('id')
    name = StringField('username')
    email = StringField('email')
    password = StringField('password')
# Instantiating User triggers the metaclass prints; save() prints the SQL
# and the positional argument list.
u = User(id=12345, name='Michael', email='20. 单元测试@orm.org', password='my-pwd')
u.save()
| lazzman/PythonLearn | Python3_Learn/7. 面向对象编程高级/元类案例orm.py | 元类案例orm.py | py | 2,700 | python | en | code | 4 | github-code | 13 |
42344393037 | import codecs
from pydoc import describe
import time, array
import numpy as np
import os, sys
import imageio
import cv2
from tqdm import tqdm_notebook as tqdm
from PIL import Image
import keras
from keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dropout, Dense, LSTM, TimeDistributed, RepeatVector, BatchNormalization
from keras.models import Model, Sequential
from keras import regularizers
# from tensorflow.keras import layers, models
# from tensorflow.keras.optimizers import Adam
import PIL.Image as Image
from keras.preprocessing import image
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
def convert_asm_to_images(sourcepath, destpath):
    """Render every file in *sourcepath* as a 64x64 grayscale PNG in *destpath*.

    The first 64*64 raw bytes of each file become pixel values; files shorter
    than 64*64 bytes will fail the reshape below.
    """
    start_time=time.time()
    files=os.listdir(sourcepath)
    print('SourcePath: ',sourcepath)
    print('Destination path', destpath)
    print('converting..')
    width = 64  # output image is width x width pixels
    for file in tqdm(files):
        filepath = os.path.join(sourcepath, file)
        # 'with' guarantees the handle is closed even when a read fails
        # (the original left the file open on error, and computed an
        # unused rem/ln pair).
        with open(filepath, 'rb') as f:
            a = array.array("B")
            a.frombytes(f.read())
        g = np.reshape(a[:width*width], (width, width))
        g = np.uint8(g)
        imageio.imwrite(os.path.join(destpath, file + '.png'), g)
    print('Files converte successfully')
    print('Time take to converte file: ',(time.time()-start_time)/3600)
def create_dir(directory):
    """Create *directory* (including parents) if it does not already exist.

    Re-raises OSError only when the path exists but is not a directory,
    matching the original logic without the check-then-create race.
    """
    try:
        os.makedirs(directory, exist_ok=True)
    except OSError:
        if not os.path.isdir(directory):
            raise
size = 256, 256 # target (width, height) for the resized CNN input images
def resize_and_crop(img_path, size, crop_type='middle'):
    """Resize every image in *img_path* to *size*, cropping to keep aspect.

    crop_type ('top' | 'middle' | 'bottom') picks which slice survives when
    the aspect ratios differ.  Each result is saved as '<name>_cnn.png' and
    the original file is deleted.

    NOTE(review): os.chdir() mutates the process-wide working directory as a
    side effect, and Image.ANTIALIAS is removed in newer Pillow (use
    Image.LANCZOS) -- confirm the pinned Pillow version.
    """
    files = os.listdir(img_path)
    for file in files:
        name = img_path + "/" + file
        cnn_name = str(file)+"_cnn.png"
        os.chdir(img_path)
        img = Image.open(file)
        # Aspect ratios of the source image and of the requested size.
        img_ratio = img.size[0] / float(img.size[1])
        ratio = size[0] / float(size[1])
        if ratio > img_ratio:
            # Source relatively taller: fit the width, then crop the height.
            img = img.resize((size[0], int(round(size[0] * img.size[1] / img.size[0]))),
                Image.ANTIALIAS)
            if crop_type == 'top':
                box = (0, 0, img.size[0], size[1])
            elif crop_type == 'middle':
                box = (0, int(round((img.size[1] - size[1]) / 2)), img.size[0],
                    int(round((img.size[1] + size[1]) / 2)))
            elif crop_type == 'bottom':
                box = (0, img.size[1] - size[1], img.size[0], img.size[1])
            else :
                raise ValueError('ERROR: invalid value for crop_type')
            img = img.crop(box)
        elif ratio < img_ratio:
            # Source relatively wider: fit the height, then crop the width.
            img = img.resize((int(round(size[1] * img.size[0] / img.size[1])), size[1]),
                Image.ANTIALIAS)
            if crop_type == 'top':
                box = (0, 0, size[0], img.size[1])
            elif crop_type == 'middle':
                box = (int(round((img.size[0] - size[0]) / 2)), 0,
                    int(round((img.size[0] + size[0]) / 2)), img.size[1])
            elif crop_type == 'bottom':
                box = (img.size[0] - size[0], 0, img.size[0], img.size[1])
            else :
                raise ValueError('ERROR: invalid value for crop_type')
            img = img.crop(box)
        else :
            # Aspect ratios already match: plain resize, no crop needed.
            img = img.resize((size[0], size[1]), Image.ANTIALIAS)
        os.chdir(img_path)
        img.save(cnn_name, "PNG")
        os.remove(name)
def cnn_model():
    """Assemble the binary-classification CNN for 256x256 RGB inputs."""
    net = keras.models.Sequential()
    # First stage fixes the input shape; its pooling layer has no padding.
    net.add(Conv2D(filters=128, kernel_size=(5, 5), activation='relu', input_shape=(256, 256, 3)))
    net.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
    net.add(Dropout(0.2))
    # Remaining conv stages (filters, kernel), each followed by 'same'-padded
    # max-pooling and dropout -- exactly the original layer stack.
    for filters, kernel in ((64, (3, 3)), (32, (3, 3)), (28, (2, 2))):
        net.add(Conv2D(filters, kernel, activation='relu'))
        net.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'))
        net.add(Dropout(0.2))
    # Classifier head: flatten, taper 128 -> 64 -> 32, sigmoid output.
    net.add(Flatten())
    for units in (128, 64, 32):
        net.add(Dense(units, activation='relu'))
    net.add(Dense(1, activation='sigmoid'))
    return net
dest_list = ["test_benign", "test_malware", "valid_benign", "valid_malware", "train_benign", "train_malware", "test", "valid", "train"]
dir_path = os.path.dirname(os.path.realpath(__file__))
in_path = os.listdir(dir_path)
# directory Create
# Only 'test' (index 6) and 'valid' (7) are created here; 'train' appears
# implicitly below because create_dir uses os.makedirs (parents included).
for i in range(6,8):
    destdir = os.path.join(dir_path, dest_list[i])
    create_dir(destdir)
test_dir = dir_path + "/test/"
valid_dir = dir_path + "/valid/"
train_dir = dir_path + "/train/"
# Per-class subfolders: <split>/<split>_benign and <split>/<split>_malware.
create_dir(test_dir + dest_list[0])
create_dir(test_dir + dest_list[1])
create_dir(valid_dir + dest_list[2])
create_dir(valid_dir + dest_list[3])
create_dir(train_dir + dest_list[4])
create_dir(train_dir + dest_list[5])
# Image Create & Resize
# Input folders are named <split>_0 (-> *_benign) and <split>_1 (-> *_malware);
# each one is rendered to PNGs and then resized/cropped in place.
for sourcepath in in_path:
    image_path = os.path.join(dir_path,sourcepath)
    if os.path.isdir(image_path):
        if sourcepath == "test_0":
            destpath = os.path.join(test_dir, dest_list[0])
            convert_asm_to_images(image_path, destpath)
            resize_and_crop(destpath,size)
        elif (sourcepath == "test_1"):
            destpath = os.path.join(test_dir, dest_list[1])
            convert_asm_to_images(image_path, destpath)
            resize_and_crop(destpath,size)
        elif (sourcepath == "valid_0"):
            destpath = os.path.join(valid_dir, dest_list[2])
            convert_asm_to_images(image_path, destpath)
            resize_and_crop(destpath,size)
        elif (sourcepath == "valid_1"):
            destpath = os.path.join(valid_dir, dest_list[3])
            convert_asm_to_images(image_path, destpath)
            resize_and_crop(destpath,size)
        elif (sourcepath == "train_0"):
            destpath = os.path.join(train_dir, dest_list[4])
            convert_asm_to_images(image_path, destpath)
            resize_and_crop(destpath,size)
        elif (sourcepath == "train_1"):
            destpath = os.path.join(train_dir, dest_list[5])
            convert_asm_to_images(image_path, destpath)
            resize_and_crop(destpath,size)
# CNN code
IMAGE_WIDTH = 256
IMAGE_HEIGHT = 256
IMAGE_SHAPE = (IMAGE_WIDTH, IMAGE_HEIGHT)
BATCH_SIZE = 16
# Augmenting generator.  NOTE(review): train_gen is never used below --
# training actually reads through the rescale-only VT_gen instead.
train_gen = image.ImageDataGenerator(horizontal_flip = True,
                                rotation_range = 35,
                                rescale = 1./255,
                                zoom_range = [0.7,1.5],
                                brightness_range = (0.7,1.0),
                                width_shift_range = 0.1,
                                height_shift_range = 0.1)
VT_gen = image.ImageDataGenerator(rescale = 1./255)
batch_size = 16
# Just point this at the real data path.
dir_path = os.path.dirname(os.path.realpath(__file__))
train_genorator = VT_gen.flow_from_directory(dir_path+"/train/", target_size = IMAGE_SHAPE, batch_size = batch_size , class_mode='binary')
val_genorator = VT_gen.flow_from_directory(dir_path+"/valid/", shuffle = False, target_size = IMAGE_SHAPE, batch_size = batch_size, class_mode='binary')
test_genorator = VT_gen.flow_from_directory(dir_path+"/test/", shuffle = False, target_size = IMAGE_SHAPE, batch_size = batch_size, class_mode='binary')
print(train_genorator)
model = cnn_model()
model.compile(loss='binary_crossentropy', optimizer = 'adam', metrics=['accuracy'])
print("="*30)
print("model shape")
print("="*30)
print(model.summary())
print("[>] fitting start!!!!")
history = model.fit(train_genorator, epochs = 1, validation_data = val_genorator)
# To graph
# Training-history curves.  The x-axis is derived from the recorded history
# length so the plots stay valid for any epoch count: model.fit above runs
# only 1 epoch, so the previous hard-coded range(1, 11) made plt.plot raise
# a ValueError (x and y of different lengths).
acc_train = history.history['accuracy']
acc_val = history.history['val_accuracy']
epochs = range(1, len(acc_train) + 1)
plt.plot(epochs, acc_train, 'g', label='Training accuracy')
plt.plot(epochs, acc_val, 'b', label='validation accuracy')
plt.title('Training and Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
# loss to graph
loss_train = history.history['loss']
loss_val = history.history['val_loss']
epochs = range(1, len(loss_train) + 1)
plt.plot(epochs, loss_train, 'g', label='Training loss')
plt.plot(epochs, loss_val, 'b', label='validation loss')
plt.title('Training and Validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
print("[>] check start!!!!")
# Held-out evaluation: raw sigmoid predictions, then [loss, accuracy]
# (metrics follow the compile() call above).
X_pred = model.predict(test_genorator)
X_accurancy=model.evaluate(test_genorator)
print("pred :",X_pred)
print("accurancy :",X_accurancy)
model.save('cnn1.h5') | nebulayoon/malware-classification | cnn.py | cnn.py | py | 8,771 | python | en | code | 0 | github-code | 13 |
27702378803 | #Title: hourly_model.py
#Author: Tony Chang
#Date: 9/2/2015
#Abstract: Test code to transform daily temperature values into hourly under various algorithms
#Newton's Law of Cooling method
#dP/dt = k(P-A)
#where A is the ambient temperature, P is the phloem temperature, k is the rate of temperature transfer from tree to phloem
#from regression estimates, k = 0.5258 and k = 1.3357 under the Vienna and Ranch parameterizations respectively
#Newton's models
#P(t + del_t) = P(t) + k[P(t) -A(t)] del_t
#take del_t = 1 since minimum time step is 1 hour
#this will track Northern aspects well, but requires hourly temperatures (which are unavailable)
#
#alternatively we can use the Cosine method
#
#P_t = 0.5*(P_max + P_min) + 0.5*(P_max - P_min) * cos(pi + (pi*(t-t_dmin)/(t_dmax-t_dmin)) for t in (t_dmin, t_dmax)
# where P_max is the max daily temp, P_min is the min daily temp,
# t is the julian hour, t_dmin is the julian hour for day when the minimum temperature is reached,
# t_dmax is the julian hour for day when the maximum temperature is reached
import numpy as np
import matplotlib.pyplot as plt
import timeit
import os
import netCDF4 as nc
os.chdir('E:\\TOPOWX')
import geotool as gt
import util_dates as utld
import gdal as gdal
import time
import pandas as pd
import datetime as datetime
import pytz as pytz
import calendar as calendar
def cos_transform(P_max, P_min, t_dmin, t_dmax, t_dmin_next, t):
    """Interpolate an hourly temperature from daily extremes with a cosine.

    P_max/P_min are the day's max/min temperatures; t_dmin and t_dmax are
    the julian hours at which they occur, t_dmin_next is the next day's
    minimum hour, and t is the julian hour being evaluated.

    Fixes: the original called bare cos()/pi (NameError -- only 'np' is
    imported), and its `elif t > t_dmax` left P_t unbound when t == t_dmax.
    """
    mean = 0.5 * (P_max + P_min)
    amplitude = 0.5 * (P_max - P_min)
    if t < t_dmax:
        # Warming limb: cosine rises from the minimum toward the maximum.
        P_t = mean + amplitude * np.cos(np.pi + ((np.pi * (t - t_dmin)) / (t_dmax - t_dmin)))
    else:
        # Cooling limb (t >= t_dmax); both limbs agree exactly at t == t_dmax,
        # where each evaluates to P_max.
        P_t = mean + amplitude * np.cos((np.pi * (t - t_dmax)) / (t_dmin_next - t_dmax))
    return P_t
def cos_transform_array(P_max, P_min, t_dmin, t_dmax, t_dmin_next, t):
    """Vectorized cos_transform: all arguments are equally-shaped arrays.

    Fixes: the original masked with t<t_dmax and t>t_dmax, so cells where
    t == t_dmax silently kept the 0.0 initial value; the second mask is now
    the complement (t >= t_dmax), which evaluates to P_max at equality.
    """
    out = np.zeros(np.shape(P_max))
    rising = t < t_dmax
    falling = ~rising
    out[rising] = (0.5 * (P_max[rising] + P_min[rising])
                   + 0.5 * (P_max[rising] - P_min[rising])
                   * np.cos(np.pi + ((np.pi * (t[rising] - t_dmin[rising]))
                                     / (t_dmax[rising] - t_dmin[rising]))))
    out[falling] = (0.5 * (P_max[falling] + P_min[falling])
                    + 0.5 * (P_max[falling] - P_min[falling])
                    * np.cos((np.pi * (t[falling] - t_dmax[falling]))
                             / (t_dmin_next[falling] - t_dmax[falling])))
    return out
def ds_unix_time_convert(ds, ds_variable, julian_date, year, tz="US/Mountain"):
    """Convert a unix-time netCDF variable to hours since local midnight.

    ds          -- open netCDF dataset with 'time' plus *ds_variable*
    ds_variable -- name of the unix-time variable (e.g. 'sunrise')
    julian_date -- 1-based day-of-year to extract
    year        -- unused; kept for interface compatibility with callers
    tz          -- IANA time-zone name for the local midnight reference
    """
    HOURS_PER_SECOND = 1/(60*60)
    # Build 'YYYYJJJ' from the dataset's time-units string and the stored
    # day-of-year, then parse it into a naive local date.  (The original had
    # a duplicated `date_info = date_info = ...` assignment.)
    date_info = '%s%s' % (ds.variables['time'].units[11:15], ds.variables['time'][julian_date-1]+1)
    j_date = datetime.datetime.strptime(date_info, '%Y%j')
    local_tz = pytz.timezone(tz)
    local_dt = local_tz.localize(j_date)  # attach the local zone
    utc_dt = local_dt.astimezone(pytz.utc)
    unixtime_utc = calendar.timegm(utc_dt.timetuple())
    # Subtract local midnight (expressed as UTC unix time) from the data
    # array, then convert seconds -> hours.
    out_ds = (ds.variables[ds_variable][julian_date-1] - unixtime_utc) * HOURS_PER_SECOND
    return out_ds
def main():
    """Convert TopoWx daily tmin/tmax grids (GYE subset) into hourly
    temperatures via cos_transform_array plus solar sunrise/transit/sunset
    times, then plot the spatial-mean hourly series.

    NOTE(review): hard-coded local drive paths and the trailing
    '##still not working right' comment indicate this driver was still
    being debugged -- verify the bound_time/index bookkeeping before use.
    """
    #now we need to adapt this to consider more days
    #open the topowx data
    workspace = 'E:\\TOPOWX\\annual\\'
    year = 1948
    filename = '%s%s\\%s_%s.nc' %(workspace, 'tmin', 'tmin', year)
    xmax = -108.19583334006; xmin = -112.39583333838; ymin = 42.270833326049996; ymax = 46.19583332448 # GYE bounds
    AOA = [xmin, ymin, xmax, ymax] #specify the bounds for the FIA data
    nc_ds = nc.Dataset(filename)
    # Grid indices bracketing the area of analysis in lon/lat space.
    max_x_i = np.where(nc_ds.variables['lon'][:]>=AOA[2])[0][0]
    min_x_i = np.where(nc_ds.variables['lon'][:]>=AOA[0])[0][0]
    min_y_i = np.where(nc_ds.variables['lat'][:]<=AOA[3])[0][0]
    max_y_i = np.where(nc_ds.variables['lat'][:]<=AOA[1])[0][0]
    ndays = len(nc_ds.variables['time'])
    dim = np.shape(nc_ds.variables['tmin'][0,min_y_i:max_y_i,min_x_i:max_x_i])
    del nc_ds #remove this data file which is being used for reference
    tmax_name = '%s%s\\%s_%s.nc' %(workspace, 'tmax', 'tmax', year)
    P_max = nc.Dataset(tmax_name).variables['tmax'][:,min_y_i:max_y_i,min_x_i:max_x_i]
    tmin_name = '%s%s\\%s_%s.nc' %(workspace, 'tmin', 'tmin', year)
    P_min = nc.Dataset(tmin_name).variables['tmin'][:,min_y_i:max_y_i,min_x_i:max_x_i]
    julian_day_start = 1
    #julian_day_end = ndays #this should be determined by the length of the climate data stack
    julian_day_end = 4 #this should be determined by the length of the climate data stack
    nhours = (julian_day_end-julian_day_start) * 24
    hourly_temp = np.zeros((nhours,dim[0],dim[1])) #storage array for our output
    hours = np.zeros((nhours,dim[0],dim[1]))
    filename = 'K:\\NASA_data\\solar_out\\%s_snsrss.nc'%(year)
    sp_ds = nc.Dataset(filename)
    #need to find the high and low points and divide into 24 periods?
    t_dmin_prev = np.ones(dim) * (julian_day_start-1) #initialize the first minimum
    for i in range(julian_day_start, julian_day_end):
        #get the sun position arrays
        sunrise = ds_unix_time_convert(sp_ds, 'sunrise', i-1, year) /24 #in days
        transit = ds_unix_time_convert(sp_ds, 'transit', i-1, year) /24
        sunset = ds_unix_time_convert(sp_ds, 'sunset', i-1, year) /24
        sunrise_next = (ds_unix_time_convert(sp_ds, 'sunrise', i, year) /24) + 1
        day_hour = i-1
        t_dmax = (sunset - transit)/2 + transit + day_hour #calculate the time when maximum temperature is reached in hours
        t_dmin = sunrise + day_hour #calculate the time when minimum temperature is reached
        t_dmin_next = sunrise_next + day_hour
        #so we can make a time array that starts at the t_dmin_prev and then goes to the last t_dmin_next.
        #but we would like to start there and then go to each hourly increment (1/24) of a day...
        #this is not the most easy to perform maybe?
        #so get the decimal position, then count up to mext?
        bound_time = np.tensordot(np.linspace(0,1,25),(t_dmin_next-t_dmin_prev),axes=0) + t_dmin_prev ##problem here
        #first off, maybe change all the values that are not the first nor last data point into whole integer values
        #that way we can reference them?
        for t_hour in range(len(bound_time)): #twenty four hour day division
            index = day_hour * 24 + t_hour ##also problem
            print(index)
            #current_julian_hour = day_hour + t_hour/24 * np.ones(dim)
            current_julian_hour = bound_time[t_hour]
            current_P = cos_transform_array(P_max[i-1], P_min[i-1], t_dmin, t_dmax, t_dmin_next, current_julian_hour)
            if ((i != julian_day_start) and (t_hour == 0)): #for the first new day
                hourly_temp[index] = (current_P + last_P)/2 # take the average between two days
                hours[index] = current_julian_hour
            elif (t_hour == len(bound_time)-1): #last hour
                last_P = current_P
            else:
                hourly_temp[index] = current_P
                hours[index] = current_julian_hour
        t_dmin_prev = bound_time[-1]
    # Collapse the spatial dimensions to plot a single mean hourly series.
    h_time = hours.mean(axis =(1,2))
    t_hourly = hourly_temp.mean(axis=(1,2))
    plt.plot(h_time, t_hourly)
    ##still not working right....@t.chang 09.02.2015
if __name__ == "__main__":
    main()
| tonychangmsu/Python_Scripts | eco_models/mpb/hourly_model_09022015.py | hourly_model_09022015.py | py | 7,125 | python | en | code | 0 | github-code | 13 |
28351395019 | import torch
import torch.nn as nn
import transformers
"""
This script shows how to combine two transformer models to create a hybrid model.
This is just an experiment and not part of the project.
"""
bart_model_name = 'facebook/bart-base'
t5_model_name = 't5-base'
# Instantiate both pretrained seq2seq language models by their hub ids.
bart_model = transformers.AutoModelForSeq2SeqLM.from_pretrained(bart_model_name)
t5_model = transformers.AutoModelForSeq2SeqLM.from_pretrained(t5_model_name)
class HybridCombination(nn.Module):
    """Holds a BART and a T5 model and combines their outputs on one input."""
    def __init__(self, bart_model, t5_model):
        super().__init__()
        self.bart_model = bart_model
        self.t5_model = t5_model
    def forward(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask):
        # Run both models on identical encoder/decoder inputs.
        bart_output = self.bart_model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask)
        t5_output = self.t5_model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask)
        # NOTE(review): these calls presumably return model-output objects
        # (not tensors), which do not define '+'; adding the .logits tensors
        # is likely what was intended -- confirm before relying on this.
        output = bart_output + t5_output
        return output
hybrid_model = HybridCombination(bart_model, t5_model)
# use the hybrid model to generate the summary of the text
# NOTE(review): HybridCombination defines no generate() method and inherits
# only from nn.Module -- these .generate calls look like they would fail at
# runtime; verify against the transformers generation API.
text = "The quick brown fox jumps over the lazy dog."
tokenizer = transformers.AutoTokenizer.from_pretrained(bart_model_name)
input_ids = tokenizer(text, return_tensors='pt').input_ids
decoder_input_ids = tokenizer('summarize: ' + text, return_tensors='pt').input_ids
output = hybrid_model.generate(input_ids, decoder_input_ids=decoder_input_ids)
print(tokenizer.decode(output[0]))
# use the hybrid model to generate the summary of the text
text = "The quick brown fox jumps over the lazy dog."
tokenizer = transformers.AutoTokenizer.from_pretrained(t5_model_name)
input_ids = tokenizer(text, return_tensors='pt').input_ids
decoder_input_ids = tokenizer('summarize: ' + text, return_tensors='pt').input_ids
output = hybrid_model.generate(input_ids, decoder_input_ids=decoder_input_ids)
print(tokenizer.decode(output[0]))
# use the hybrid model to generate the summary of the text | nazhimkalam/gensum | Code/datascience/core/hybrid-combination.py | hybrid-combination.py | py | 2,127 | python | en | code | 2 | github-code | 13 |
42929142444 | '''
Write a program to sort a stack such that the smallest items are on the top.
You can use an additional temporary stack, but you may not copy
the elements into any other data structure (such as an array).
The stack supports the following operations: push, pop, peek, and isEmpty.
'''
class Stack:
    """Bounded LIFO stack backed by a Python list (top == values[-1])."""
    def __init__(self, stackSize):
        self.values = []          # storage; index -1 is the top
        self.size = stackSize     # maximum capacity
        self.currentSize = 0      # number of stored items
    def peek(self):
        # Top item without removing it (IndexError when empty, as before).
        return self.values[-1]
    def push(self, item):
        """Push item and return it, or return None when the stack is full."""
        if self.currentSize == self.size:
            return None
        self.values.append(item)
        self.currentSize += 1
        return item
    def pop(self):
        """Remove and return the top item, or None when the stack is empty."""
        if self.currentSize == 0:
            return None
        self.currentSize -= 1
        # list.pop() is O(1); the original rebuilt the list with an O(n)
        # slice on every pop.
        return self.values.pop()
    def isFull(self):
        return self.size == self.currentSize
    def isEmpty(self):
        return self.currentSize == 0
def sort_stack(stack):
    """Sort *stack* in place so the SMALLEST item ends up on top.

    Uses one auxiliary stack kept ordered with its largest element on top,
    so pouring it back leaves the smallest on top of the original.  The
    previous comparisons kept the auxiliary stack min-on-top, which left the
    LARGEST element on top after pouring back -- the opposite of the spec in
    the module docstring.
    """
    temporary_stack = Stack(stack.size)
    temporary_stack.push(stack.pop())
    while not stack.isEmpty():
        if temporary_stack.peek() <= stack.peek():
            # Incoming item >= current aux top: push keeps aux ascending.
            temporary_stack.push(stack.pop())
        else:
            # Incoming item is smaller: dig down to its insertion point.
            temp_value = stack.pop()
            while not temporary_stack.isEmpty() and temporary_stack.peek() > temp_value:
                stack.push(temporary_stack.pop())
            temporary_stack.push(temp_value)
    # Pour back: pops come out largest-first, so the smallest lands on top.
    while not temporary_stack.isEmpty():
        stack.push(temporary_stack.pop())
# Demo: fill a capacity-6 stack, show its contents, then sort in place.
stack = Stack(6)
stack.push(4)
stack.push(7)
stack.push(2)
stack.push(1)
stack.push(45)
stack.push(32)
print(stack.values)
sort_stack(stack)
print(stack.values) | lmhbali16/algorithms | CTCI/chapter_3/sort_stack.py | sort_stack.py | py | 1,549 | python | en | code | 0 | github-code | 13 |
72228452498 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 07:14:09 2020
@author: lcam
"""
'''
formato do Arquivo de saída:
Nome do grupo membros do grupo
[INF1025, [Kaka,Ceci,Teco]], --> INF1025 Kaka Ceci Teco
[FIS1020, [Keko]],
[FIS1212, [Tata]]
[CAL1010,[kiko]]
'''
def salvaTabGruposNoArq(tabGrupos):
    """Save the per-course student groups to 'alunosPordisc.txt'.

    Each entry of tabGrupos is [course_code, [student, ...]] and is written
    as one space-separated line: 'COURSE name1 name2 ... '.
    """
    # 'with' closes the file even when a write fails (the original leaked
    # the handle on error).
    with open('alunosPordisc.txt', 'w') as arqS:
        for el in tabGrupos: # el: [course, [names]]
            arqS.write("%s "%el[0]) # course code
            for nome in el[1]: # student names for this course
                arqS.write("%s "%nome)
            arqS.write('\n')
def busca(l, valor):
    """Return the index of the first entry of *l* whose first element equals
    *valor*, or None when no entry matches."""
    for indice, entrada in enumerate(l):
        if entrada[0] == valor:
            return indice
    return None
# Read alunos.txt ('name,course,absences,average' per line) and group the
# student names by course into tabGrupos: [[course, [names...]], ...].
arqE=open('alunos.txt','r')
tabGrupos=[]
for linha in arqE:
    linha=linha.strip()
    lVals=linha.split(',')
    nome=lVals[0]
    disc=lVals[1]
    faltas=int(lVals[2])  # parsed but unused (validates the field format)
    med=float(lVals[3])   # parsed but unused (validates the field format)
    # Append to an existing course group, or start a new one.
    pos =busca(tabGrupos,disc)
    if pos != None:
        tabGrupos[pos][1].append(nome)
    else:
        tabGrupos.append([disc,[nome]])
arqE.close()
salvaTabGruposNoArq(tabGrupos)
| luca16s/INF1025 | Arquivo/agrupamentosa.py | agrupamentosa.py | py | 1,150 | python | pt | code | 0 | github-code | 13 |
24654331541 | __author__ = 'Michael Kaldawi'
"""
Programmer: Michael Kaldawi
Class: CE 4348.501
Assignment: P01 (Program 1)
Program Description:
This program implements a prime number finder utilizing the sieve
of Eratosthenes and multi-threading.
"""
# Note: we are using numpy for our array processing to speed up
# runtime. numpy needs to be installed/ imported for this
# program to work.
import threading
import math
import cProfile
import numpy as np
# This function finds primes in the half-open range [start, end) using the
# sieve of Eratosthenes over [0, end).  It returns a dict with the start
# value and a boolean array whose entry i flags (start + i) as prime.
def find_primes(start, end):
    """Sieve [0, end) and return {'start': start, 'array': flags[start:]}."""
    # np.bool_ replaces the np.bool alias, which was removed in NumPy 1.24.
    array = np.ones(shape=end, dtype=np.bool_)
    # 0 and 1 are not prime.  The original left them marked True, which was
    # only masked by callers always slicing from start >= 2.
    array[:min(2, end)] = False
    for i in range(2, int(math.sqrt(end)+1)):
        if array[i]:
            # Cross off multiples starting at i*i (smaller multiples were
            # already crossed off by smaller factors); the slice assignment
            # replaces the original per-element while loop.
            array[i*i::i] = False
    # Return the array with a start value.
    return {'start': start, 'array': array[start:]}
# Prints each prime flagged True in the given array; entry 0 of the array
# corresponds to the number *start*.
def print_primes(start, array):
    total = 0
    for offset, flag in enumerate(array):
        if flag:
            # Prints the number followed by its (truthy) flag, as before.
            print(start + offset, flag)
            total += 1
    # print(total)
# Writes one prime per line to a file; array[i] truthy means (start + i)
# is prime.
def write_primes(file_name, mode, start, array):
    """Write the primes flagged in *array* to *file_name* (opened with *mode*)."""
    # 'with' guarantees the handle is flushed and closed; the original never
    # closed the file, so buffered output could be lost.
    with open(file_name, mode) as f:
        total = 0
        pos = start
        for i in array:
            if i:
                f.write(pos.__str__() + "\n")
                total += 1
            pos += 1
    # f.write("total: " + total.__str__())
# Thread wrapper: runs find_primes(start, end) on the worker thread and
# stashes the result in self.data (cProfile cannot profile Thread.run
# directly, hence the wrapper class).
class MyThread(threading.Thread):
    def __init__(self, start, end):
        super().__init__()
        self.begin = start
        self.end = end
        self.data = None
    def run(self):
        # Executed on the child thread once .start() is called.
        self.data = find_primes(self.begin, self.end)
# Calculates the primes between 2 and 1,000 on the main thread, then fans
# the rest of the range out across 10 worker threads.
def main():
    """Compute prime flags for 2..999999 and write them to primes.txt."""
    # 'master_data' accumulates the boolean flag for every number starting
    # at 2, in order, so write_primes can recover each number from its index.
    master_data = []
    threads = []
    # Primes below 1000 (flags for numbers 2..999), computed directly.
    for flag in find_primes(start=2, end=1000)['array']:
        master_data.append(flag)
    # One worker per contiguous block: [1000, 100000), then nine blocks of
    # [t*100000, (t+1)*100000).  The original 'thread*100001' arithmetic
    # left growing unscanned gaps between blocks.
    for thread in range(0, 10):
        if thread == 0:
            threads.append(MyThread(1000, 100000))
        else:
            threads.append(MyThread(thread * 100000, (thread + 1) * 100000))
        # Start each worker as soon as it is created.  The original called
        # threads[-1].start() AFTER the loop, so workers 0-8 never ran,
        # their .data stayed None, and the merge below crashed.
        threads[-1].start()
    # Wait for every worker; joining only threads[9] did not guarantee the
    # other nine had finished.
    for worker in threads:
        worker.join()
    # Append each worker's flags in block order to keep indices contiguous.
    for worker in threads:
        for data_point in worker.data['array']:
            master_data.append(data_point)
    write_primes(file_name="primes.txt", mode="w", start=2, array=master_data)
    print("number of primes found: " + master_data.count(1).__str__())
# This is our 'main' function. The first lines of code
# are executed here.
#
# This function executes main().
# Only the parent process can run this function.
if __name__ == '__main__':
main() | michael-kaldawi/Prime-Number-Multiprocessing | Multithreading.py | Multithreading.py | py | 3,895 | python | en | code | 1 | github-code | 13 |
43357939306 | #!/usr/bin/python3
import sys
if __name__ == "__main__":
    # Expect exactly one argument: the FASTQ file to scan.
    if len(sys.argv) != 2:
        print("#usage python", sys.argv[0],"<fastq>")
        sys.exit()
    InFASTQ = sys.argv[1]
    lLane = []  # NOTE(review): never used -- candidate for removal
    iCnt = 0
    iCntA, iCntC, iCntG, iCntT = 0,0,0,0
    with open(InFASTQ) as fr:
        for line in fr:
            iCnt += 1
            # A FASTQ record spans 4 lines; the 2nd one holds the sequence.
            if iCnt % 4 == 2:
                sSeq = line.strip()
                iCntA += sSeq.count("A")
                iCntC += sSeq.count("C")
                iCntG += sSeq.count("G")
                iCntT += sSeq.count("T")
    # Total per-base counts across all reads.
    print("A:",iCntA)
    print("C:",iCntC)
    print("G:",iCntG)
    print("T:",iCntT)
| KennethJHan/Bioinformatics_Programming_101 | 065.py | 065.py | py | 522 | python | en | code | 0 | github-code | 13 |
24322275250 | """Choise team repair
Revision ID: 475448e80c95
Revises: cc0b9a8c6bec
Create Date: 2022-02-20 14:49:35.862420
"""
from alembic import op
import sqlalchemy as sa
from app.enums import ChoiceType
from app.choises import equipment, team_composition
# revision identifiers, used by Alembic.
revision = '475448e80c95'
down_revision = 'cc0b9a8c6bec'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the two nullable repair-crew columns to the 'log' table.
    op.add_column('log', sa.Column('team_composition', ChoiceType(team_composition), nullable=True))
    op.add_column('log', sa.Column('equipment_repair', ChoiceType(equipment), nullable=True))
    # op.drop_index('ix_log_pub_date', table_name='log')
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # op.create_index('ix_log_pub_date', 'log', ['pub_date'], unique=False)
    # Reverse of upgrade(): drop the columns in the opposite order.
    op.drop_column('log', 'equipment_repair')
    op.drop_column('log', 'team_composition')
    # ### end Alembic commands ###
| Pavel-Maksimov/shift_logs_flask | migrations/versions/475448e80c95_choise_team_repair.py | 475448e80c95_choise_team_repair.py | py | 1,046 | python | en | code | 0 | github-code | 13 |
40720066211 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 14 09:41:56 2019
@author: bradw
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa import stattools
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
#Data Wrangling
#Importing data into individual data frames
#US Cpi
# Row 10 holds the real header; the first 11 rows are report boilerplate.
US_cpi = pd.read_excel('Usa cpi data.xlsx')
US_cpi.columns = US_cpi.iloc[10]
US_cpi.drop(US_cpi.index[0:11],inplace=True)
US_cpi.drop(['HALF1','HALF2'],axis=1,inplace=True)
US_cpi.set_index('Year',inplace=True)
#AU Cpi
# Take column 1 of sheet 'Data1' from row 9 on, then convert to % change.
AU_cpi = pd.read_excel('Aus cpi data.xls',sheet_name='Data1')
AU_cpi = AU_cpi.iloc[9:,1]
AU_cpi = AU_cpi.pct_change()
#Exchange Rate Data
FX_rate = pd.read_csv('AUD_USD Historical Data.csv',index_col = 'Date',parse_dates=True)
FX_rate = FX_rate.iloc[:,0]
#Combining rows into a single column
#Creating Master Data Frame
df = pd.DataFrame()
#reducing Data into time frame required
US_cpi = US_cpi.loc['1986':'2018']
Aus_CPI = AU_cpi['1986-03-01':'2018-12-01']
#FX data is value not change like other data 1 month extra aded to time frame to limit NAN value from pct change
FX_rate = pd.DataFrame(FX_rate['Dec 18': 'Sep 85'])
#Converting to quaterly values
# Keep every third monthly observation, then compute the quarterly return.
FX_rate = FX_rate[::3]
FX_rate['FX Return'] = (FX_rate['Price'] - FX_rate['Price'].shift(-1))/ FX_rate['Price'].shift(-1)
FX_rate = FX_rate[::-1]
FX_rate = FX_rate[2:]
FX_rate.drop('Price',axis=1,inplace=True)
# Re-index onto the Australian CPI's quarterly index so the series align
# positionally when joined into the master frame below.
FX_rate.index = Aus_CPI.index
#Converting monthly data to quatlerly
US_cpi['Q1'] = US_cpi[['Jan','Feb','Mar']].sum(axis=1)/100
US_cpi['Q2'] = US_cpi[['Apr','May','Jun']].sum(axis=1)/100
US_cpi['Q3'] = US_cpi[['Jul','Aug','Sep']].sum(axis=1)/100
US_cpi['Q4'] = US_cpi[['Oct','Nov','Dec']].sum(axis=1)/100
#Creating list from data for US CPI
# Flatten the year x quarter table into one chronological list.
US_CPI = []
for i in range(len(US_cpi)):
    US_CPI.append(US_cpi['Q1'].iloc[i])
    US_CPI.append(US_cpi['Q2'].iloc[i])
    US_CPI.append(US_cpi['Q3'].iloc[i])
    US_CPI.append(US_cpi['Q4'].iloc[i])
Usa_CPI = pd.Series(US_CPI)  # NOTE(review): never used afterwards
#Adding in series to make final DataFrame
# BUG FIX: the two CPI series were assigned to each other's columns
# (Australian data landed in 'Usa CPI' and vice versa), which silently
# flipped the sign of 'Usa Aus Diff' below.  Assign the date-indexed
# Australian Series first so the empty frame adopts its quarterly index,
# then the US list aligns positionally.
df['Aus CPI'] = Aus_CPI
df['Usa CPI'] = US_CPI
df['FX Change'] = FX_rate
#Creating log prices
# log(1 + rate) converts simple quarterly rates to continuously compounded.
df['Usa CPI Log'] = np.log(df['Usa CPI'] +1)
df['Aus CPI Log'] = np.log(df['Aus CPI'] +1)
df['FX Change Log'] = np.log(df['FX Change'] +1)
df['Usa Aus Diff'] = df['Aus CPI Log'] - df['Usa CPI Log']
#Reducing the DataFrame to just varaibles we want
df.drop(['Usa CPI','Aus CPI','FX Change'],axis=1,inplace=True)
#Data Analysis
#First step is checking for linearity of data
fig, axes = plt.subplots(nrows=3,ncols=1)
axes[0].plot(df['Usa CPI Log'])
axes[1].plot(df['Aus CPI Log'])
axes[2].plot(df['FX Change Log'])
plt.show()
#Using the AD Fuller test
# adfuller returns (stat, pvalue, usedlag, nobs, critical-values dict, ...),
# hence the [0]/[1]/[4] indexing below.
ad_fuller = (stattools.adfuller(df['Usa CPI Log'],autolag ='BIC'))
print('ADF Statistic: %f' % ad_fuller[0])
print('ADF P Value: %f' % ad_fuller[1])
print('Critical Values:')
for key, value in ad_fuller[4].items():
    print('\t%s: %.3f' % (key, value))
#Rejecting the Null(Unit Root), data is stationary
#Using the AD Fuller test
ad_fuller = (stattools.adfuller(df['Aus CPI Log'],autolag ='BIC'))
print('ADF Statistic: %f' % ad_fuller[0])
print('ADF P Value: %f' % ad_fuller[1])
print('Critical Values:')
for key, value in ad_fuller[4].items():
    print('\t%s: %.3f' % (key, value))
#Rejecting the Null(Unit Root)), data is stationary
ad_fuller = (stattools.adfuller(df['FX Change Log'],autolag ='BIC'))
print('ADF Statistic: %f' % ad_fuller[0])
print('ADF P Value: %f' % ad_fuller[1])
print('Critical Values:')
for key, value in ad_fuller[4].items():
    print('\t%s: %.3f' % (key, value))
#Rejecting the Null(Unit Root)), data is stationary
ad_fuller = (stattools.adfuller(df['Usa Aus Diff'],autolag ='BIC'))
print('ADF Statistic: %f' % ad_fuller[0])
print('ADF P Value: %f' % ad_fuller[1])
print('Critical Values:')
for key, value in ad_fuller[4].items():
    print('\t%s: %.3f' % (key, value))
# Autocorrelation structure of each series (10 lags).
plot_acf(df['Usa CPI Log'], lags=10)
plot_acf(df['Aus CPI Log'], lags=10)
plot_acf(df['FX Change Log'], lags=10)
plot_acf(df['Usa Aus Diff'], lags=10)
plot_pacf(df['Usa CPI Log'], lags=10)
plot_pacf(df['Aus CPI Log'], lags=10)
plot_pacf(df['FX Change Log'], lags=10)
plot_pacf(df['Usa Aus Diff'], lags=10)
#Adding in t-1 and t-2 of USA CPI data variables into the model and checking the outputs. Mean reversion of ACF and 2 lag PACF
#Creating Variables
df['Usa CPI Log T-1'] = df['Usa CPI Log'].shift(1)
df['Usa CPI Log T-2'] = df['Usa CPI Log'].shift(2)
#Removing NAN values from data set
# The first two rows have no T-2 lag, so drop them.
df = df[pd.notnull(df['Usa CPI Log T-2'])]
#Checking significance of ar1 and ar2 values
#Creating a model
y = df['Usa CPI Log']
X = df[['Usa CPI Log T-1','Usa CPI Log T-2']]
#Fitting Model
# NOTE(review): sm.OLS fits without an intercept unless sm.add_constant is
# applied to X -- confirm a no-constant regression is intended here.
results = sm.OLS(y,X).fit()
print(results.summary())
#Both significant so including in model
#Creating a model
y = df['FX Change Log']
X = df.drop(['Aus CPI Log','FX Change Log','Usa CPI Log'],axis=1)
#Fitting Model
results = sm.OLS(y,X).fit()
print(results.summary())
#Checking for AC and PAC in model to make sure all is taken out with US CPI Log t-1 t-2
residuals = results.resid
plt.hist(residuals)
plt.show()
#Residuals seem to be normally distributed around a mean of 0
plot_acf(residuals, lags=10)
plot_pacf(residuals, lags=10)
#No auto correlation in the model
ad_fuller = (stattools.adfuller(residuals,autolag ='BIC'))
print('ADF Statistic: %f' % ad_fuller[0])
print('ADF P Value: %f' % ad_fuller[1])
print('Critical Values:')
for key, value in ad_fuller[4].items():
    print('\t%s: %.3f' % (key, value))
#T stat outside of critical value for Engle Granger test for collinearity of 3.37 (T Stat -10.92)
#Reject the null, data has no collinearity therefore PPP does not hold from this sample.
| BradWebb101/PPP-USD-vs-AUD | PPP USD AUD.py | PPP USD AUD.py | py | 5,738 | python | en | code | 0 | github-code | 13 |
36711030095 | import os
import argparse
import hashlib
import base64
import pathlib
def main(args):
    """Emit a wheel-RECORD-style line (sha256 digest and byte size) for
    every regular file found under the directory ``args.i``.

    Files named ``RECORD`` get empty hash/size fields, matching the wheel
    convention of not hashing the RECORD file itself.
    """
    for entry in pathlib.Path(args.i).glob('**/*'):
        path = str(entry)
        if os.path.isdir(path):
            continue
        # Path relative to the input root; currently unused in the printed
        # line -- presumably the original template referenced it. TODO confirm.
        filename = path.replace(args.i, '')
        with open(path, 'rb') as handle:
            payload = handle.read()
        # PEP 376/427 style digest: urlsafe base64 with '=' padding stripped.
        digest = base64.urlsafe_b64encode(
            hashlib.sha256(payload).digest()
        ).decode('latin1').rstrip('=')
        size = os.path.getsize(path)
        if path.endswith('RECORD'):
            print(f'(unknown),,')
        else:
            print(f'(unknown),sha256={digest},{size}')
if __name__ == '__main__':
    # Parse the single required argument (the directory to scan) and run.
    ap = argparse.ArgumentParser()
    ap.add_argument('-i', help='Input directory', required=True)
    args = ap.parse_args()
    main(args)
| cyberj0g/OpenCV-Custom | record_generator.py | record_generator.py | py | 657 | python | en | code | 0 | github-code | 13 |
32859345328 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from string import whitespace
import re
from .. import Unit
from ...lib.argformats import multibin
class trim(Unit):
    """
    Removes byte sequences at beginning and end of input data.
    """

    def interface(self, argp):
        """Declare the unit's arguments.

        ``junk`` is one or more byte sequences to strip (all ASCII
        whitespace characters by default); ``-l``/``-r`` restrict the
        trimming to the left or right side only.
        """
        argp.add_argument('junk', type=multibin,
            default=[w.encode('ascii') for w in whitespace],
            help='Character(s) to be removed, default is whitespace.', nargs='*')
        one_side_only = argp.add_mutually_exclusive_group()
        one_side_only.add_argument('-l', '--left-only', action='store_true')
        one_side_only.add_argument('-r', '--right-only', action='store_true')
        return super().interface(argp)

    def process(self, data):
        """Repeatedly strip every configured junk sequence from the ends of
        ``data`` until a full pass removes nothing.

        The outer loop is required because removing one junk sequence can
        expose another that appears *earlier* in the junk list (e.g. junk
        ``[b'a', b'b']`` applied to ``b'ab...'`` from the right).
        """
        keep_running = True
        while keep_running:
            keep_running = False
            for junk in self.args.junk:
                jlen = len(junk)
                if not self.args.right_only and data.startswith(junk):
                    if jlen == 1:
                        data = data.lstrip(junk)
                    else:
                        # Strip every leading repetition of the sequence.
                        pattern = B'^(?:' + B''.join(B'\\x%02X' % X for X in junk) + B')+'
                        match = re.search(pattern, data)
                        data = data[match.end():]
                    # BUGFIX: always schedule another pass after removing a
                    # prefix.  The original only did so for multi-byte junk,
                    # so e.g. a leading b' ' exposed by stripping b'\t' was
                    # never removed.
                    keep_running = True
                if not self.args.left_only and data.endswith(junk):
                    if jlen == 1:
                        data = data.rstrip(junk)
                    else:
                        # Strip every trailing repetition of the sequence.
                        pattern = B'(?:' + B''.join(B'\\x%02X' % X for X in junk) + B')+$'
                        match = re.search(pattern, data)
                        data = data[:match.start()]
                    keep_running = True
        return data
| chubbymaggie/refinery | refinery/units/strings/trim.py | trim.py | py | 1,867 | python | en | code | null | github-code | 13 |
14688656261 | import os
import numpy as np
from features.gradient_features import choose_features
#path_directory = '/content/drive/MyDrive/Dataset_Educazone_Test/'
path_directory = 'E:/Job_Internships/Educazone/Dataset_Educazone_Test/'#input("Enter the directory of data: \n")
filename = path_directory#input("Enter the path where the data would be saved: \n")
directories = os.listdir(path_directory)
i = 0
label = []
features = []
for directory in directories :
file_path = path_directory + directory
print(file_path)
file_list = os.listdir(file_path)
for id in file_list :
image = file_path+ '/' + id
print('Image path : ', image)
feature = choose_features(image, 'hog')
features.append(feature)
label.append(i)
i += 1
filename = filename + '/features.npy'
np.save(filename, features)
arr = np.load(filename) | SohamChattopadhyayEE/gradientfeatures-with-PCA_GMM | Codes/feature_extraction.py | feature_extraction.py | py | 889 | python | en | code | 0 | github-code | 13 |
26422748410 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 16:24:43 2018
@author: Olivia Hull
Calculate the percent composition of a molecular orbital
in Gaussian, specify pop=full keyword
This package is written restrictively for Ag6-N2. Any deviation from my standard Ag6-N2 coordinate input
will need to be accounted for in the package. See relevant function comments
"""
import numpy as np
def control_center(filename, MO_array):
    """Top-level driver: parse a Gaussian ``pop=full`` output file and
    compute the contribution of the two N atoms (rows tagged 7 and 8) to
    the requested molecular orbitals.

    Parameters
    ----------
    filename : path of the Gaussian log file.
    MO_array : iterable of MO numbers to analyze (sorted internally).

    Returns
    -------
    (data_array, N7, N8) -- ``data_array`` holds one row per AO (four
    metadata columns followed by one coefficient column per requested MO);
    ``N7``/``N8`` are the CSPA percent contributions per MO, rounded to
    four decimals by ``N2_CSPA_contributions``.
    """
    MO_array = sorted(MO_array)
    line_count = get_line_count(filename)
    raw_MO_data = get_raw_coefs(MO_array, line_count, filename)
    clean_array = make_initial_array(raw_MO_data, line_count)
    lines = determine_indexing_lines(MO_array)
    data_array = cleanup_raw_coefs(raw_MO_data, line_count, MO_array, clean_array, lines)
    N7, N8 = N2_CSPA_contributions(data_array, MO_array, line_count)
    return data_array, N7, N8
def get_line_count(filename):
''' determines number of AOs (= number of MOs)'''
line_count = 0
with open(filename,'r') as f:
for line in f:
if (" Eigenvalues -- " in line):
MO_line = next(f)
while True:
if MO_line[0:19] != ' ':
line_count +=1
MO_line = next(f)
else:
break
break
return line_count
def get_raw_coefs(MO_array, line_count, filename):
''' pulls the raw coef data from gaussian output file and outputs the raw lines'''
data = []
MO = 0
orb_line = []
with open(filename, 'r') as f:
for line in f:
if ("Molecular Orbital Coefficients:" in line):
coupling_line = next(f)
while MO < len(MO_array):
if str(MO_array[MO]) in coupling_line.split() and str(MO_array[MO]) not in orb_line:
for i in coupling_line.split():
orb_line.append(i)
for i in range(line_count + 3):
data.append(coupling_line)
coupling_line = next(f)
MO += 1
if str(MO_array[MO]) in orb_line:
MO += 1
continue
else:
for i in range(line_count + 3):
coupling_line = next(f)
return data
def locate_MO_index(MO, raw_MO_data_line):
    """Return the 0-based column of MO in the whitespace-split header line.

    If the MO number appears more than once the index of its *last*
    occurrence is returned (the scan keeps overwriting the match), and a
    missing MO raises UnboundLocalError exactly as before.
    """
    target = str(MO)
    for position, token in enumerate(raw_MO_data_line.split()):
        if token == target:
            MO_col = position
    return MO_col  # 0-based: MO_col == 2 means the third column
def make_initial_array(raw_MO_data, line_count):
'''Sets up the cleaned-up array without adding the MO data yet. Just grabs the first four columns, which
contain info on the AO function names and atoms'''
clean_array = []
for val in range(line_count):
clean_array.append([])
if len(raw_MO_data[val+3].split()) == 9: #want val+3 because first three lines are not the data we want
for i in [0, 1, 2, 3]:
clean_array[val].append(raw_MO_data[val+3].split()[i])
elif len(raw_MO_data[val+3].split()) != 9:
clean_array[val].append(raw_MO_data[val+3].split()[0])
clean_array[val].append('blank')
clean_array[val].append('blank')
clean_array[val].append(raw_MO_data[val+3].split()[1])
else:
break
return clean_array
def determine_indexing_lines(MO_array):
    """Group the (sorted) MO numbers into the 5-wide column blocks used by
    Gaussian's MO coefficient table, e.g. [1,...,12] ->
    [[1,2,3,4,5],[6,7,8,9,10],[11,12]].

    Two MOs stay in the same group only while the (MO-1) % 5 column
    position keeps increasing and they are less than 5 apart, i.e. while
    they appear in the same printed block of the log file.
    """
    MO = 0          # index (into MO_array) of the first MO of the current group
    next_MO = 1     # index of the candidate MO to append next
    counter = 0     # index of the group currently being filled
    lines = [[MO_array[MO]]]
    while next_MO <= len(MO_array)-1:
        # Extend the current group while the candidate sits in the same
        # printed 5-column block as the group's first member.
        while (MO_array[MO]-1)%5 < (MO_array[next_MO]-1)%5 and abs(MO_array[MO] - MO_array[next_MO]) < 5:
            lines[counter].append(MO_array[next_MO])
            if next_MO < len(MO_array)-1:
                next_MO = next_MO + 1
            else:
                break
        counter = counter + 1
        # Stop once the last MO has been placed into the last group.
        if MO_array[next_MO] == MO_array[-1] and lines[-1][-1] == MO_array[next_MO]:
            break
        # Otherwise open the next group, seeded with the candidate MO.
        lines.append([])
        MO = next_MO
        next_MO = MO + 1
        lines[counter].append(MO_array[MO])
    return lines
def cleanup_raw_coefs(raw_MO_data, line_count, MO_array, clean_array, lines):
''' Cleans up the raw MO data lines and outputs an ordered MO/AO array (AOs on rows, MOs on columns)'''
jump_index = line_count + 3
clean_index = 0
temp_array = []
temp_array2 = []
big_counter = 0
lines_max = len(lines)
while big_counter < lines_max:
if str(lines[big_counter][0]) in raw_MO_data[clean_index].split():
for MO in lines[big_counter]:
MO_col = locate_MO_index(MO,raw_MO_data[clean_index])
for i in range(line_count):
temp_array.append([])
temp_array[i].append(raw_MO_data[clean_index+i+3].split()[-1:-6:-1])
temp_array2.append([])
temp_array2[i]=temp_array[i][0][::-1]
clean_array[i].append(temp_array2[i][MO_col])
temp_array=[]
temp_array2 =[]
big_counter = big_counter + 1
else:
clean_index = clean_index + jump_index
data_array = clean_array
return data_array
def N2_CSPA_contributions(data_array, MO_array, line_count):
'''performs C squared population analysis (see Ros and Schuit, Theoretica chimica acta, 1966, 4, 1, 1-12)
If you're not working with Ag6-N2, you'll need to change the second for loop to reflect the atom order'''
N7 = np.zeros(len(MO_array))
N8 = np.zeros(len(MO_array))
tots_sq = np.zeros(len(MO_array))
N7_sq_rat = np.ones(len(MO_array))
N8_sq_rat = np.ones(len(MO_array))
for i in data_array:
k = 0
for j in i[4::]:
tots_sq[k] = tots_sq[k] + abs(float(j))**2
k = k + 1
for i in range(len(data_array)):
if str(7) in data_array[i][1]: # must change these if the N order moves!!!
N7 = sq_sum_N(i,data_array,N7, line_count, 7)
if str(8) in data_array[i][1]:
N8 = sq_sum_N(i,data_array,N8, line_count, 8)
for i in range(len(tots_sq)):
N7_sq_rat[i] = 100.0*N7_sq_rat[i]*N7[i]/tots_sq[i]
N8_sq_rat[i] = 100.0*N8_sq_rat[i]*N8[i]/tots_sq[i]
pretty_N7 = [round(x,4) for x in N7_sq_rat]
pretty_N8 = [round(x,4) for x in N8_sq_rat]
return pretty_N7, pretty_N8
def sq_sum_N(i, data_array, N, line_count, atom_num):
    """Accumulate squared AO coefficients belonging to atom ``atom_num``
    into the per-MO totals ``N`` (mutated in place and returned).

    Starting at row ``i`` (the first AO row of that atom), rows are
    consumed while they still belong to the atom: continuation rows carry
    'blank' in column 1 (filled in by ``make_initial_array`` for rows where
    Gaussian omits the atom number).
    """
    skipper = 0
    while data_array[i+skipper][1] == 'blank' or data_array[i+skipper][1] == str(atom_num):
        n = 0
        # Columns 4.. hold one coefficient per requested MO.
        for j in data_array[i+skipper][4::]:
            N[n] = N[n] + abs(float(j)) ** 2
            n += 1
        skipper += 1
        # Stop at the bottom of the AO table.
        if (i+skipper) == line_count:
            break
    return N
def test_AO_tots(data_array):
'''use with all MOs to test that AOs sum to 1'''
k = 0
row_sum_occ = np.zeros(len(data_array))
row_sum_tot = np.zeros(len(data_array))
for i in data_array:
for j in i[4:68]:
row_sum_occ[k] = row_sum_occ[k] + (float(j)) ** 2
for m in i[4::]:
row_sum_tot[k] = row_sum_tot[k] + (float(m)) ** 2
k += 1
def N2_contributions(data_array, MO_array, line_count):
N7 = np.zeros(len(MO_array))
N8 = np.zeros(len(MO_array))
tots = np.zeros(len(MO_array))
tots_sq = np.zeros(len(MO_array))
N7_rat = np.ones(len(MO_array))
N8_rat = np.ones(len(MO_array))
for i in data_array:
k = 0
horiz = 0.0
horiz_sq = 0.0
for j in i[4::]:
tots[k] = tots[k] + float(j)
tots_sq[k] = tots[k] + abs(float(j))**2
k = k + 1
horiz = horiz + float(j)
horiz_sq = horiz_sq + float(j) ** 2
for i in range(len(data_array)):
if str(7) in data_array[i][1]: # must change these if the N order moves!!!
N7 = sum_N(i,data_array,N7, line_count, 7)
if str(8) in data_array[i][1]:
N8 = sum_N(i,data_array,N8, line_count, 8)
for i in range(len(tots)):
N7_rat[i] = N7_rat[i]*N7[i]/tots[i]
N8_rat[i] = N8_rat[i]*N8[i]/tots[i]
def sum_N(i, data_array, N, line_count, atom_num):
skipper = 0
while data_array[i+skipper][1] == 'blank' or data_array[i+skipper][1] == str(atom_num):
n = 0
for j in data_array[i+skipper][4::]:
N[n] = N[n] + float(j)
n += 1
skipper += 1
if (i+skipper) == line_count:
break
return N
| oahull/NP-Code | MO_coefs.py | MO_coefs.py | py | 9,325 | python | en | code | 0 | github-code | 13 |
214312525 | # exercise 1:copy method
list_ = [1,1.5,'python',0b101,True,[20+10j,"india",range(10)]]
newlist_ = list_.copy()
print(list_)
print(newlist_)
# if we make any changes the parent copy it also changing in child copy why?
# what is deep copy and shallow copy?
list_[5].append("john")
print(list_)
print(newlist_)
#exercise 2:using extend
# case 1:the list only contins sub lists no values
list_=[[1,23,3],[5,4,4,5],[78,54,54,69]]
newlist_ = []
for lelements in list_:
# extending inner list with empty new list
newlist_.extend(lelements)
print(f'\ncase 1:the list only contins sub lists no values\n{newlist_}')
# list comprehension
print(f'\nlist comprehension\n{[k for lele in list_ for k in lele]}')
# case 2:the list contins sub lists and values
list_=[[1,23,3],[5,4,4,5],[78,54,54,69],58,96,6,"hello",6,56,4,]
newlist_ = []
for lelements in list_:
# checking inner list type is list or not
if type(lelements) is list:
newlist_.extend(lelements)
else:
newlist_.append(lelements)
print(f'\ncase2: the list contins sub lists and values \n{newlist_}')
# exercise 3:shop
tshirts_colour = ['white','black']
tshirts_sizes = ['Small','Medium','Large',"XtraLarge"]
diff_size_tshirts = []
for color in tshirts_colour:
for size in tshirts_sizes:
# printing all available tshirst colors and its sizes
print(f'\n "{color}" color "{size}" size tshirt')
#exercise 4:find duplicate value of index
l = [1,2,2,2,2,3,4,5,6,7,8,9,1,45,54,54,46,58,4,5]
# empty list to collect duplicate values
duplicate = []
for i in range(len(l)):
# taking next value of previous loop starting value
k=i+1
for j in range(k,len(l)):
# checking 1st value with next values and the value present in duplicate or not
if l[i] == l[j] and l[i] not in duplicate:
# appending the duplicate values
duplicate.append(l[i])
# printing the duplicate values index values
print(f'\nthe duplicate element is {l[i]} and its index is {l.index(l[i],l.index(l[i])+1)}')
# exercise 5:
list_1 = [[1,2,3,4],[2,3,4],[1,3,4,5,6],[1,2],[1],2]
for inerl in list_1:
# to avoid integers
if type(inerl) != int:
# if list contains more than two values
if len(inerl) > 2:
# to keep only two values in nested list
while len(inerl) > 2:
inerl.pop()
# To avoid if list has only 2 values
elif len(inerl) == 2:
pass
print(f'\n{list_1}')
# exercise 6:list comprehension
tshirts_colour = ['white','black']
tshirts_sizes = ['Small','Medium','Large',"XtraLarge"]
# list comprehnsion
data=[f'"{size}" size "{color}" color tshirt' for size in tshirts_sizes for color in tshirts_colour]
# printing each statement in data
for i in data:
print(f'\n{i}')
| nareshchari/tasks | listexercises.py | listexercises.py | py | 2,821 | python | en | code | 0 | github-code | 13 |
34692063956 | import itertools
import scipy
import pandas as pd
from statsmodels.stats.weightstats import *
from math import sqrt
from scipy import stats
from sklearn import model_selection, metrics, linear_model, ensemble
def my_proportions_confint_diff_rel(sample1, sample2, alpha = 0.05):
    """Confidence interval (level 1 - alpha) for the difference of two
    dependent (paired) proportions.

    f counts pairs that changed 1 -> 0, g counts pairs that changed
    0 -> 1; samples are truncated to the shorter length by zip.
    """
    z = stats.norm.ppf(1 - alpha/2.)
    pairs = list(zip(sample1, sample2))
    n = len(pairs)
    f = sum(1 for a, b in pairs if a == 1 and b == 0)
    g = sum(1 for a, b in pairs if a == 0 and b == 1)
    center = float(f - g) / n
    half = z * sqrt(float(f + g) / n**2 - float(f - g)**2 / n**3)
    return center - half, center + half
def my_p_value(expect_mean=9.5, std=0.4, n=160, sample_mean=9.57, alpha=0.95, alternative='two-sided'):
    """P-value of a z-test for H0: population mean == expect_mean, given a
    known standard deviation ``std`` and sample size ``n``.

    ``alpha`` is unused but retained for backward compatibility with
    existing call sites.  Raises ValueError for an unknown ``alternative``
    (the original fell through to UnboundLocalError); the dead ``Fz``/``S``
    computations of the original were removed.
    """
    z = (sample_mean - expect_mean)/(std/sqrt(n))
    if alternative == 'two-sided':
        return 2*(1 - scipy.stats.norm.cdf(abs(z)))
    if alternative == 'less':
        return scipy.stats.norm.cdf(z)
    if alternative == 'greater':
        return 1 - scipy.stats.norm.cdf(z)
    # Same wording as the other validators in this module.
    raise ValueError('Недопустимое значения параметра "alternative"\n'
                     'допустимо: "two-sided", "less" или "greater"')
def my_proportions_diff_z_stat_ind(sample1, sample2):
    """Z statistic for the difference of proportions of two independent
    binary samples, using the pooled proportion for the standard error."""
    n1, n2 = len(sample1), len(sample2)
    p1 = sum(sample1) / n1
    p2 = sum(sample2) / n2
    pooled = (p1 * n1 + p2 * n2) / (n1 + n2)
    se = np.sqrt(pooled * (1 - pooled) * (1 / n1 + 1 / n2))
    return (p1 - p2) / se
def my_proportions_diff_z_test(z_stat, alternative='two-sided'):
    """Convert a z statistic into a p-value under the chosen alternative."""
    if alternative == 'two-sided':
        return 2 * (1 - scipy.stats.norm.cdf(abs(z_stat)))
    if alternative == 'less':
        return scipy.stats.norm.cdf(z_stat)
    if alternative == 'greater':
        return 1 - scipy.stats.norm.cdf(z_stat)
    # No branch matched: same validation error as the original raised up front.
    raise ValueError('Недопустимое значения параметра "alternative"\n'
                     'допустимо: "two-sided", "less" или "greater"')
def my_proportions_diff_z_stat_rel(sample1, sample2):
    """Z statistic for the difference of proportions of two paired binary
    samples (McNemar-style discordant-pair counts)."""
    pairs = list(zip(sample1, sample2))
    n = len(pairs)
    f = sum(1 for a, b in pairs if a == 1 and b == 0)  # 1 -> 0 flips
    g = sum(1 for a, b in pairs if a == 0 and b == 1)  # 0 -> 1 flips
    return (f - g) / np.sqrt(f + g - (f - g) ** 2 / n)
def my_get_boostraps_samples(data, n_samples):
    """Draw ``n_samples`` bootstrap pseudo-samples (sampling with
    replacement) from the numpy array ``data``."""
    size = len(data)
    picks = np.random.randint(0, size, (n_samples, size))
    return data[picks]
def my_stat_intervals(stat, alpha=0.05):
    """Empirical (percentile) confidence interval at level 1 - alpha."""
    bounds = np.percentile(stat, [100 * alpha / 2., 100 * (1 - alpha / 2.)])
    return bounds[0], bounds[1]
def my_permutation_test(sample1, sample2, max_permutations = None, alternative = 'two-sided'):
    """Permutation-test p-value for the difference of means of two samples.

    The p-value is the fraction of the permutation null distribution at
    least as extreme as the observed statistic.
    """
    if alternative not in ['two-sided', 'less', 'greater']:
        raise ValueError('Недопустимое значения параметра "alternative"\n'
                         'допустимо: "two-sided", "less" или "greater"')
    t_stat = my_permutation_t_stat_ind(sample1, sample2)
    zero_distr = my_permutation_zero_dist_ind(sample1, sample2, max_permutations)
    if alternative == 'two-sided':
        hits = sum(1. for x in zero_distr if abs(x) >= abs(t_stat))
    elif alternative == 'less':
        hits = sum(1. for x in zero_distr if x <= t_stat)
    else:
        hits = sum(1. for x in zero_distr if x >= t_stat)
    return hits / len(zero_distr)
def my_get_random_combinations(n1, n2, max_combinations):
    """Generate up to ``max_combinations`` distinct random splits of
    range(n1 + n2) into a group of n1 and a group of n2 indices.

    The identity ordering is always included; shuffles that repeat an
    earlier permutation are deduplicated by the set, so fewer than
    ``max_combinations`` splits may be returned.
    """
    order = list(range(n1 + n2))
    seen = {tuple(order)}
    for _ in range(max_combinations - 1):
        np.random.shuffle(order)
        seen.add(tuple(order))
    return [(perm[:n1], perm[n1:]) for perm in seen]
def my_permutation_zero_dist_ind(sample1, sample2, max_combinations = None):
    """Null (permutation) distribution of the difference of group means.

    If ``max_combinations`` is given, a random subset of splits is drawn
    via ``my_get_random_combinations``; otherwise every C(n, n1) split is
    enumerated exactly.
    """
    joined_sample = np.hstack((sample1, sample2))
    n1 = len(sample1)
    n2 = len(sample2)
    n = len(joined_sample)
    if max_combinations:
        indices = my_get_random_combinations(n1, n2, max_combinations)
    else:
        # Materialize each complement eagerly.  The original built it with
        # ``filter(lambda i: i not in index, range(n))``: those lazy filters
        # captured the loop variable by reference, so when they were finally
        # consumed every one of them used the *last* combination (classic
        # late-binding closure bug) and the exhaustive distribution was wrong.
        indices = [(list(combo), [i for i in range(n) if i not in combo])
                   for combo in itertools.combinations(range(n), n1)]
    distr = [joined_sample[list(group1)].mean() - joined_sample[list(group2)].mean()
             for group1, group2 in indices]
    return distr
def my_permutation_t_stat_ind(sample1, sample2):
    """Test statistic: difference of the two sample means."""
    return np.mean(sample1) - np.mean(sample2)
print('2.4. Достигаемый уровень значимости для гипотезы, что среднее значение уровня кальция отличается от среднего:')
answer24 = round(my_p_value(expect_mean=9.5, std=0.4, n=160, sample_mean=9.57, alpha=0.95),4)
print('answer 2.4. = ' ,answer24)
df = pd.read_csv('diamonds.txt', sep='\t', header=0)
print(df.head())
data = df.drop('price', axis='columns')
# print(data.head())
target = df.price
# print(target.head())
train_data, test_data, train_target, test_target = model_selection.train_test_split(data, target,
test_size=0.25,
random_state=1)
estimator_1 = linear_model.LinearRegression()
estimator_1.fit(train_data, train_target)
predictions_1 = estimator_1.predict(test_data)
# print(predictions_1)
error_1 = metrics.mean_absolute_error(predictions_1, test_target)
print('Средняя абсолютная ошибка логистической регрессии: ', error_1)
errors_1 = abs(test_target - predictions_1)
# print(errors_1)
std_1 = errors_1.std(ddof=1)
estimator_2 = ensemble.RandomForestRegressor(n_estimators=10, random_state=1)
estimator_2.fit(train_data, train_target)
predictions_2 = estimator_2.predict(test_data)
# print(predictions_2)
error_2 = metrics.mean_absolute_error(predictions_2, test_target)
print('Средняя абсолютная ошибка случайного леса: ', error_2)
errors_2 = abs(test_target - predictions_2)
# print(errors_2)
std_2 = errors_2.std(ddof=1)
print('Критерий Стьюдената для проверки равенства средних двух связанных выборок:')
print(stats.ttest_rel(errors_1, errors_2))
print(stats.ttest_rel(abs(test_target - estimator_1.predict(test_data)),
abs(test_target - estimator_2.predict(test_data))))
print('Доверительный интервал для разности средних значений зависмых выборок:')
interval = DescrStatsW(errors_1 - errors_2).tconfint_mean()
print('[{}; {}]'.format(interval[0], interval[1]))
print('3.3. Достигаемый уровень значимости при альтернативе заразительности зевоты:')
n_test = 34
n_control = 16
test = np.array([1]*10 + [0]*(n_test-10))
control = np.array([1]*4 + [0]*(n_control-4))
z_stat = my_proportions_diff_z_stat_ind(test, control)
answer33 = round(my_proportions_diff_z_test(z_stat, alternative='greater'),4)
print('answer 3.3. = ', answer33)
df = pd.read_csv('banknotes.txt', header=0, sep='\t')
print(df.head())
data = df.drop('real', axis='columns')
print(data.head())
target = df['real']
train_data, test_data, train_target, test_target = model_selection.train_test_split(data, target,
test_size=0.25,
random_state=1)
train_data_2 = train_data.drop(['X1', 'X2', 'X3'], axis='columns')
train_data_1 = train_data.drop(['X4', 'X5', 'X6'], axis='columns')
test_data_2 = test_data.drop(['X1', 'X2', 'X3'], axis='columns')
test_data_1 = test_data.drop(['X4', 'X5', 'X6'], axis='columns')
estimator_1 = linear_model.LogisticRegression(solver='liblinear')
estimator_1.fit(train_data_1, train_target)
predictions_1 = estimator_1.predict(test_data_1)
# print(test_target)
print('predictions_1:')
print(predictions_1)
accuracy_1 = metrics.accuracy_score(test_target, predictions_1)
print('Доля ошибок первого классификатора: ', 1-accuracy_1)
errors_1 = [0 if a == b else 1 for a,b in zip(predictions_1,test_target)]
print('errors_1')
print(errors_1)
estimator_2 = linear_model.LogisticRegression(solver='liblinear')
estimator_2.fit(train_data_2, train_target)
predictions_2 = estimator_2.predict(test_data_2)
print('predictions_2:')
print(predictions_2)
accuracy_2 = metrics.accuracy_score(test_target, predictions_2)
print('Доля ошибок второго классификатора: ', 1-accuracy_2)
errors_2 = [0 if a == b else 1 for a,b in zip(predictions_2,test_target)]
print('errors_2')
print(errors_2)
p_value = my_proportions_diff_z_test(my_proportions_diff_z_stat_rel(errors_1, errors_2))
print('3.4. Значение достижимого уровня значимости: ', p_value)
print('3.5. Доверительный интервал для разности долей ошибок двух классификаторов:')
print(my_proportions_confint_diff_rel(errors_1, errors_2))
print('3.6. Достигаемый уровень значимости для гипотезы о неэффективности программы: ')
control_mean = 525
control_std = 100
test_mean = 541.4
test_n = 100
answer36 = round(my_p_value(expect_mean=525, std=100, n=100, sample_mean=541.4, alpha=0.95, alternative='greater'),4)
print('answer 3.6. = ', answer36)
print('3.7. Достигаемый уровень значимости для гипотезы о неэффективности программы (с увеличенным средним): ')
control_mean = 525
control_std = 100
test_mean = 541.5
test_n = 100
answer37 = round(my_p_value(control_mean, control_std, test_n, test_mean, alpha=0.95, alternative='greater'),4)
print('answer 3.7. = ', answer37)
sample = np.array([49,58,75,110,112,132,151,276,281,362]) - 200
print('4.4. Достижимый уровень значимости для критерия знаковых рангов против двусторонней альтернативы:')
answer44 = round(stats.wilcoxon(sample, mode='approx')[1],4)
print('answer 4.4. = ', answer44)
print('4.5. Достижимый уровень значимости для критерия знаковых рангов против односторонней альтернативы:')
sample1 = np.array([22,22,15,13,19,19,18,20,21,13,13,15])
sample2 = np.array([17,18,18,15,12,4,14,15,10])
answer45 = round(stats.mannwhitneyu(sample1, sample2)[1],4)
print('answer 4.5. = ', answer45)
print('4.6. 95% доверительный интервал для разности средних температур воздуха при запусках при помощи бустрепа:')
df = pd.read_csv('challenger.txt', sep='\t', header=0)
df.columns=['Date', 'Temperature', 'Incident']
# print(df.head())
my_ones = df.Temperature[df.Incident == 1].values
my_zeros = df.Temperature[df.Incident == 0].values
np.random.seed(0)
new_ones = my_get_boostraps_samples(my_ones, 1000)
new_zeros = my_get_boostraps_samples(my_zeros, 1000)
zeros_mean = np.mean(new_zeros, axis=1)
ones_mean = np.mean(new_ones, axis=1)
my_list = list(map(lambda x: x[1] - x[0], zip(ones_mean, zeros_mean)))
print(my_stat_intervals(my_list))
answer46 = round(my_stat_intervals(my_list)[0],4)
print('answer 4.6. = ', answer46)
print('4.7. Проверка нулевой гипотезы при помощи перестоновочного критерия:')
np.random.seed = 0
answer47 = round(my_permutation_test(my_ones, my_zeros, max_permutations=10000),4)
print('answer 4.7. = ', answer47) | RBVV23/Coursera | Построение выводов по данным/Week_2/sandbox_2.py | sandbox_2.py | py | 11,880 | python | en | code | 0 | github-code | 13 |
21091318947 | # coding=utf-8
from dataviz import Dataviz
from altair import Chart, load_dataset, X, Y
df = load_dataset('seattle-weather')
dataviz = Dataviz("Seattle Weather")
overview_chart = Chart(df).mark_bar(stacked='normalize').encode(
X('date:T', timeUnit='month'),
Y('count(*):Q'),
color='weather',
)
dataviz.add("commented", title="Overview", charts=[overview_chart],
comment= "Lorem ipsum dolor sit amet, cum pertinacia definitionem an. His ne oratio facilis voluptatum, nam lorem putant qualisque ad. Mea in affert nostrum. Mea cu ignota adipiscing. Omnis mnesarchum vix cu, omnes impedit democritum nec te. Malorum urbanitas consectetuer ei eam, no sea paulo tollit detracto."
)
chart_a = Chart(df).mark_bar().encode(
X('precipitation', bin=True),
Y('count(*):Q')
)
chart_b = Chart(df).mark_line().encode(
X('date:T', timeUnit='month'),
Y('average(precipitation)')
)
chart_c = Chart(df).mark_line().encode(
X('date:T', timeUnit='month'),
Y('average(temp_max)'),
)
dataviz.add("titled", title="Precipitations", charts=[chart_a, chart_b, chart_c])
dataviz.serve() | matteo-ronchetti/dataviz | test.py | test.py | py | 1,128 | python | en | code | 0 | github-code | 13 |
26945689783 | # -*- coding: utf-8 -*-
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from rq import Worker, Queue, Connection
import os
import redis
REDIS_URL = os.getenv('REDIS_URL', 'redis://localhost:6379')
listen = ['high', 'default', 'low']
conn = redis.from_url(REDIS_URL)
if __name__ == '__main__':
    # Start an RQ worker bound to the configured Redis connection,
    # consuming jobs from the 'high', 'default' and 'low' queues in order.
    with Connection(conn):
        worker = Worker(map(Queue, listen))
        worker.work()
| bemau/BotyPy | worker.py | worker.py | py | 466 | python | en | code | 0 | github-code | 13 |
17049166424 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BeikeAccountResponse(object):
    """Response payload describing a Beike account balance change.

    Mirrors the Alipay OpenAPI wire format: ``to_alipay_dict`` emits only
    the fields that are set (truthy), and ``from_alipay_dict`` accepts a
    partially populated dict.
    """

    # Field names shared by the dict conversion helpers below.
    _FIELDS = ('change_amount', 'current_amount', 'outer_biz_no')

    def __init__(self):
        self._change_amount = None
        self._current_amount = None
        self._outer_biz_no = None

    @property
    def change_amount(self):
        return self._change_amount

    @change_amount.setter
    def change_amount(self, value):
        self._change_amount = value

    @property
    def current_amount(self):
        return self._current_amount

    @current_amount.setter
    def current_amount(self, value):
        self._current_amount = value

    @property
    def outer_biz_no(self):
        return self._outer_biz_no

    @outer_biz_no.setter
    def outer_biz_no(self, value):
        self._outer_biz_no = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, skipping unset (falsy) fields and
        recursing into values that are themselves API objects."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a dict; empty/None input yields None."""
        if not d:
            return None
        o = BeikeAccountResponse()
        for name in BeikeAccountResponse._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/BeikeAccountResponse.py | BeikeAccountResponse.py | py | 2,030 | python | en | code | 241 | github-code | 13 |
15819949312 | import sys
from PyQt6.QtCore import Qt
from PyQt6.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel, QPushButton, QFileDialog, QMessageBox, QListWidget, QListWidgetItem, QAbstractItemView
from init import get_years, get_unique_values, filter_training_dataframe, create_test_dataframe, create_model, fit_model, predict_results, get_feature_importance
class ImportData(QWidget):
    """Wizard-like window: upload a cohort CSV, pick cohort years, pick
    courses, then train/predict via the helpers imported from ``init``.

    The same window is reused for every step: widgets of the previous step
    are hidden and new ones appended to ``self.layout``.
    """
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Upload Data")
        self.setFixedSize(800, 400)
        self.layout = QVBoxLayout()
        self.layout.setAlignment(Qt.AlignmentFlag.AlignCenter)
        self.setLayout(self.layout)
        # Step 1 widgets: file label, browse and submit buttons.
        self.file_label = QLabel("No file selected")
        self.layout.addWidget(self.file_label)
        self.browse_button = QPushButton("Browse")
        self.browse_button.clicked.connect(self.open_file_dialog)
        self.layout.addWidget(self.browse_button)
        self.upload_button = QPushButton("Submit")
        self.upload_button.clicked.connect(self.upload_data)
        self.layout.addWidget(self.upload_button)
    def open_file_dialog(self):
        """Let the user pick an existing file and show its path in the label."""
        file_dialog = QFileDialog()
        file_dialog.setFileMode(QFileDialog.FileMode.ExistingFile)
        if file_dialog.exec():
            selected_files = file_dialog.selectedFiles()
            if selected_files:
                file_path = selected_files[0]
                # The "Selected file: " prefix is stripped again in upload_data.
                self.file_label.setText(f"Selected file: {file_path}")
    def upload_data(self):
        """Validate the chosen CSV, load it, and show the cohort-year picker."""
        file_path = self.file_label.text().replace("Selected file: ", "")
        if file_path.endswith(".csv"):
            # Process the CSV file here
            # This is where calls to the database/server should go
            QMessageBox.information(self, "Success", "File uploaded successfully!")
            self.browse_button.hide()
            self.upload_button.hide()
            year_values, clean_cohort_df = get_years(file_path)
            self.list_widget = QListWidget()
            self.list_widget.setSelectionMode(QAbstractItemView.SelectionMode.ExtendedSelection) # Allow multiple selections
            for value in year_values:
                item = QListWidgetItem(str(value))
                self.list_widget.addItem(item)
            self.submit_courses_button = QPushButton("Submit")
            self.submit_courses_button.clicked.connect(lambda: self.submit_courses(clean_cohort_df))
            self.layout.addWidget(self.list_widget)
            self.layout.addWidget(self.submit_courses_button)
            self.layout.addSpacing(40)
        else:
            QMessageBox.critical(self, "Error", "Invalid file format. Please select a CSV file.")
    def submit_courses(self, clean_cohort_df):
        """Read the selected cohort years and show the course picker."""
        # Find the QListWidget in the parent widget
        list_widget = self.findChild(QListWidget)
        if list_widget:
            selected_items = list_widget.selectedItems()
            selected_values = [item.text() for item in selected_items]
            if (len(selected_values) == 0):
                QMessageBox.critical(self, "Error", "No cohort years were selected. Please try again.")
            else:
                unique_courses = get_unique_values(clean_cohort_df, selected_values)
                # Replace the year picker with a course picker.
                self.list_widget.hide()
                self.submit_courses_button.hide()
                self.list_widget = QListWidget()
                self.list_widget.setSelectionMode(QAbstractItemView.SelectionMode.ExtendedSelection) # Allow multiple selections
                self.submit_training_button = QPushButton("Submit")
                self.submit_training_button.clicked.connect(lambda: self.upload_new_data(clean_cohort_df, selected_values, self.list_widget.selectedItems()))
                self.select_all_button = QPushButton("Select All")
                self.select_all_button.clicked.connect(lambda: self.select_all_items())
                for value in unique_courses:
                    item = QListWidgetItem(str(value))
                    self.list_widget.addItem(item)
                self.layout.addWidget(self.list_widget)
                self.layout.addWidget(self.submit_training_button)
                self.layout.addWidget(self.select_all_button)
                self.layout.addSpacing(40)
    def select_all_items(self):
        """Select every row of the current course list."""
        for index in range(self.list_widget.count()):
            item = self.list_widget.item(index)
            item.setSelected(True)
    def upload_new_data(self, clean_cohort_df, selected_cohorts, selected_courses):
        """Build train/test frames from the selections, fit the model,
        compute predictions and feature importances, then close the window."""
        selected_courses = [item.text() for item in selected_courses]
        if (len(selected_courses) == 0):
            QMessageBox.critical(self, "Error", "No courses were selected. Please try again.")
        else:
            training_df = filter_training_dataframe(clean_cohort_df, selected_cohorts, selected_courses)
            testing_df = create_test_dataframe(clean_cohort_df, selected_courses)
            X_train, y_train, X_test, ID_df, feature_names = create_model(training_df, testing_df)
            fitted_model = fit_model(X_train, y_train)
            # NOTE(review): results are computed but not persisted here;
            # presumably a later change writes them to the database. TODO confirm.
            logresults = predict_results(fitted_model, X_test, ID_df)
            feature_importance_df = get_feature_importance(fitted_model, feature_names)
            # NOTE(review): "uploated" typo in this user-facing message.
            QMessageBox.information(self, "Success", "Data uploated to database! This screen will now close.")
            self.close()
if __name__ == "__main__":
app = QApplication(sys.argv)
upload_data = ImportData()
upload_data.show()
sys.exit(app.exec()) | EarlyInterventions/earlyinterventions | src/ImportData.py | ImportData.py | py | 5,633 | python | en | code | 0 | github-code | 13 |
34799712372 | # coding: utf-8
# @author: hongxin
# @date: 18-6-2
"""
参考文档: https://www.showdoc.cc/page/102098
"""
from requests import post
def check_article_suffix(article_path):
"""
检查文件格式是否为md
:param article_path:
:return:
"""
if article_path.split('.')[1] == 'md':
return True
else:
return None
def get_article_title(article_path):
path = article_path.split('.')[0]
title = path.split('/')[-1]
return title
def get_article_content(article_abs_path):
with open(article_abs_path, 'r') as read_article:
article_content = ''.join(read_article.readlines())
if '---\n' in article_content:
article_text = article_content.split('---')[2] # 正文
article_head = article_content.split('---')[1] # 题头
date = article_head.split('\n')[2]
date = date.replace('date', '__date__')
all_article_content = date + article_text
else:
all_article_content = article_content
return all_article_content
def generate_article(url, article_data):
request = post(url=url, data=article_data)
return request.text
def import_article_to_showdoc(api_url, api_key, api_token, article_dir, article_dir_sub, article_path):
"""
:param api_url: 上传url
:param api_key: 上传的key
:param api_token: 上传的token
:param article_dir: showdoc 一级目录
:param article_dir_sub: showdoc 二级目录
:param article_path: markdown路径
:return:
"""
if (api_url and api_key and api_token and article_dir and article_path) is not None:
if check_article_suffix(article_path):
# 一级,二级目录
cat_name, cat_name_sub = article_dir, article_dir_sub
# 获取文章标题
page_title = get_article_title(article_path)
# 获取文章内容
page_content = get_article_content(article_path)
# 文章排序
s_number = '99'
# post数据
post_datas = {'api_key': api_key, 'api_token': api_token, 'cat_name': cat_name,
'cat_name_sub': cat_name_sub,
'page_title': page_title, 'page_content': str(page_content), 's_number': s_number}
# post 请求
try:
response_text = generate_article(api_url, post_datas)
response_text = eval(response_text)
page_id = str(response_text['data']['page_id'])
print(page_id)
return True
except Exception:
return None
else:
print('article suffix is not md')
return None
else:
print('article path is not exist')
return None
if __name__ == '__main__':
# 测试
url = 'https://www.showdoc.cc/server/api/item/updateByApi'
key = 'xxx'
token = 'xxxx'
article = 'Other'
article_sub = ''
path = '/run/xxxx/Linux-1-Yum源介绍.md'
import_article_to_showdoc(url, key, token, article, article_sub, path)
| xiehongxin/ShowdocUpload | upload_markdown_module.py | upload_markdown_module.py | py | 3,132 | python | en | code | 1 | github-code | 13 |
41290424135 | from random import randint
def maior(* num):
print(num)
lista = []
ind = 0
for lis in range(0, randint(3, 10)):
lis = randint(1, 50)
print(lis)
lista.append(lis)
ind += 1
print(lista)
lista.sort()
quant = len(lista)
print(f'sua lista de numeros contém {quant} valores')
print(f'Os valores são: {lista}, e o maior valor é {lista[-1]}') | FernandoBoshy/Estudos-python | curso em video/ex099.py | ex099.py | py | 361 | python | pt | code | 0 | github-code | 13 |
24964222420 | import json
from rest_framework.decorators import api_view
from rest_framework import status
from rest_framework.response import Response
from .handleDB import *
from .serializers import *
@api_view(['POST'])
def register(request):
"""
{
"name": "Demo User8",
"email": "demouser8@gmail.com",
"college": "Yeshwantrao Chavan College of Engineering",
"key": "YCCE",
"mobile": 8888888888
}
"""
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
data = serializer.data
college = data['college']
key = data['key']
user_data = {
'name': data['name'],
'email': data['email'],
'college': data['college'],
'mobile': data['mobile'],
}
user_id = data["email"].split("@")[0]
if check_id_exist(user_id) != 0:
print("EMAIL ALREADY EXIST")
return Response("EMAIL ALREADY EXIST", status=status.HTTP_400_BAD_REQUEST)
if check_college_exist(college) != 1:
print("COLLEGE DOES NOT EXIST")
return Response("COLLEGE DOES NOT EXIST", status=status.HTTP_400_BAD_REQUEST)
collegekey = get_college_key(college)
if (collegekey == -1):
print("KEY FINDING ERROR")
return Response("KEY FINDING ERROR", status=status.HTTP_401_UNAUTHORIZED)
if (key == collegekey):
print("MATCHED")
res = create_user(user_data, user_id)
if res != 1:
print("ERROR IN PUSHING USER DATA TO DB")
return Response("ERROR IN PUSHING USER DATA TO DB", status=status.HTTP_401_UNAUTHORIZED)
else:
return Response("REGISTERED SUCCESSFULLY", status=status.HTTP_201_CREATED)
else:
print("NOT MATCHED")
return Response("WRONG KEY", status=status.HTTP_401_UNAUTHORIZED)
else:
return Response("INVALID DATA", status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def login(request):
"""
{
"email": "demouser8@gmail.com",
"college": "Yeshwantrao Chavan College of Engineering",
"key": "YCCE"
}
"""
serializer = UserLoginSerializer(data=request.data)
if serializer.is_valid():
data = serializer.data
email = data['email']
college = data['college']
key = data['key']
user_id = email.split("@")[0]
if (check_id_exist(user_id) != 1):
print("EMAIL DOES NOT EXIST")
return Response("EMAIL DOES NOT EXIST", status=status.HTTP_401_UNAUTHORIZED)
clg = get_college_name(user_id)
if (clg != college):
print("WRONG COLLEGE NAME")
return Response("WRONG COLLEGE NAME", status=status.HTTP_401_UNAUTHORIZED)
collegekey = get_college_key(college)
if (collegekey == -1):
print("KEY FINDING ERROR")
return Response("KEY FINDING ERROR", status=status.HTTP_401_UNAUTHORIZED)
elif (key == collegekey):
print("LOGGED IN SUCCESFULLY")
return Response("LOGGED IN SUCCESSFULLY", status=status.HTTP_200_OK)
else:
print("NOT MATCHED")
return Response("WRONG key", status=status.HTTP_401_UNAUTHORIZED)
else:
return Response("INVALID DATA", status=status.HTTP_400_BAD_REQUEST)
# helper function to generate analytics
def generate_test_analysis(email, uid):
correct_answers = get_correct_answers()
# ORIGINAL CODE Uncomment this
#user_responses = get_user_responses(email)
# if user_responses == None:
# return -1
# TEMPORARY CODE TO CHECK API --> Bhushan Wanjari
language_chosen1='c'
language_chosen2='python'
user_responses={}
i=0
for question_no in correct_answers:
i=i+1
subject = correct_answers[question_no]['subject']
topic = correct_answers[question_no]['topic']
# correct_ans = correct_answers[question_no]['answer']
if(subject=='language' and (topic!=language_chosen1 and topic!=language_chosen2)):
user_responses[question_no + 2]="" ## Putting Blank answer
elif(i%7==0 or i%5==0):
user_responses[question_no + 2]=correct_answers[question_no]['answer']
else:
user_responses[question_no + 2]='setting wrong answer'
### END
# DB Fields
scores = {}
level_wise_distribution = {}
topic_wise_distribution = {}
total_score = 0
## language skipped checker
lang={
"c":0,
"c++":0,
"python":0,
"java":0
}
lang_total={
"c":0,
"c++":0,
"python":0,
"java":0
}
for question_no in correct_answers:
subject = correct_answers[question_no]['subject']
topic = correct_answers[question_no]['topic']
if(subject=='language'):
# checking blank answers
lang_total[topic]=lang_total[topic]+1
if (user_responses[question_no + 2].strip() == ""):
lang[topic]=lang[topic]+1
skipped_lang=[]
if(lang_total['c']==lang['c']):
skipped_lang.append('c')
if(lang_total['c++']==lang['c++']):
skipped_lang.append('c++')
if(lang_total['python']==lang['python']):
skipped_lang.append('python')
if(lang_total['java']==lang['java']):
skipped_lang.append('java')
for question_no in correct_answers:
question = correct_answers[question_no]['question']
correct_ans = correct_answers[question_no]['answer']
subject = correct_answers[question_no]['subject']
topic = correct_answers[question_no]['topic']
difficulty = correct_answers[question_no]['level']
# Field check
if not subject in scores:
scores[subject] = 0
if not subject in level_wise_distribution:
level_wise_distribution[subject] = {
"hard": [0, 0, 0],
"medium": [0, 0, 0],
"easy": [0, 0, 0]
}
if not subject in topic_wise_distribution:
topic_wise_distribution[subject] = {}
if(subject!='language'):
if not topic in topic_wise_distribution[subject]:
topic_wise_distribution[subject][topic] = [0, 0, 0]
elif(subject=='language' and (topic!=skipped_lang[0] and topic!=skipped_lang[1])):
if not topic in topic_wise_distribution[subject]:
topic_wise_distribution[subject][topic] = [0, 0, 0]
if difficulty == "easy":
points = 2
elif difficulty == "medium":
points = 4
elif difficulty == "hard":
points = 6
else:
print(difficulty)
return -1
# DEBUGGING
print(question_no, correct_answers[question_no]["id"])
print(correct_ans.strip())
print(user_responses[question_no + 2].strip())
print(correct_ans.strip() == user_responses[question_no + 2].strip())
# correct then -> +2 bcoz first 3 columns are timestamp, email, score
if(subject=='language' and (topic==skipped_lang[0] or topic==skipped_lang[1])):
continue
elif (user_responses[question_no + 2].strip() == correct_ans.strip()):
# increment no. of correct ans
level_wise_distribution[subject][difficulty][1] += 1
topic_wise_distribution[subject][topic][1] += 1
total_score += points
scores[subject] += points
else:
# increment no. of incorrect ans
level_wise_distribution[subject][difficulty][2] += 1
topic_wise_distribution[subject][topic][2] += 1
# increment no. of total ques
level_wise_distribution[subject][difficulty][0] += 1
topic_wise_distribution[subject][topic][0] += 1
res = update_scored_db(total_score, scores, level_wise_distribution, topic_wise_distribution, uid)
if res == -1:
print("Total Score:", total_score)
print("Scores:", scores)
print("level_wise_distribution:", level_wise_distribution)
print("topic_wise_distribution:", topic_wise_distribution)
print("uid:", uid)
return -1
return 1
@api_view(['POST'])
def analytics(request):
"""
{
"email": "demouser6@gmail.com",
"subject" : "overall"
}
"""
serializer = AnalysisSerializer(data=request.data)
if serializer.is_valid():
email = serializer.data['email']
subject = serializer.data['subject']
user_id = email.split("@")[0]
# check if email exist
if check_id_exist(user_id) != 1:
return Response("NO USER FOUND", status=status.HTTP_404_NOT_FOUND)
res = check_analytics_exist(user_id)
# analytics not generated
if res != 1:
print("GENERATING ANALYTICS")
result = generate_test_analysis(email, user_id)
print(result)
if result == -1:
return Response("USER NOT SUBMITTED THE TEST", status=status.HTTP_404_NOT_FOUND)
else:
print("ANALYTICS GENERATED SUCCESSFULLY")
############# RETURNING JSON RESPONSE ///// ANALYSIS DATA
data = get_user_data(email)
name = data['name']
subject_scores = []
subject_labels = []
correct = []
incorrect = []
hard = medium = easy = total = 0
true_subject = ""
if (subject == 'overall'):
true_subject = subject
elif subject == 'cn':
true_subject = "Computer NetWorks"
elif subject == 'os':
true_subject = "Operating Systems"
elif subject == 'dbms':
true_subject = "Database Management System"
elif subject == 'dsa':
true_subject = "Data Structures and Algorithms"
elif subject == 'oops':
true_subject = "Object Oriented Programming"
else:
true_subject = subject
if (subject == 'overall'):
total = data['total_score']
for sub in data['level_wise_distribution']:
subject_labels.append(sub)
subject_scores.append(data['scores'][sub])
innerdata = data['level_wise_distribution'][sub]
hard += innerdata['hard'][0]
medium += innerdata['medium'][0]
easy += innerdata['easy'][0]
correct.append(innerdata['hard'][1] + innerdata['medium'][1] + innerdata['easy'][1])
incorrect.append(innerdata['hard'][2] + innerdata['medium'][2] + innerdata['easy'][2])
else:
hard = data['level_wise_distribution'][subject]['hard'][0]
medium = data['level_wise_distribution'][subject]['medium'][0]
easy = data['level_wise_distribution'][subject]['easy'][0]
total = hard + easy + medium
for topic in data['topic_wise_distribution'][subject]:
subject_labels.append(topic)
innerdata = data['topic_wise_distribution'][subject][topic]
correct.append(innerdata[1])
incorrect.append(innerdata[2])
subject_scores.append(innerdata[0])
Negative_Incorrects = []
for i in incorrect:
Negative_Incorrects.append(-1 * i)
returndata = {
'name': name,
'total': total,
'subject': true_subject,
'leetcode': {
'series': [hard, medium, easy],
'labels': ["Hard", "Medium", "Easy"],
},
'stackgraph': {
'series': [
{
'name': "Correct",
'data': correct,
},
{
'name': "Incorrect",
'data': Negative_Incorrects,
},
],
'labels': subject_labels,
},
'linegraph': {
'labels': subject_labels,
'series': [
{
'name': "Subjects",
'data': subject_scores,
},
],
},
'piechart': {
'series': subject_scores,
'labels': subject_labels,
},
}
return Response(returndata)
else:
return Response("Invalid data", status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def ranklist(request):
"""
{
"college" : "Shri Ramdeobaba College of Engineering and Management"
}
"""
serializer = CollegeRankListSerializer(data = request.data)
if serializer.is_valid():
college = serializer.data['college']
lst = get_college_ranklist(college)
data = {
"ranklist": lst
}
return Response(data, status=status.HTTP_200_OK)
return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def globalranklist(request):
lst = get_global_ranklist()
data = {
"ranklist": lst
}
return Response(data, status=status.HTTP_200_OK)
@api_view(['GET'])
def question_bank(request):
questions = get_all_questions()
return Response({"data": questions}, status=status.HTTP_200_OK)
@api_view(['GET'])
def test_link(request):
testlink = get_test_link()
return Response({"link": testlink}, status=status.HTTP_200_OK)
@api_view(['GET'])
def college_list(request):
college_names = list(sorted(get_all_colleges()))
return Response({"clg_names" : college_names}, status=status.HTTP_200_OK)
@api_view(['POST'])
def weakest_topics(request):
"""
{
"email" : "demouser6@gmail.com"
}
"""
serializer = EmailSerializer(data=request.data)
if serializer.is_valid():
ser_data = serializer.data
email = ser_data['email']
user_id = email.split("@")[0]
user_data = get_user_data(email)
subject_list = []
topic_list = []
# calculating 85% score benchmark from any random topic
questions = db.collection("ques_bank").get()
question1 = questions[0].to_dict()
question1_subject = question1['subject']
question1_topic = question1['topic']
tpoic_questions = db.collection("ques_bank").where(u'subject', u'==', question1_subject).where(u'topic', u'==', question1_topic).get()
score = 0
for question in tpoic_questions:
dict = question.to_dict()
if(dict['level']=='easy'):
score=score+2
elif(dict['level']=='medium'):
score=score+4
elif(dict['level']=='hard'):
score=score+6
score_85 = score*0.85
print(score_85)
subjects = user_data["topic_wise_distribution"]
for subject in subjects.keys():
topics = subjects[subject]
var = score_85
weak_topic = ""
for topic in topics.keys():
mark = topics[topic][0]
if (mark < var):
var = mark
weak_topic = topic
# if there is no weak topic in subject, subject will not be added
if(weak_topic!=""):
subject_list.append(subject)
topic_list.append(weak_topic)
print(subject_list)
print(topic_list)
dict = {}
sz = len(subject_list)
for i in range(0, sz):
dict.update({subject_list[i]: topic_list[i]})
return Response(dict, status=status.HTTP_200_OK)
return Response("INVALID DATA (ISSUE IN SERIALIZATION)", status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def subjectranklist(request):
"""
{
"subject" : "dbms"
}
"""
data = {}
serializer = SubjectRanklistSerializer(data = request.data)
if serializer.is_valid():
subject = serializer.data['subject']
lst = get_subject_ranklist(subject)
print(subject)
data = {
"ranklist": lst
}
return Response(data, status=status.HTTP_200_OK)
return Response(data, status=status.HTTP_400_BAD_REQUEST)
"""
@api_view(['POST'])
def weakest_topics(request):
""
{
"email" : "demouser7@gmail.com"
}
""
serializer = EmailSerializer(data=request.data)
if serializer.is_valid():
ser_data = serializer.data
email = ser_data['email']
user_id = email.split("@")[0]
if check_id_exist(user_id) != 1:
print("EMAIL DOES NOT EXIST")
return Response("EMAIL DOES NOT EXIST", status=status.HTTP_400_BAD_REQUEST)
user_data = get_user_data(email)
subjects = user_data["topic_wise_distribution"]
subject_list = []
topic_list = []
for subject in subjects.keys():
subject_list.append(subject)
topics = subjects[subject]
var = 999999
weak_topic = ""
for topic in topics.keys():
mark = topics[topic][0]
if (mark < var):
var = mark
weak_topic = topic
topic_list.append(weak_topic)
print(subject_list)
print(topic_list)
dict = {}
sz = len(subject_list)
for i in range(0, sz):
dict.update({subject_list[i]: topic_list[i]})
return Response(dict, status=status.HTTP_200_OK)
return Response("INVALID DATA (ISSUE IN SERIALIZATION)", status=status.HTTP_400_BAD_REQUEST)
def searching_rank(ranklist, email): # iterating through the entire list to fetch the rank
i = 1
rank = -1
for data in ranklist:
l1 = ranklist[data]
for p in l1:
# print(p)
if p['email'] == email:
rank = i
i += 1
return rank
def Sort_Tuple(tup):
return(sorted(tup, key = lambda x: x[0]))
def generate_ranklist_for_each_subject(email, college , global_ranklist):
data = get_user_data(email)
subject_list = []
subject_rank = {
'CN': {
'score': [],
'email': []
},
'OS': {
'score': [],
'email': []
},
'DBMS': {
'score': [],
'email': []
},
'OOPS': {
'score': [],
'email': []
},
'LOGICAL': {
'score': [],
'email': []
},
'QUANTITATIVE': {
'score': [],
'email': []
},
'DSA': {
'score': [],
'email': []
},
'VERBAL': {
'score': [],
'email': []
},
'OVERALL': {
'score': [],
'email': []
},
}
for subjects in data['level_wise_distribution']:
subject_list.append(subjects.upper())
subject_list.append("OVERALL")
for i in global_ranklist:
for fields in global_ranklist[i]:
lst_score = fields['scores']
actual_list = {'CN': 0, 'OS': 0, 'DBMS': 0, 'OOPS': 0, 'DSA': 0, 'LOGICAL': 0, 'QUANTITATIVE': 0,
'VERBAL': 0 , 'OVERALL' : 0}
for key in lst_score:
actual_list[key.upper()] = lst_score[key]
actual_list["OVERALL"] = fields['total_score']
for subject_name in subject_list:
subject_rank[subject_name]['score'].append(actual_list[subject_name])
subject_rank[subject_name]['email'].append(fields['email'])
return (subject_rank)
def generate_list(subject_wise_rankListG , subject , email):
subject_rank_list = list((zip(subject_wise_rankListG[subject.upper()]['score'] , zip(subject_wise_rankListG[subject.upper()]['email']))))
Sort_Tuple(subject_rank_list)
new_list = Sort_Tuple(subject_rank_list)
new_list.reverse()
subject_rank_list = new_list
subject_rank_dict = {}
for i in range(1 , len(subject_rank_list) + 1):
subject_rank_dict[i] = {'score' : 0 , 'email' : "" }
i = 1
global_subject_rank = -1
subject_marks_user = -1
for it in subject_rank_list:
subject_rank_dict[i] = {'score' : it[0] , 'email' : it[1][0]}
if( it[1][0] == email):
global_subject_rank = i
subject_marks_user = it[0]
i+=1
for it in subject_rank_dict:
email_id = subject_rank_dict[it]['email']
user_data = get_user_data(email_id)
user_college = user_data['college']
user_name = user_data['name']
subject_rank_dict[it]['name'] = user_name
subject_rank_dict[it]['college'] = user_college
return subject_rank_dict
@api_view(['POST'])
def get_user_ranklist_data(request):
""
{
"email" : "demouser7@gmail.com",
"rank_subject" : "overall"
Returns a dict containing
{
subject = "rank_subject",
college_name = "user_college",
college_rank = college rank of user,
global_rank = global rank of user,
global_list = {
"1" : {
"score" : ,
"email": "user_email",
"name": "user_name",
"college": "user_college"
}
}
college_list = {
"1" : {
"score" : ,
"email": "user_email",
"name": "user_name",
"college": "user_college"
}
}
}
}
""
serializer = ranklistSerializer(data=request.data)
if serializer.is_valid():
email = serializer.data['email']
rank_subject = serializer.data['rank_subject']
user_id = email.split("@")[0]
if check_id_exist(user_id) == 1:
user_data = get_user_data(email)
user_college = user_data['college']
user_global_ranklist = get_global_ranklist() # fetching global Rank-list
user_college_ranklist = get_college_ranklist(user_college) # fetching college Rank-list
college_rank = searching_rank(user_college_ranklist, email)
global_rank = searching_rank(user_global_ranklist, email)
subject_wise_rankListG = generate_ranklist_for_each_subject(email, user_college , user_global_ranklist)
subject_wise_rankListC = generate_ranklist_for_each_subject(email, user_college , user_college_ranklist)
subject = rank_subject
subject_rank_dict_G = generate_list(subject_wise_rankListG , subject , email)
subject_rank_dict_C = generate_list(subject_wise_rankListC , subject , email)
user_ranklist_data = {
'subject' : subject,
'college_name': user_college,
'college_rank': college_rank,
'global_rank': global_rank,
'global_list': subject_rank_dict_G,
'college_list': subject_rank_dict_C,
}
return Response(user_ranklist_data, status=status.HTTP_200_OK)
print("EMAIL DOES NOT EXIST")
return Response("EMAIL DOES NOT EXIST", status=status.HTTP_400_BAD_REQUEST)
return Response("INVALID DATA (ISSUE IN SERIALIZATION)", status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def courses_promotion(request):
""
{
"email" : "demouser7@gmail.com"
Returns a dict containing
{
"core": [
"topic 3 of subject cn",
"cn"
],
"sde_bootcamp": [
"topic 2 of subject dsa",
"dsa"
],
"apti": [
"topic 2 of subject logical",
"logical"
]
}
}
""
serializer = EmailSerializer(data=request.data)
if serializer.is_valid():
serz_data = serializer.data
email = serz_data['email']
user_id = email.split("@")[0]
if check_id_exist(user_id) != 1:
print("EMAIL DOES NOT EXIST")
return Response("EMAIL DOES NOT EXIST", status=status.HTTP_400_BAD_REQUEST)
user_data = get_user_data(email)
subjects = user_data["topic_wise_distribution"]
core_topic=""
core_subject=""
sde_bootcamp_topic=""
sde_bootcamp_subject=""
apti_topic=""
apti_subject=""
min_score = get_85percent_score()
# print(min_score)
for subject in subjects.keys():
if(subject=='oops' or subject=='os' or subject=='cn' or subject=='dbms' ):
var = min_score
topics = subjects[subject]
for topic in topics.keys():
mark = topics[topic][0]
if(mark < var):
core_topic=topic
core_subject=subject
var=mark
elif(subject=='dsa'):
var = min_score
topics = subjects[subject]
for topic in topics.keys():
mark = topics[topic][0]
if(mark < var):
sde_bootcamp_topic=topic
sde_bootcamp_subject=subject
var=mark
elif(subject=='verbal' or subject=='quantitative' or subject=='logical'):
var = min_score
topics = subjects[subject]
for topic in topics.keys():
mark = topics[topic][0]
if(mark < var):
apti_topic=topic
apti_subject=subject
var=mark
dict={}
if(core_topic != "" and core_subject!=""):
arr=[core_topic,core_subject]
dict['core']=arr
if(sde_bootcamp_topic != "" and sde_bootcamp_subject!=""):
arr=[sde_bootcamp_topic,sde_bootcamp_subject]
dict['sde_bootcamp']=arr
if(apti_topic != "" and apti_subject!=""):
arr=[apti_topic,apti_subject]
dict['apti']=arr
return Response(dict, status=status.HTTP_200_OK)
return Response("INVALID DATA (ISSUE IN SERIALIZATION)", status=status.HTTP_400_BAD_REQUEST)
""" | Rohitbhojwani/o1analysis | apti_backend/apti_backend/views.py | views.py | py | 26,887 | python | en | code | null | github-code | 13 |
70054678739 | # pylint: skip-file
def main():
'''
ansible oc module for registry
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str',
choices=['present', 'absent']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
credentials=dict(default='/etc/origin/master/openshift-registry.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
labels=dict(default=None, type='list'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
service_account=dict(default='registry', type='str'),
mount_host=dict(default=None, type='str'),
registry_type=dict(default='docker-registry', type='str'),
template=dict(default=None, type='str'),
volume=dict(default='/registry', type='str'),
env_vars=dict(default=None, type='dict'),
volume_mounts=dict(default=None, type='list'),
edits=dict(default=None, type='dict'),
force=dict(default=False, type='bool'),
),
mutually_exclusive=[["registry_type", "images"]],
supports_check_mode=True,
)
rconfig = RegistryConfig(module.params['name'],
module.params['namespace'],
module.params['kubeconfig'],
{'credentials': {'value': module.params['credentials'], 'include': True},
'default_cert': {'value': None, 'include': True},
'images': {'value': module.params['images'], 'include': True},
'latest_images': {'value': module.params['latest_images'], 'include': True},
'labels': {'value': module.params['labels'], 'include': True},
'ports': {'value': ','.join(module.params['ports']), 'include': True},
'replicas': {'value': module.params['replicas'], 'include': True},
'selector': {'value': module.params['selector'], 'include': True},
'service_account': {'value': module.params['service_account'], 'include': True},
'registry_type': {'value': module.params['registry_type'], 'include': False},
'mount_host': {'value': module.params['mount_host'], 'include': True},
'volume': {'value': module.params['mount_host'], 'include': True},
'template': {'value': module.params['template'], 'include': True},
'env_vars': {'value': module.params['env_vars'], 'include': False},
'volume_mounts': {'value': module.params['volume_mounts'], 'include': False},
'edits': {'value': module.params['edits'], 'include': False},
})
ocregistry = Registry(rconfig)
state = module.params['state']
########
# Delete
########
if state == 'absent':
if not ocregistry.exists():
module.exit_json(changed=False, state="absent")
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = ocregistry.delete()
module.exit_json(changed=True, results=api_rval, state="absent")
if state == 'present':
########
# Create
########
if not ocregistry.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
api_rval = ocregistry.create()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
########
# Update
########
if not module.params['force'] and not ocregistry.needs_update():
module.exit_json(changed=False, state="present")
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed an update.')
api_rval = ocregistry.update()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
| openshift/openshift-tools | ansible/roles/lib_openshift_3.2/build/ansible/oadm_registry.py | oadm_registry.py | py | 5,123 | python | en | code | 161 | github-code | 13 |
7471754900 | import math
import torch
import torch.optim as optim
from .utils.kfac_utils import (ComputeCovA, ComputeCovG)
from utils.timing import Timer
from .utils.factors import ComputeI, ComputeG
from .utils.hylo_utils import EmptyBackend
def randomized_svd(B, rank):
    """Approximate a rank-``rank`` factorization of ``B`` via randomized SVD.

    Projects ``B`` onto a random ``rank``-dimensional subspace, orthonormalizes
    the projection, and takes an exact SVD of the much smaller projected matrix
    (the Halko/Martinsson/Tropp randomized range-finder scheme).

    :param B: 2-D tensor of shape ``(m, n)`` to factorize.
    :param rank: target rank; a value < 1 is interpreted as a fraction of
        ``min(m, n)``.
    :return: tuple ``(Us, Vt)`` where ``Us`` has shape ``(m, rank)`` and holds
        ``U @ diag(s)``, and ``Vt`` has shape ``(rank, n)``, so that
        ``Us @ Vt`` approximates ``B``.
    """
    if rank < 1:
        # Fractional rank: treat it as a percentage of the smaller dimension.
        rank = int(rank * min(B.size()))
    n = B.size(1)
    # Create the random test matrix directly on B's device and in B's dtype.
    # The previous code used torch.rand((n, rank)).to(B.device), which
    # allocates on CPU in float32 and fails for half-precision inputs.
    rand_matrix = torch.rand((n, rank), device=B.device, dtype=B.dtype)
    # Orthonormal basis for the range of B restricted to the random subspace.
    Q, _ = torch.linalg.qr(B @ rand_matrix)   # (m, rank)
    smaller_matrix = Q.transpose(0, 1) @ B    # (rank, n)
    # torch.svd is deprecated; torch.linalg.svd returns V already transposed.
    U_hat, s, Vh = torch.linalg.svd(smaller_matrix, full_matrices=False)
    U = Q @ U_hat
    Us = U @ torch.diag(s)
    Vt = Vh[:rank, :]
    return Us, Vt
class HKOROptimizer(optim.Optimizer):
    def __init__(self,
                 model,
                 lr=0.001,
                 momentum=0.9,
                 stat_decay=0.95,
                 damping=0.001,
                 kl_clip=0.001,
                 weight_decay=0,
                 inv_freq=10,
                 batch_averaged=True,
                 measure_time=False,
                 svd=False,
                 backend=EmptyBackend(),
                 half_precision=True):
        """Kronecker-factored (HKOR) preconditioned optimizer.

        Registers forward/backward hooks on every supported layer of ``model``
        (see ``_prepare_model``) so that activations and output gradients can
        be captured and turned into Kronecker factor statistics.

        :param model: network whose parameters are optimized; its modules are
            scanned and hooked.
        :param lr: learning rate, must be >= 0.
        :param momentum: momentum coefficient, must be >= 0.
        :param stat_decay: decay rate for the running factor statistics.
        :param damping: damping added when preconditioning gradients.
        :param kl_clip: clipping coefficient for the preconditioned update.
        :param weight_decay: L2 penalty, must be >= 0.
        :param inv_freq: number of steps between factor refreshes; statistics
            are also captured on each of the first 10 steps (warm-up).
        :param batch_averaged: whether incoming gradients are batch-averaged.
        :param measure_time: enable the internal ``Timer``.
        :param svd: use the SVD-based factorization path.
        :param backend: communication backend; only rank 0 prints progress.
        :param half_precision: store captured statistics as float16 rather
            than float32.
        """
        # Validate scalar hyper-parameters up front.
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, damping=damping,
                        weight_decay=weight_decay)
        # TODO (CW): KFAC optimizer now only support model as input
        super(HKOROptimizer, self).__init__(model.parameters(), defaults)
        # Handler objects that compute factor statistics from hook payloads.
        self.CovAHandler = ComputeCovA()
        self.CovGHandler = ComputeCovG()
        self.AHandler = ComputeI()
        self.GHandler = ComputeG()
        self.batch_averaged = batch_averaged
        self.backend = backend
        # Only rank 0 emits log output.
        self.verbose = self.backend.rank() == 0
        # Layer types for which Kronecker factors are maintained.
        self.known_modules = {'Linear', 'Conv2d'}
        self.modules = []
        self.grad_outputs = {}
        self.model = model
        # Per-layer flags marking that a factor must be (re)initialized.
        self.a_reset_factor = {}
        self.g_reset_factor = {}
        self._prepare_model()  # registers the forward/backward hooks
        self.steps = 0
        # Running covariance factors and their eigendecompositions.
        self.m_aa, self.m_gg = {}, {}
        self.Q_a, self.Q_g = {}, {}
        self.d_a, self.d_g = {}, {}
        # Cached inverses and low-rank / sparse factorizations per layer.
        self.AA_inv, self.GG_inv = {}, {}
        self.AA_Us, self.AA_Vt = {}, {}
        self.GG_Us, self.GG_Vt = {}, {}
        self.AA_sparse_factor, self.GG_sparse_factor = {}, {}
        self.AA, self.GG = {}, {}
        self.stat_decay = stat_decay
        self.kl_clip = kl_clip
        self.inv_freq = inv_freq
        # Timing Variables
        self.timer = Timer(measure=measure_time)
        self.svd = svd
        self.reset_factors_freq = 10
        # Buffers filled by the forward/backward hooks.
        self.inputs = {}
        self.input_shapes = {}
        self.inputs_reduced = False
        self.grads = {}
        self.grad_shapes = {}
        self.data_type = torch.float16 if half_precision else torch.float32
        self.manual_reset_factors = False
        # NOTE(review): reset_weight appears to blend old/new statistics when a
        # factor is reset — confirm against the reset logic elsewhere in file.
        if self.manual_reset_factors:
            self.reset_weight = 0.8
        else:
            self.reset_weight = 0.1
        self.error_average_list = []
        self.error_svd_list = []
        # When sgd is True the hooks skip statistics capture (plain-SGD mode).
        self.sgd = False
        # rank == 1 selects the rank-1 inverse path in _get_natural_grad; it is
        # also the interpolation weight between preconditioned and raw grads.
        self.rank = 1
        self.sparse = False
        self.sparse_threshold = 1e-2
        self.sparse_AA, self.sparse_GG = {}, {}
        self.dummy_timer_start = torch.cuda.Event(enable_timing=True)
        self.dummy_timer_end = torch.cuda.Event(enable_timing=True)
def inverse(self, prev_inv, rank_1):
tmp1 = (prev_inv @ rank_1)
tmp2 = (rank_1.t() @ prev_inv)
return prev_inv - 1 / (1 + tmp2 @ rank_1) * tmp1 @ tmp2
def _save_input(self, module, input):
if torch.is_grad_enabled() and (self.steps % self.inv_freq == 0 or self.steps < 10) and not self.sgd:
a = self.AHandler(input[0].data, module)
self.inputs[module] = a.to(self.data_type)
def _save_grad_output(self, module, grad_input, grad_output):
# Accumulate statistics for Fisher matrices
if (self.steps % self.inv_freq == 0 or self.steps < 10) and not self.sgd:
if not self.inputs_reduced:
self.reduce_inputs()
self.inputs_reduced = True
g, _ = self.GHandler(grad_output[0].data, module)
self.grads[module] = g.to(self.data_type)
def _prepare_model(self):
count = 0
if self.verbose:
print("=> We keep following layers in KFAC. ")
for module in self.model.modules():
classname = module.__class__.__name__
# print('=> We keep following layers in KFAC. <=')
if classname in self.known_modules:
self.modules.append(module)
module.register_forward_pre_hook(self._save_input)
module.register_backward_hook(self._save_grad_output)
self.a_reset_factor[module] = True
self.g_reset_factor[module] = True
if self.verbose:
print('(%s): %s' % (count, module))
count += 1
def _update_inv(self, m):
"""Do eigen decomposition for computing inverse of the ~ fisher.
:param m: The layer
:return: no returns.
"""
eps = 1e-10 # for numerical stability
if torch.any(torch.isnan(self.m_aa[m])) or torch.any(torch.isnan(self.m_gg[m])):
raise ValueError("NaN detected in m_aa or m_gg")
self.d_a[m], self.Q_a[m] = torch.linalg.eigh(
self.m_aa[m] + eps / 10 * torch.eye(self.m_aa[m].shape[0], device=self.m_aa[m].device))
self.d_g[m], self.Q_g[m] = torch.linalg.eigh(
self.m_gg[m] + eps / 10 * torch.eye(self.m_gg[m].shape[0], device=self.m_gg[m].device))
# print(min(torch.min(self.d_a[m]), torch.min(self.d_g[m])))
self.d_a[m].mul_((self.d_a[m] > eps).float())
self.d_g[m].mul_((self.d_g[m] > eps).float())
@staticmethod
def _get_matrix_form_grad(m, classname):
"""
:param m: the layer
:param classname: the class name of the layer
:return: a matrix form of the gradient. it should be a [output_dim, input_dim] matrix.
"""
if classname == 'Conv2d':
p_grad_mat = m.weight.grad.data.view(m.weight.grad.data.size(0), -1) # n_filters * (in_c * kw * kh)
else:
p_grad_mat = m.weight.grad.data
if m.bias is not None:
p_grad_mat = torch.cat([p_grad_mat, m.bias.grad.data.view(-1, 1)], 1)
return p_grad_mat
    def _get_natural_grad(self, m, p_grad_mat, damping, identity=False):
        """
        Precondition the matrix-form gradient of layer *m* with the cached
        inverse factors (dense, sparse-CSR, or low-rank, depending on config).

        :param m: the layer
        :param p_grad_mat: the gradients in matrix form (output_dim x input_dim)
        :param damping: damping term (only used by the commented-out eigen path)
        :param identity: if True, bypass preconditioning and use the raw gradient
        :return: a list of gradients w.r.t to the parameters in `m`
        """
        # p_grad_mat is of output_dim * input_dim
        # inv((ss')) p_grad_mat inv(aa') = [ Q_g (1/R_g) Q_g^T ] @ p_grad_mat @ [Q_a (1/R_a) Q_a^T]
        # v1 = self.Q_g[m].t() @ p_grad_mat @ self.Q_a[m]
        # v2 = v1 / (self.d_g[m].unsqueeze(1) * self.d_a[m].unsqueeze(0) + damping)
        # v = self.Q_g[m] @ v2 @ self.Q_a[m].t()
        if identity:
            v = p_grad_mat
        else:
            # self.dummy_timer_start.record()
            if self.rank == 1:
                if self.sparse:
                    # Sparse-CSR factors (selected per layer in compute_preconditioning_costs).
                    v = self.GG_sparse_factor[m] @ p_grad_mat @ self.AA_sparse_factor[m]
                else:
                    v = self.GG_inv[m].to(torch.float32) @ p_grad_mat @ self.AA_inv[m].to(torch.float32)
            else:
                # Low-rank path: blend the rank-k preconditioned gradient with the raw one.
                v = self.rank * (self.GG_Us[m] @ (self.GG_Vt[m] @ p_grad_mat @ self.AA_Us[m]) @ self.AA_Vt[m]) + (1 - self.rank) * p_grad_mat
            # self.dummy_timer_end.record()
            # torch.cuda.synchronize()
            # print('dummy time: ', self.dummy_timer_start.elapsed_time(self.dummy_timer_end))
        if m.bias is not None:
            # we always put gradient w.r.t weight in [0]
            # and w.r.t bias in [1]
            v = [v[:, :-1], v[:, -1:]]
            v[0] = v[0].view(m.weight.grad.data.size())
            v[1] = v[1].view(m.bias.grad.data.size())
        else:
            v = [v.view(m.weight.grad.data.size())]
        return v
def _kl_clip_and_update_grad(self, updates, lr):
# do kl clip
vg_sum = 0
for m in self.modules:
v = updates[m]
vg_sum += (v[0] * m.weight.grad.data * lr ** 2).sum().item()
if m.bias is not None:
vg_sum += (v[1] * m.bias.grad.data * lr ** 2).sum().item()
nu = min(1.0, math.sqrt(self.kl_clip / vg_sum))
for m in self.modules:
v = updates[m]
m.weight.grad.data.copy_(v[0])
m.weight.grad.data.mul_(nu)
if m.bias is not None:
m.bias.grad.data.copy_(v[1])
m.bias.grad.data.mul_(nu)
def _step(self, closure):
# FIXME (CW): Modified based on SGD (removed nestrov and dampening in momentum.)
# FIXME (CW): 1. no nesterov, 2. buf.mul_(momentum).add_(1 <del> - dampening </del>, d_p)
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0 and self.steps >= 20 * self.inv_freq:
d_p.add_(weight_decay, p.data)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
buf.mul_(momentum).add_(d_p)
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(1, d_p)
d_p = buf
p.data.add_(-group['lr'], d_p)
if torch.isnan(p.data).any():
print('nan')
exit()
    def reduce_inputs(self):
        """Compress each layer's cached inputs to a single row (rank-1 SVD
        surrogate when ``self.svd`` is set, otherwise the per-column mean) and
        launch an asynchronous all-reduce of the concatenated rows when
        running on more than one worker."""
        inputs = []
        # self.error_average_list = []
        # self.error_svd_list = []
        for module in self.modules:
            a = self.inputs[module]
            if self.svd:
                # Rank-1 surrogate built from the leading singular triplet.
                U, S, V = torch.linalg.svd(a, full_matrices=False)
                # average = torch.mean(a, dim=0, keepdim=True)
                # exact_mat = a.t() @ a / torch.tensor(a.shape[0])
                a = (V[0, :].reshape(-1, 1) * S[0] * torch.sum(U[:, 0] ** 2)).t() / torch.sqrt(torch.tensor(a.shape[0]))
                # error_average = torch.norm((average.t() @ average) - exact_mat)
                # error_svd = torch.norm((a.t() @ a) - exact_mat)
                # self.error_average_list.append(error_average)
                # self.error_svd_list.append(error_svd)
            else:
                # exact_mat = a.t() @ a / torch.tensor(a.shape[0])
                a = torch.mean(a, dim=0, keepdim=True)
                # error_average = torch.norm((a.t() @ a) - exact_mat) / torch.norm(exact_mat)
                # self.error_average_list.append(error_average)
                # self.error_svd_list.append(torch.zeros(1))
            self.inputs[module] = a
            if self.backend.size() != 1:
                inputs.append(self.inputs[module].reshape(1, -1))
        # import os
        # if not os.path.exists('error.csv'):
        #     with open('error.csv', 'w') as f:
        #         f.write('error_svd,error_mean\n')
        # with open('error.csv', 'a') as f:
        #     f.write(f'{torch.mean(torch.tensor(self.error_svd_list))},{torch.mean(torch.tensor(self.error_average_list))}\n')
        if self.backend.size() == 1:
            return
        # Single flat buffer so one async all-reduce covers every layer.
        self.reduced_inputs = torch.cat(inputs, dim=1)
        self.input_handles = []
        self.input_handles.append(self.backend.allreduce(self.reduced_inputs, async_op=True, average=True))
def reduce_grads(self):
grads = []
for module in self.modules:
g = self.grads[module]
if self.svd:
U, S, V = torch.linalg.svd(g, full_matrices=False)
g = (V[0, :].reshape(-1, 1) * S[0] * torch.sum(U[:, 0] ** 2)).t()
else:
g = torch.mean(g, dim=0, keepdim=True)
self.grads[module] = g
if self.backend.size() != 1:
grads.append(self.grads[module].reshape(1, -1))
if self.backend.size() == 1:
return
self.reduced_grads = torch.cat(grads, dim=1)
self.grad_handles = []
self.grad_handles.append(self.backend.allreduce(self.reduced_grads, async_op=True, average=True))
def sync_inputs(self):
self.inputs_reduced = False
if self.backend.size() == 1:
return
self.backend.sync(self.input_handles)
offset = 0
for module in self.modules:
input_shape = self.inputs[module].shape
input_numel = self.inputs[module].numel()
self.inputs[module] = self.reduced_inputs[0, offset:offset + input_numel].reshape(input_shape)
offset += input_numel
def sync_grad(self):
if self.backend.size() == 1:
return
self.backend.sync(self.grad_handles)
offset = 0
for module in self.modules:
grad_shape = self.grads[module].shape
grad_numel = self.grads[module].numel()
self.grads[module] = self.reduced_grads[0, offset:offset + grad_numel].reshape(grad_shape)
offset += grad_numel
    def update_factors(self):
        """Refresh the inverse Kronecker factors ``AA_inv``/``GG_inv`` of every
        tracked layer (the last layer is skipped and left unpreconditioned)
        from the freshly reduced inputs and gradients, then derive the
        low-rank or sparse representations used at precondition time."""
        self.sync_inputs()
        for module in self.modules:
            if module == self.modules[-1]:
                continue
            a = self.inputs[module]
            v = a.t()
            # print("Forward Error", torch.norm(a.t() @ a - v @ v.t()) / torch.norm(a.t() @ a), S / torch.sum(S))
            if self.a_reset_factor[module]:
                if module not in self.AA_inv:
                    # self.AA[module] = torch.eye(a.size(1)).to(a.device)
                    self.AA_inv[module] = torch.eye(a.size(1), device=a.device, dtype=self.data_type)
                else:
                    # Shrink the running inverse towards identity instead of discarding it.
                    self.AA_inv[module] = self.AA_inv[module] * (1 - self.reset_weight) + self.reset_weight * torch.eye(
                        a.size(1), device=a.device, dtype=self.data_type)
                self.a_reset_factor[module] = False
            # NOTE(review): this unconditionally forces the 'approx' path every
            # call; the 'exact'/'low_rank' branches below are effectively dead.
            self.method = 'approx'
            if self.method == 'exact':
                self.AA_inv[module] = torch.inverse(
                    a.t() @ a * (1 - self.stat_decay) + self.stat_decay * self.AA[module])
            elif self.method == 'low_rank':
                self.AA_inv[module] = torch.inverse(
                    v @ v.t() * (1 - self.stat_decay) + self.stat_decay * self.AA[module])
            elif self.method == 'approx':
                # Sherman-Morrison rank-1 update of the running inverse.
                self.AA_inv[module] = self.inverse(self.AA_inv[module] / self.stat_decay,
                                                   v * math.sqrt(1 - self.stat_decay))
            # if self.verbose:
            #     print(torch.max(torch.abs(self.AA_inv[module].flatten())), torch.max(torch.abs(v)))
            if self.manual_reset_factors:
                self.a_reset_factor[module] = self.steps % (self.inv_freq * self.reset_factors_freq) == 0
                self.g_reset_factor[module] = self.steps % (self.inv_freq * self.reset_factors_freq) == 0
            else:
                # Auto-reset when the inverse starts to blow up.
                if torch.max(torch.abs(self.AA_inv[module].flatten())) > 2:
                    self.a_reset_factor[module] = True
                    self.g_reset_factor[module] = True
            if self.rank != 1:
                self.AA_Us[module], self.AA_Vt[module] = randomized_svd(self.AA_inv[module].to(torch.float32), self.rank)
            else:
                if self.sparse:
                    if self.sparsify[module]:
                        mask = (self.AA_inv[module].abs() > self.sparse_threshold).to(torch.int)
                        self.sparse_AA[module] = True
                        self.AA_sparse_factor[module] = (self.AA_inv[module] * mask).to(torch.float32).to_sparse_csr().to(self.AA_inv[module].device)
                        # print(mask.sum() / mask.numel(), mask.shape)
                    else:
                        self.sparse_AA[module] = False
                        self.AA_sparse_factor[module] = self.AA_inv[module].to(torch.float32)
        self.sync_grad()
        for module in self.modules:
            if module == self.modules[-1]:
                continue
            g = self.grads[module]
            v = g.t()
            # print("Backward Error", torch.norm(g.t() @ g - v @ v.t()) / torch.norm(g.t() @ g), S / torch.sum(S))
            if self.g_reset_factor[module]:
                if module not in self.GG_inv:
                    self.GG_inv[module] = torch.eye(g.size(1), device=g.device, dtype=self.data_type)
                else:
                    self.GG_inv[module] = self.GG_inv[module] * (1 - self.reset_weight) + self.reset_weight * torch.eye(
                        g.size(1), device=g.device, dtype=self.data_type)
                self.g_reset_factor[module] = False
            # GG = self.GG[module]
            if self.method == 'exact':
                self.GG_inv[module] = torch.inverse(
                    g.t() @ g * (1 - self.stat_decay) + self.stat_decay * self.GG[module])
            elif self.method == 'low_rank':
                self.GG_inv[module] = torch.inverse(
                    v @ v.t() * (1 - self.stat_decay) + self.stat_decay * self.GG[module])
            elif self.method == 'approx':
                # NOTE(review): the AA update scales v by sqrt(1 - stat_decay)
                # while this GG update uses (1 - stat_decay) -- confirm the
                # asymmetry is intentional.
                self.GG_inv[module] = self.inverse(self.GG_inv[module] / self.stat_decay, v * (1 - self.stat_decay))
            if self.rank != 1:
                self.GG_Us[module], self.GG_Vt[module] = randomized_svd(self.GG_inv[module].to(torch.float32), self.rank)
            else:
                if self.sparse:
                    if self.sparsify[module]:
                        mask = (self.GG_inv[module].abs() > self.sparse_threshold).to(torch.int)
                        self.sparse_GG[module] = True
                        self.GG_sparse_factor[module] = (self.GG_inv[module] * mask).to(torch.float32).to_sparse_csr().to(self.GG_inv[module].device)
                        # print(mask.sum() / mask.numel(), mask.shape)
                    else:
                        self.sparse_GG[module] = False
                        self.GG_sparse_factor[module] = self.GG_inv[module].to(torch.float32)
    def reduce_and_update_factors(self):
        """Kick off the gradient all-reduce, then refresh both inverse factors."""
        self.reduce_grads()
        self.update_factors()
def compute_min_eigenvals(self):
self.a_min_eigenvals = {}
self.g_min_eigenvals = {}
for module in self.modules[:-1]:
d_a, Q_a = torch.linalg.eigh(self.AA_inv[module].to(torch.float32))
d_g, Q_g = torch.linalg.eigh(self.GG_inv[module].to(torch.float32))
self.a_min_eigenvals[module] = torch.min(d_a)
self.g_min_eigenvals[module] = torch.min(d_g)
if self.verbose:
import os
if not os.path.exists("a_eigen.csv"):
with open("a_eigen.csv", "w") as f:
f.write("min_a_eigen,min_a_abs_eigen,max_a_eigen,max_a_abs_eigen,condition_number\n")
with open("a_eigen.csv", "a") as f:
f.write(str(torch.min(d_a).item()) + "," + str(torch.min(torch.abs(d_a)).item()) + "," + str(
torch.max(d_a).item()) + "," + str(torch.max(torch.abs(d_a)).item()) + "," + str(
torch.max(torch.abs(d_a)).item() / torch.min(torch.abs(d_a)).item()) + "\n")
if not os.path.exists("g_eigen.csv"):
with open("g_eigen.csv", "w") as f:
f.write("min_g_eigen,min_g_abs_eigen,max_g_eigen,max_g_abs_eigen,condition_number\n")
with open("g_eigen.csv", "a") as f:
f.write(str(torch.min(d_g).item()) + "," + str(torch.min(torch.abs(d_g)).item()) + "," + str(
torch.max(d_g).item()) + "," + str(torch.max(torch.abs(d_g)).item()) + "," + str(
torch.max(torch.abs(d_g)).item() / torch.min(torch.abs(d_g)).item()) + "\n")
# print(min(torch.min(self.d_a[m]), torch.min(self.d_g[m])))
    def step_mkor(self, closure=None):
        """One MKOR optimization step: (re)build the inverse factors when due,
        precondition every layer's matrix-form gradient, apply the KL clip,
        and take the SGD-style parameter update."""
        # FIXME(CW): temporal fix for compatibility with Official LR scheduler.
        if self.steps == 0:
            self.reset_factors()
            self.compute_preconditioning_costs()
        group = self.param_groups[0]
        lr = group['lr']
        damping = group['damping']
        updates = {}
        # Factors are refreshed every inv_freq steps (plus a 10-step warm-up).
        if self.steps % self.inv_freq == 0 or self.steps < 10:
            self.timer("reduce_and_update_factors", self.reduce_and_update_factors)
            # self.compute_min_eigenvals()
        for m in self.modules:
            classname = m.__class__.__name__
            p_grad_mat = self.timer("precondition", self._get_matrix_form_grad, m=m, classname=classname)
            # The last module is passed through unpreconditioned (identity).
            v = self.timer("precondition", self._get_natural_grad, m=m, p_grad_mat=p_grad_mat, damping=damping,
                           identity=m == self.modules[-1])
            updates[m] = v
        self.timer("update_weights", self._kl_clip_and_update_grad, updates=updates, lr=lr)
        self.timer("update_weights", self._step, closure=closure)
        self.steps += 1
def step_sgd(self, closure=None):
vg_sum = 0
lr_squared = self.param_groups[0]['lr'] ** 2
for m in self.modules:
vg_sum += ((m.weight.grad.data) ** 2).sum().item() * lr_squared
if m.bias is not None:
vg_sum += ((m.bias.grad.data) ** 2).sum().item() * lr_squared
nu = min(1.0, math.sqrt(self.kl_clip / vg_sum))
for m in self.modules:
m.weight.grad.data.mul_(nu)
if m.bias is not None:
m.bias.grad.data.mul_(nu)
self._step(closure=closure)
def step(self, closure=None):
if self.sgd:
self.step_sgd(closure)
self.steps = 0
else:
self.step_mkor(closure)
def reset_factors(self):
for m in self.GG_inv:
self.GG_inv[m] = torch.eye(self.GG_inv[m].size(0), device=self.GG_inv[m].device, dtype=self.data_type)
self.AA_inv[m] = torch.eye(self.AA_inv[m].size(0), device=self.AA_inv[m].device, dtype=self.data_type)
def compute_preconditioning_costs(self):
costs = []
self.sparsify = {}
for m in self.modules[:-1]:
costs.append(self.inputs[m].size(1) * self.grads[m].size(1) * (self.inputs[m].size(1) + self.grads[m].size(1)))
self.sparsify[m] = False
costs = torch.tensor(costs)
self.costs = costs / torch.sum(costs)
sum = 0
while sum < 0.7:
max, idx = torch.max(self.costs, 0)
self.costs[idx] = 0
sum += max
self.sparsify[self.modules[idx]] = True
| Mohammad-Mozaffari/mkor | bert/optimizers/hkor.py | hkor.py | py | 23,368 | python | en | code | 1 | github-code | 13 |
17041567174 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayInsDataDsbRequestImageInfo import AlipayInsDataDsbRequestImageInfo
def _install_plain_properties(cls):
    """Class decorator: generate pass-through properties (getter/setter over
    the matching underscore-prefixed attribute) for every simple field.
    ``image_list`` is excluded because its setter performs element conversion.
    """
    for name in cls._FIELDS:
        if name == 'image_list':
            continue

        def _get(self, _attr='_' + name):
            return getattr(self, _attr)

        def _set(self, value, _attr='_' + name):
            setattr(self, _attr, value)

        setattr(cls, name, property(_get, _set))
    return cls


@_install_plain_properties
class AlipayInsDataDsbEstimateApplyModel(object):
    """Request model for the insurance car-damage estimate-apply API.

    All fields default to ``None``.  ``to_alipay_dict``/``from_alipay_dict``
    convert between model instances and plain dicts; nested ``image_list``
    entries are converted to/from ``AlipayInsDataDsbRequestImageInfo``.
    The original generated code spelled out one property and one
    serialization branch per field; this version drives everything from the
    single ``_FIELDS`` table while keeping the exact same external behavior.
    """

    # Field names in the serialization order used by the original code.
    _FIELDS = (
        'accident_area_id', 'car_properties', 'case_properties',
        'commercial_policy_no', 'compulsory_policy_no', 'engine_no',
        'estimate_no', 'estimate_request_uuid', 'frame_no', 'garage_type',
        'image_list', 'license_no', 'model_brand', 'new_car_price',
        'repair_corp_properties', 'report_no', 'request_timestamp',
        'survey_no',
    )

    def __init__(self):
        # Every field starts unset (None), stored on a private attribute.
        for name in self._FIELDS:
            setattr(self, '_' + name, None)

    @property
    def image_list(self):
        return self._image_list

    @image_list.setter
    def image_list(self, value):
        # Lists are normalized so each element is an image-info object;
        # plain dicts are converted on assignment.
        if isinstance(value, list):
            self._image_list = list()
            for i in value:
                if isinstance(i, AlipayInsDataDsbRequestImageInfo):
                    self._image_list.append(i)
                else:
                    self._image_list.append(AlipayInsDataDsbRequestImageInfo.from_alipay_dict(i))

    def to_alipay_dict(self):
        """Serialize the populated (truthy) fields to a plain dict."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if name == 'image_list' and isinstance(value, list):
                # Convert nested objects in place, exactly as the original
                # generated code did.
                for i in range(0, len(value)):
                    element = value[i]
                    if hasattr(element, 'to_alipay_dict'):
                        value[i] = element.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from dict *d*; returns None for a falsy input."""
        if not d:
            return None
        o = AlipayInsDataDsbEstimateApplyModel()
        for name in AlipayInsDataDsbEstimateApplyModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayInsDataDsbEstimateApplyModel.py | AlipayInsDataDsbEstimateApplyModel.py | py | 11,087 | python | en | code | 241 | github-code | 13 |
14541951925 | import time
import sklearn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv1D, MaxPooling1D, Flatten
from keras.layers import Dropout
np.random.seed(10)

# Start the wall-clock timer.
tStart = time.time()

# =============================================================================
# Step 1: load the datasets.
x_train, y_train = np.load("traindata.npy"), np.load("trainlabel.npy")
x_val, y_val = np.load("validationdata.npy"), np.load("validationlabel.npy")
x_test = np.load("testdata.npy")

print("x_train size: ", x_train.shape, "\n-------------------------------------")
print("x_val size: ", x_val.shape, "\n-------------------------------------")
print("x_test size: ", x_test.shape, "\n-------------------------------------")
# Bug fix: the original printed x_train/x_val shapes under the y_* labels.
print("y_train size: ", y_train.shape, "\n-------------------------------------")
print("y_val size: ", y_val.shape, "\n=======================================")
# =============================================================================

# =============================================================================
# Step 2: standardize each of the two channels independently, then restore
# the (samples, channels, length) layout.  Sample counts and sequence length
# are taken from the arrays themselves instead of being hard-coded
# (was 3360 / 480 / 960 samples of length 20000).
def _standardize_two_channel(x):
    """Scale both channels of x (n, 2, length) to zero mean / unit variance."""
    ch0 = StandardScaler().fit_transform(x[:, 0, :])
    ch1 = StandardScaler().fit_transform(x[:, 1, :])
    return np.hstack([ch0, ch1]).reshape(x.shape[0], 2, x.shape[2])

x_train = _standardize_two_channel(x_train)
x_val = _standardize_two_channel(x_val)
x_test = _standardize_two_channel(x_test)
# =============================================================================

# =============================================================================
# Step 3: one-hot encode the labels.
y_train_onehot = np_utils.to_categorical(y_train)
y_val_onehot = np_utils.to_categorical(y_val)
# =============================================================================
# =============================================================================
# Step 4: build the 1-D CNN.
#
# Twelve conv/pool stages progressively halve the sequence length
# (20000 -> 5) while widening the channels (2 -> 156), followed by a
# fully-connected classifier head.  The original hand-unrolled all twelve
# stages; they differ only in their filter count, so they are generated
# from a single table here.
model = Sequential()

# Channel widths of the successive Conv1D stages (same sequence as the
# original unrolled layers).
_CONV_FILTERS = (6, 6, 16, 16, 36, 36, 64, 64, 108, 108, 156, 156)

for _stage, _filters in enumerate(_CONV_FILTERS):
    _conv_kwargs = dict(filters=_filters,
                        kernel_size=5,
                        strides=1,
                        padding='same',
                        data_format='channels_first',
                        activation='relu')
    if _stage == 0:
        # Only the first layer declares the input shape.
        _conv_kwargs['batch_input_shape'] = (None, 2, 20000)
    model.add(Conv1D(**_conv_kwargs))
    model.add(MaxPooling1D(pool_size=2,
                           strides=2,
                           padding='same',
                           data_format='channels_first'))

model.add(Dropout(0.25))

# Fully-connected classifier head.
model.add(Flatten())
model.add(Dense(units=780,
                kernel_initializer='random_uniform',
                bias_initializer='zeros',
                activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=128,
                kernel_initializer='random_uniform',
                bias_initializer='zeros',
                activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=4,
                activation='softmax'))
print(model.summary())
# =============================================================================
# =============================================================================
# Step 5: training configuration.
#
# Bug fix: the original called `keras.optimizers.adam(...)` -- a NameError,
# since `keras` itself was never imported -- and then discarded the result,
# compiling with the string 'adam' so the custom learning rate never took
# effect.  Build the optimizer explicitly and pass it to compile().
from keras.optimizers import Adam

optimizer = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
train_history = model.fit(x=x_train,
                          y=y_train_onehot,
                          validation_data=(x_val, y_val_onehot),
                          epochs=500,
                          batch_size=64,
                          shuffle=True,
                          verbose=1)
# =============================================================================
# =============================================================================
# Step 6: plot the training curves (accuracy and loss).
def show_train_history(train_history, train, validation):
    """Plot one training metric against its validation counterpart.

    :param train_history: History object returned by model.fit().
    :param train: key of the training metric (e.g. 'acc' or 'loss').
    :param validation: key of the validation metric (e.g. 'val_acc').
    """
    for key in (train, validation):
        plt.plot(train_history.history[key])
    plt.title('Train History')
    plt.ylabel(train)
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='right')
    plt.show()
show_train_history(train_history,'acc','val_acc')
show_train_history(train_history,'loss','val_loss')
# =============================================================================
# =============================================================================
# Step 7: evaluate the model.
# NOTE(review): the original comment said "test set", but this evaluates on
# the validation set (x_val); x_test is never scored here -- confirm intent.
scores = model.evaluate(x_val, y_val_onehot)
print('score= ', scores[0])
print('accurancy= ', scores[1])
# =============================================================================
# =============================================================================
# Step 8: save the trained model.
model.save('model.h5')
# =============================================================================
# Stop the wall-clock timer and report the elapsed time.
tEnd = time.time()
print('==========================================\ntime : ', tEnd - tStart, '(s)')
| YT1202/DSP2020_Final-Project | Train.py | Train.py | py | 9,809 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.