seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
22316226689 | from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense
from keras.layers import Input, BatchNormalization
from keras import metrics
import time
import cv2
img_width, img_height = 224, 224
batch_size = 10
output_classes = 2
n200_data_dir = '../data/n200'
n150_data_dir = '../data/n150'
n100_data_dir = '../data/n100'
n50_data_dir = '../data/n50'
n25_data_dir = '../data/n25'
validation_data_dir = '../data/test'
val_samples = 100
val_steps = val_samples / batch_size
def instantiate_model(main_model):
    """Wrap a pre-trained base network with a softmax classification head.

    All layers of the base network are frozen so that only the new Dense
    head is trained (transfer-learning feature extraction).

    :param main_model: a Keras application model built with include_top=False.
    :return: a compiled keras Model ready for fit_generator().
    """
    inp = Input(shape=(img_width, img_height, 3), name='input_image')
    # Freeze the pre-trained weights; only the classification head learns.
    for layer in main_model.layers:
        layer.trainable = False
    features = main_model(inp)
    main_out = Dense(output_classes, activation='softmax')(features)
    # 'inputs'/'outputs' are the supported keyword names for the functional
    # Model API; the old singular 'input'/'output' spellings are deprecated
    # and were removed in later Keras releases.
    model = Model(inputs=inp, outputs=main_out)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
print('\n==== Instantiating source models')
vgg_source = applications.VGG16(weights='imagenet',input_shape=(224,224,3), pooling='max', include_top=False)
inception_source = applications.InceptionV3(weights='imagenet', input_shape=(224,224,3), pooling='max', include_top=False)
mobilenet_source = applications.MobileNet(weights='imagenet', include_top=False,pooling='max', input_shape=(224, 224, 3))
print('==== Instantiated source models\n')
print('\n==== Loading data')
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
rotation_range=20.,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
n200_generator = train_datagen.flow_from_directory(
n200_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
n150_generator = train_datagen.flow_from_directory(
n150_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
n100_generator = train_datagen.flow_from_directory(
n100_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
n50_generator = train_datagen.flow_from_directory(
n50_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
n25_generator = train_datagen.flow_from_directory(
n25_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
print('==== Loaded data\n')
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
print('\n==== Fitting models')
source_models = [mobilenet_source, vgg_source, inception_source]
model_names = ['MobileNet', 'VGG16', 'InceptionV3']
generators = [n25_generator, n50_generator, n100_generator, n150_generator, n200_generator]
ns = [25, 50, 100, 150, 200]
epochs = [20,40]
hist_list = []
for (src, src_name) in zip(source_models, model_names):
model = instantiate_model(src)
for (n, train_generator) in zip(ns, generators):
train_steps = 2 * n / batch_size
for e in epochs:
print('==== Source: {}\tImages per class: {}\tEpochs: {}'.format(src_name, n, e))
t0 = time.time()
hist = model.fit_generator(
train_generator,
steps_per_epoch=train_steps,#nb_train_samples,
epochs=e,
validation_data=validation_generator,
validation_steps=val_steps)
t1 = time.time()
t = t1 - t0
hist_dict = {
'source_model':src_name,
'images_per_class':n,
'epochs':e,
'hist':hist.history,
'training_time':t
}
hist_list.append(hist_dict)
path = '/home/ubuntu/image_classification/results/circ/{}_n{}_e{}.h5'.format(src_name, n, e)
model.save(path)
print('==== models fit\n')
print('\n==== outputting histories')
import pickle
path = '/home/ubuntu/image_classification/results/circ/circ.p'
with open(path, 'wb') as output:
pickle.dump(hist_list, output)
print('\n==== outputted histories to {}'.format(path))
| kirklandnuts/image_classification | src/train_var_sizes.py | train_var_sizes.py | py | 4,501 | python | en | code | 0 | github-code | 13 |
38240297696 | #! /usr/bin/python3
import sys
import math
def readIn():
    """Parse the first line of standard input as two whitespace-separated ints."""
    first_line = sys.stdin.readline()
    tokens = first_line.split()
    return int(tokens[0]), int(tokens[1])
# Read the required height (h) and the ladder angle in degrees (v).
h, v = readIn()
# math.sin expects radians, so convert the angle first.
angle_rad = math.radians(v)
sinus = math.sin(angle_rad)
# Shortest integer ladder length reaching height h at this angle:
# ceil(h / sin(angle)).
ans = math.ceil(h/sinus)
#print(ans)
sys.stdout.write(str(ans))
| AnimalMother83/Kattis-solutions | ladder.py | ladder.py | py | 283 | python | en | code | 0 | github-code | 13 |
10453549953 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ======================================================
# @File: : main
# @Author : forward_huan
# @Date : 2023/2/15 20:23
# @Desc :
# ======================================================
import argparse
import os
import re
import threading
from pip._vendor.distlib.compat import raw_input
template = """import sys
from #PACKAGE# import #CLASS_NAME#
from PyQt5.QtWidgets import QApplication, QWidget
app = QApplication(sys.argv)
ui = #UI_DESIGNER#(#PARAMS#)
if isinstance(ui, QWidget):
win = ui
else:
win = QWidget()
ui.setupUi(win)
win.show()
sys.exit(app.exec_())
"""
def get_version():
    """Return the current version string of this tool."""
    return "1.0.0.0"
def parse_args():
    """Build the command-line interface for the previewer and parse sys.argv."""
    parser = argparse.ArgumentParser(description="PyQt5界面预览工具")
    parser.add_argument(
        "-f", "--file_path", type=str, required=True, help="PyQt5 UI界面的Python文件")
    parser.add_argument(
        "-r", "--root_path", type=str, required=True, default=".", help="项目根目录")
    parser.add_argument(
        "-p", "--params", type=str, nargs="+", default=[], help="UI界面文件中所需的参数")
    parser.add_argument(
        "-v", "--version", action='version', version=get_version(), help="显示版本号")
    return parser.parse_args()
def get_class_name(codes):
    """Return the names of all classes defined in the given source lines.

    :param codes: iterable of source-code lines (strings).
    :return: list of class names in order of definition.
    """
    class_names = []
    # Anchor on an actual 'class <Name>' definition. The previous
    # slice-based parsing (t_line[5:t_line.find("(")]) broke on classes
    # declared without parentheses or followed by a comment, and it
    # false-matched any line merely starting with the letters "class".
    pattern = re.compile(r"^class\s+(\w+)")
    for line in codes:
        match = pattern.match(str(line).strip())
        if match:
            class_names.append(match.group(1))
    return class_names
def trans(item):
    """Render a parameter value as Python source text (strings get quoted)."""
    return f'"{item}"' if isinstance(item, str) else str(item)
def get_package(file_path: str, root_path: str):
    """Derive the dotted module path of *file_path* relative to *root_path*."""
    def dotted(path):
        # Collapse every run of path separators into a single dot.
        return re.sub(r"([\\|/])+", ".", path)
    # Drop the dotted root prefix plus its separator, and the '.py' suffix.
    return dotted(file_path)[len(dotted(root_path)) + 1:-3]
def get_template(file_path, root_path, class_name, params):
    """Fill the preview-script template with the target class and arguments."""
    rendered = template.replace("#UI_DESIGNER#", class_name)
    rendered = rendered.replace("#PARAMS#", ",".join(trans(item) for item in params))
    rendered = rendered.replace("#PACKAGE#", get_package(file_path, root_path))
    return rendered.replace("#CLASS_NAME#", class_name)
def run(python_path, py_path):
    """Execute the generated preview script with the given interpreter.

    Runs synchronously via the shell; errors are printed, not raised.
    """
    try:
        os.system(f"{python_path} {py_path}")
    except Exception as ex:
        print(str(ex))
def run_ui(file_path: str, root_path, params: list):
    """Generate a preview bootstrap for the given UI file and launch it.

    Reads *file_path*, picks a class to instantiate (prompting the user
    when several classes are defined), writes a PreQtUI.py script into
    *root_path* and runs it with the project's venv interpreter, falling
    back to the 'python' found on PATH.
    """
    print("-" * 20)
    print("启动预览文件", file_path)
    print("项目根路径", root_path)
    print("参数配置", params)
    try:
        if not file_path.endswith("py"):
            raise Exception(f"该文件不是Python可执行文件")
        with open(file_path, "r", encoding="utf8")as f:
            temp = f.readlines()
        class_names = get_class_name(temp)
        class_name = class_names[0]
        if len(class_names) > 1:
            # Several classes found: ask the user to choose one by index.
            name_str = '\n'.join([f'{i} {name}' for i, name in enumerate(class_names)])
            msg = f"请选择需要加载的类序号\n{name_str}\n"
            class_name = class_names[int(raw_input(msg))]
        py_path = os.path.join(root_path, "PreQtUI.py")
        # NOTE(review): Windows venv layout assumed here — verify on other OSes.
        python_path = os.path.join(root_path, r"venv\Scripts\python.exe")
        if not os.path.exists(python_path):
            python_path = "python"
        with open(py_path, "w", encoding="utf8")as f:
            f.write(get_template(file_path, root_path, class_name, params))
        # Run the preview in a background thread so this call returns promptly.
        threading.Thread(target=run, args=(python_path, py_path)).start()
    except Exception as ex:
        print(str(ex))
if __name__ == '__main__':
    # Script entry point: parse the CLI and launch the UI preview.
    try:
        args = parse_args()
        run_ui(args.file_path, args.root_path, args.params)
    except Exception as e:
        print(str(e))
| forwardhuan/PreQtUI | main.py | main.py | py | 3,624 | python | en | code | 0 | github-code | 13 |
10366143787 | # Программирование на языке высокого уровня (Python).
# https://www.yuripetrov.ru/edu/python
# Задание task_07_02_02.
#
# Выполнил: Буц И.Д.
# Группа: АДЭУ-211
# E-mail: !!!
"""
Ошибки (номера строк через пробел, данная строка - №2): !!!
"""
def primes(a, b):
    """Return the list of prime numbers in the closed interval [a, b].

    A prime has exactly two divisors (1 and itself); trial division up
    to sqrt(i) is sufficient to detect a composite.

    The original version counted divisors in a shared counter `c` that
    was only reset in the non-prime branch, so every prime left c == 2
    and the following candidates were mis-classified (e.g. 3 was missing
    from primes(2, 5)).
    """
    res = []
    # There are no primes below 2, so clamp the lower bound.
    for i in range(max(a, 2), b + 1):
        is_prime = True
        for d in range(2, int(i ** 0.5) + 1):
            if i % d == 0:
                is_prime = False
                break
        if is_prime:
            res.append(i)
    return res
primes(10, 21) | Igor69-web/OOAP-211 | лаб7/2.py | 2.py | py | 1,317 | python | ru | code | 0 | github-code | 13 |
16011525630 | # -*- coding: utf-8 -*-
"""
Created on Sun May 5 16:08:11 2019
@author: 12718
"""
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import time
time0 = time.time()
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
x = tf.placeholder('float', [None, 784])
y_ = tf.placeholder('float', [None, 10]) # 0~9 digits
def conv2(x, w):
    """2-D convolution of x with filters w, stride 1, 'SAME' (zero) padding."""
    return tf.nn.conv2d(x,w,strides = [1,1,1,1],padding = 'SAME')
def pooling_2x2(x):
    """2x2 max-pooling with stride 2 — halves each spatial dimension."""
    return tf.nn.max_pool(x,ksize = [1,2,2,1],strides = [1,2,2,1], padding = 'SAME')
### convolution layer1
w_conv1 = tf.Variable(tf.truncated_normal([5,5,1,8], stddev = 0.1))
b_conv1 = tf.Variable(tf.constant(0.1, shape = [8]))
x_in = tf.reshape(x, [-1, 28,28, 1])
h_conv1 = tf.nn.relu(conv2(x_in,w_conv1)+b_conv1) #[-1,28,28,1] ==> [-1,28,28,8]
h_pool1 = pooling_2x2(h_conv1) #[-1,14,14,8]
### convolution layer2
w_conv2 = tf.Variable(tf.truncated_normal([5,5,8,16],stddev = 0.1))
b_conv2 = tf.Variable(tf.constant(0.1, shape = [16]))
h_conv2 = tf.nn.relu(conv2(h_pool1, w_conv2)+b_conv2) #[-1,14,14,8] ==>[-1,14,14,16]
h_pool2 = pooling_2x2(h_conv2) #[-1,7,7,16]
### fully connected layer
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*16])
h_fc1 = tf.nn.relu(tf.layers.dense(h_pool2_flat, 512))
### dropout layyer ==> reduce the overfitting, always use at fully connected layer
keep_prob = tf.placeholder('float')
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
### output layer
w_fc2 = tf.Variable(tf.truncated_normal([512, 10], stddev = 0.1))
b_fc2 = tf.Variable(tf.constant(0.1,shape = [10]))
h_fc2 = tf.matmul(h_fc1_drop, w_fc2)+b_fc2
### cross_entropy
#cross_entropy = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits = h_fc2, labels = y_)) #1
cross_entropy = tf.losses.softmax_cross_entropy(onehot_labels = y_, weights = 1.0, logits = h_fc2)
'''上面的方法已经对交叉熵求平均'''
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(tf.nn.softmax(h_fc2),1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
#accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
### train
batch_size = 100
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for i in range(3000):
batch_x, batch_y = mnist.train.next_batch(batch_size)
sess.run(train_step, feed_dict = {y_:batch_y, x:batch_x, keep_prob:0.5})
if i%500 == 0:
train_accuracy = sess.run(accuracy, feed_dict = {y_:batch_y, x:batch_x, keep_prob:1.0})
print ('step %4d, | the train accuracy:'%i, train_accuracy)
test_accuracy = sess.run(accuracy, feed_dict = {y_:mnist.test.labels, x:mnist.test.images, keep_prob:1.0})
print ('the test accuracy:', test_accuracy)
time1 = time.time()
print ('the time of calculation: ', time1-time0)
| MingyangChen1994/machinelearning | testcnn.py | testcnn.py | py | 2,899 | python | en | code | 0 | github-code | 13 |
71603041618 | from flask import Flask
from flask_restful import Resource, Api, reqparse
from flask import request
from flask import jsonify
from flask_cors import CORS
import json
import configparser
import networkx as nx
import graphWork as gw
app = Flask(__name__)
#api = Api(app)
CORS(app)
def get_steam_api_key():
    """Read the Steam Web API key from the local config.ini file."""
    parser = configparser.ConfigParser()
    parser.read('config.ini')
    return parser['steamWebApi']['api']
@app.route('/data', methods=['GET', 'POST'])
def handleUsers():
    """Build a shared-games graph for the POSTed Steam users.

    GET: returns an empty JSON object (CORS-enabled health check).
    POST: expects JSON with 'users', 'playedRecent', 'useTime' and
    'threshold'; builds the graph, annotates nodes with ownership count
    and PageRank, and writes it to graph.gexf on disk.
    """
    if request.method == 'GET':
        response = jsonify({})
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    elif request.method == 'POST':
        requestJson = json.loads(request.get_data())
        steam_ids = requestJson['users']
        recent = requestJson['playedRecent']
        timeCheck = requestJson['useTime']
        time_threshold = float(requestJson['threshold'])
        game_id_map = {}
        owned_count = {}
        G = nx.Graph()
        # Populates G (and the two dicts) from the Steam Web API.
        gw.addGamesToGraph(G, steam_ids, game_id_map, owned_count, get_steam_api_key(), recent, timeCheck, time_threshold)
        # Add ownership count and PageRank centrality to each graph node.
        pagerank = nx.pagerank(G)
        for val in G.nodes():
            G.nodes[val]['cluster'] = owned_count[val]
            G.nodes[val]['pagerank'] = pagerank[val]
        # Persist the annotated graph for the front end to fetch.
        nx.write_gexf(G, "graph.gexf")
        response = jsonify({"sounds good man": True})
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
if __name__ == '__main__':
    # Start the Flask development server when run as a script.
    app.run()
14212507430 | from __future__ import print_function
from __future__ import division
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader, random_split
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import skimage.transform as st
import matplotlib.pyplot as plt
import os
from utils import *
import random
from itertools import product
root_path = 'data/T1_images'
random.seed(0)
# from pandas.core.dtypes.base import E
class BrainDataset(Dataset):
    """Dataset of 2-D brain MRI slices with on-the-fly augmentation.

    Each sample corresponds to one (subject, slice-offset, flip, resize)
    combination, so a single subject yields many augmented variants. The
    per-race sample count is balanced by get_augmentation_combinations().
    """

    def __init__(self, data_dir, predictor, images_per_group):
        # super(BrainIterableDataset).__init__()
        self.data_dir = data_dir
        self.predictor = predictor  # one of 'sex', 'race', 'age'
        self.images_per_group = images_per_group
        self.metadata = pd.read_csv(f'{root_path}/subj_data.csv')
        # Drop subjects with an unknown (0) ethnicity code.
        self.metadata = self.metadata.loc[self.metadata['ETHNIC_ID'] != 0]
        self.all_subject_combs = self.get_augmentation_combinations()
        self.onehot_race = pd.get_dummies(self.metadata['ETHNIC_ID']).values
        self.labels = [self.get_labels(img[0]) for img in self.all_subject_combs]

    def __len__(self):
        return len(self.all_subject_combs)

    def get_augmentation_combinations(self):
        """Return a race-balanced list of (subject, slice, flip, resize) tuples."""
        subjects = np.array([img.split('_')[0] for img in sorted(os.listdir(self.data_dir)) if img.split('_')[0] in self.metadata['subjID'].values])
        race = np.array([self.metadata.loc[self.metadata['subjID'] == subj]['ETHNIC_ID'].values[0] for subj in subjects])
        # '' denotes the central slice; the others are +/-5 and +/-10 offsets.
        slices = ['minus5', 'minus10', '', 'plus5', 'plus10']
        flip = ['flip1', 'noflip', 'flip2', 'flip12']
        resize = ['original', 'small', 'smaller']
        all_subject_combs = []
        for group in np.unique(race):
            indices = np.where(race == group, True, False)
            subj_in_group = subjects[indices]
            combs = list(product(subj_in_group, slices, flip, resize))
            # Shuffle, then cap each racial group at images_per_group samples.
            random.shuffle(combs)
            combs = combs[:self.images_per_group]
            all_subject_combs += combs
        return all_subject_combs

    def get_labels(self, subject_id):
        """Look up the training target for a subject, per self.predictor."""
        if self.predictor == 'sex':
            # SEX_ID is 1/2 in the metadata; map to 0/1.
            label = self.metadata.loc[self.metadata['subjID'] == subject_id]['SEX_ID'].values[0] - 1
        elif self.predictor == 'race':
            label = torch.tensor(self.onehot_race[(self.metadata['subjID'] == subject_id).values][0], dtype=torch.float32)
        elif self.predictor == 'age':
            label = self.metadata.loc[self.metadata['subjID'] == subject_id]['AGE'].values[0]
        return label

    def __getitem__(self, idx):
        """Load, augment and resize one slice; return (3xHxW tensor, label)."""
        IMAGE_SIZE = (224, 224)
        if self.all_subject_combs[idx][1] == '':
            filename = f'{self.all_subject_combs[idx][0]}_{SLICE}'
        else:
            filename = f'{self.all_subject_combs[idx][0]}_{SLICE}_{self.all_subject_combs[idx][1]}'
        # Load the slice named by the combination built above (the previous
        # code interpolated a literal placeholder instead of {filename}).
        image = np.load(f'{self.data_dir}/{filename}.npy')
        if self.all_subject_combs[idx][2] == 'flip1':
            image = image[::-1]
        elif self.all_subject_combs[idx][2] == 'flip2':
            # Fixed: this previously used '==' (a comparison whose result was
            # discarded), so the 'flip2' augmentation was silently a no-op.
            image = image[:, ::-1]
        elif self.all_subject_combs[idx][2] == 'flip12':
            image = image[::-1, ::-1]
        if self.all_subject_combs[idx][3] == 'original':
            pass
        else:
            # Shrink the brain and zero-pad back so the canvas size is kept.
            if self.all_subject_combs[idx][3] == 'small':
                shrink = 90/100
            else:
                shrink = 80/100
            pad = (1-shrink) /2
            smaller_image = st.resize(image, (int(len(image)*(shrink)), int(len(image[1])*(shrink))))
            smaller_image = np.pad(smaller_image, pad_width=((int(len(image)*pad), int(len(image)*pad)), (int(len(image[1])*pad), int(len(image[1])*pad))))
            image = smaller_image
        resized_image = st.resize(image, IMAGE_SIZE)
        # Replicate the grayscale slice into 3 channels for ImageNet backbones.
        image_3channel = torch.from_numpy(np.tile(np.expand_dims(resized_image, 0), (3,1,1)))
        subject_id = filename.split('_')[0]
        label = self.get_labels(subject_id)
        return image_3channel, label
if __name__ == '__main__':
PREDICTOR = 'sex'
SLICE = 'sagittal'
data_dir = f'{root_path}/{SLICE}'
# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]
model_name = "resnet"
# Number of classes in the dataset
num_classes = 2 if PREDICTOR == 'sex' else 4
# Batch size for training (change depending on how much memory you have)
batch_size = 3
# Number of epochs to train for
num_epochs = 50
# Flag for feature extracting. When False, we finetune the whole model,
# when True we only update the reshaped layer params
feature_extract = True
print(f'Predict: {PREDICTOR}, Slice: {SLICE}')
print(f'Model: {model_name}, # Classes: {num_classes}, Batch Size: {batch_size}, Epochs: {num_epochs}')
dataset = BrainDataset(data_dir, PREDICTOR, 800)
train_size = int(0.7*len(dataset))
val_size = int(0.1*len(dataset))
test_size = int(len(dataset) - train_size - val_size)
train_set, val_set, test_set = random_split(dataset, [train_size, val_size, test_size])
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=True)
dataloaders_dict = {"train": train_loader, "val": val_loader, "test": test_loader}
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Initialize the model for this run
model_ft, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)
# Print the model we just instantiated
print(model_ft)
# Send the model to GPU
model_ft = model_ft.to(device)
# Gather the parameters to be optimized/updated in this run. If we are
# finetuning we will be updating all parameters. However, if we are
# doing feature extract method, we will only update the parameters
# that we have just initialized, i.e. the parameters with requires_grad
# is True.
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
params_to_update = []
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("\t",name)
else:
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
print("\t",name)
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(params_to_update, lr=0.001, momentum=0.9)
# Setup the loss fxn
criterion = nn.CrossEntropyLoss()
# Train and evaluate
model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, PREDICTOR, num_epochs=num_epochs, is_inception=(model_name=="inception"))
torch.save(model_ft.state_dict(), f'models/{model_name}_{PREDICTOR}_{SLICE}_lr=0.001.pth')
test_model(model_ft, dataloaders_dict, PREDICTOR) | weimegan/brain-mris-race | train.py | train.py | py | 6,689 | python | en | code | 0 | github-code | 13 |
# Read the element count, then the n space-separated values.
n=int(input())
list1=input().split()
# flag is 1 when an out-of-order adjacent pair is found.
flag=0
for i in range(n-1):
    # NOTE: this branch is unreachable — when n == 1, range(0) is empty.
    if n==1:
        break
    if int(list1[i])>int(list1[i+1]):
        flag=1
        break
# "yes" if the sequence is non-decreasing, "no" otherwise.
if flag==0:
    print("yes")
else:
    print("no")
| devikamadasamy/beginner | sorted_or_not.py | sorted_or_not.py | py | 183 | python | en | code | 0 | github-code | 13 |
17504577849 | import random
from datetime import datetime, timedelta, timezone
from django.contrib import messages
from django.db.models import Q
from django.shortcuts import render, redirect
from card.forms import CardGenerateForm
from card.models import Card
def cards_list(request):
    """Render home page. Show card list"""
    cards = Card.objects.filter().order_by('-created_date')
    # 1-based row numbers for display next to each card.
    cards_idx = [i + 1 for i, card in enumerate(cards)]
    # Mark cards whose activity term has passed as expired.
    now = datetime.now(timezone.utc)
    for card in cards:
        if card.end_activity_date < now:
            card.status = 'expired'
            card.save()
    context = {
        'user': request.user,
        'cards': cards,
        'cards_idx': cards_idx,
    }
    return render(request, 'cards_list.html', context)
def card_generator(request):
    """Generate cards.

    On POST, validates the form and creates the requested number of cards
    with random unique numbers; warns when the series cannot supply the
    full amount. On GET, shows an empty generation form.
    """
    if request.method == 'POST':
        form = CardGenerateForm(request.POST)
        if form.is_valid():
            card_series = form.cleaned_data['card_series'].upper()
            digits_number = int(form.cleaned_data['digits_number'])
            card_number = int(form.cleaned_data['card_number'])
            term_activity = form.cleaned_data['term_activity']
            total = form.cleaned_data['total']
            # Numbers not already used in this series (may be fewer than asked).
            card_nums = generate_card_numbers(card_series, digits_number, card_number)
            for number in card_nums:
                card = Card(
                    card_series=card_series,
                    card_number=number,
                    end_activity_date=datetime.now() + timedelta(days=int(term_activity)),
                    total=total
                )
                card.save()
            if len(card_nums) < card_number:
                available_quantity = card_number - len(card_nums)
                messages.info(request, f'Для данной серии было сгенерировано {available_quantity} карт.\n'
                                       f'Для оставшихся {card_number - available_quantity} карт установите другую серию.')
            messages.success(request, 'Карты успешно добавлены в базу данных!')
            return redirect('card_generator')
        else:
            messages.error(request, form.non_field_errors())
    else:
        form = CardGenerateForm()
    context = {
        'form': form
    }
    return render(request, 'card_generator.html', context)
def generate_card_numbers(card_series, digits_number, card_number):
    """
    Generates a list of card numbers depending on
    the number of cards entered by the user.

    Samples card_number distinct numbers of exactly digits_number digits
    and filters out numbers already used in this series, so the result
    may contain fewer than card_number entries (the caller reports this).
    """
    cards = Card.objects.filter(card_series=card_series)
    existing_card_numbers = [card.card_number for card in cards]
    # Smallest/largest values with exactly digits_number digits.
    lower_range_limit = int('1' + '0'*(digits_number - 1))
    upper_range_limit = int('9'*digits_number)
    # random.sample guarantees the drawn numbers are distinct.
    cards_numbers = random.sample(range(lower_range_limit, upper_range_limit + 1), card_number)
    unique_card_numbers = [num for num in cards_numbers if num not in existing_card_numbers]
    return unique_card_numbers
def card_profile(request, card_id):
    """Render card profile page (authenticated users only)."""
    if request.user.is_authenticated:
        card = Card.objects.get(id=card_id)
        context = {
            'card': card
        }
        return render(request, 'card_profile.html', context)
    else:
        messages.error(request, 'Чтобы открыть профиль карты, войдите в систему!')
        return redirect('cards_list')
def delete_card(request, card_id):
    """Delete the chosen card (authenticated users only)."""
    if request.user.is_authenticated:
        card = Card.objects.get(id=card_id)
        card.delete()
        return redirect('cards_list')
    else:
        messages.error(request, 'Чтобы удалить карту, войдите в систему!')
        return redirect('cards_list')
def activate_card(request, card_id):
    """Toggle a card between 'activated' and 'not_activated'."""
    if request.user.is_authenticated:
        card = Card.objects.get(id=card_id)
        card_status = card.status
        # Any non-'activated' status (including 'expired') becomes 'activated'.
        if card_status == 'activated':
            card.status = 'not_activated'
        else:
            card.status = 'activated'
        card.save()
        return redirect('cards_list')
    else:
        messages.error(request, 'Чтобы изменить статус карты, войдите в систему!')
        return redirect('cards_list')
def search(request):
    """Filter the card list by a keyword across series, number, dates and status.

    Redirects back to the full list when no query is supplied. Previously a
    request without a 'query' GET parameter fell through to the render step
    with 'cards' unbound and raised a NameError (HTTP 500); the unused
    'all_cards' variable is also removed.
    """
    query = request.GET.get('query')
    if not query:
        return redirect('cards_list')
    cards = Card.objects.order_by('-created_date').filter(
        Q(card_series__icontains=query) |
        Q(card_number__icontains=query) |
        Q(created_date__icontains=query) |
        Q(end_activity_date__icontains=query) |
        Q(status__icontains=query)
    )
    # 1-based row numbers for display next to each card.
    cards_idx = [i + 1 for i, card in enumerate(cards)]
    context = {
        'cards': cards,
        'cards_idx': cards_idx
    }
    return render(request, 'cards_list.html', context)
| slychagin/cards-app | card/views.py | views.py | py | 5,250 | python | en | code | 1 | github-code | 13 |
26376674088 | # it needs pillow python library
import os
from PIL import Image, ImageEnhance, ImageFilter
# Source directory to scan and output directory suffix for edited copies.
path = "./imgs"
pathout = "/pyimgs"

for filename in os.listdir(path):
    # Skip the macOS Finder metadata file — it is not an image.
    if filename == ".DS_Store":
        continue
    # Open each listed image by its real name (the previous code interpolated
    # a literal placeholder instead of the loop variable, so nothing opened).
    img = Image.open(f"{path}/{filename}")
    # Sharpen, then convert to 8-bit grayscale ('L' mode).
    edit = img.filter(ImageFilter.SHARPEN).convert('L')
    clean_name = os.path.splitext(filename)[0]
    edit.save(f".{pathout}/{clean_name}_edited.jpg")
29267560679 | from django.shortcuts import render
from django.shortcuts import HttpResponse
from .forms import *
from django.http import HttpResponseRedirect
import time
# Create your views here.
def index(request):
    """Render the Fibonacci form; on a valid GET query, return the first
    'number' Fibonacci terms and the time taken to compute them.
    """
    if request.method == 'GET':
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # documented replacement for measuring elapsed time.
        start = time.perf_counter()
        form1 = inputform(request.GET)
        if form1.is_valid():
            nterms = form1.cleaned_data['number']
            # first two terms
            n1 = 1
            n2 = 1
            count = 0
            # check if the number of terms is valid
            if nterms <= 0:
                result = 'Output: Please enter a positive integer.'
            elif nterms == 1:
                series = '['+str(n1)+']'
                result = 'Output: '+series+'.'
            else:
                # NOTE(review): the built string keeps a trailing comma
                # before ']' — preserved here to keep output unchanged.
                series = '['
                while count < nterms:
                    series = series+str(n1)+','
                    nth = n1 + n2
                    # update values
                    n1 = n2
                    n2 = nth
                    count += 1
                series = series+']'
                result = 'Output: '+series+'.'
            totalTimeTaken = 'Time taken to get result: '+str(time.perf_counter() - start)
            #return HttpResponseRedirect('index.html')
            return HttpResponse(result+'<br>'+totalTimeTaken)
        else:
            form1 = inputform()
            return render(request, 'fibonacciSeries/index.html', {'frm':form1})
    else:
        return render(request, 'fibonacciSeries/index.html')
| jatinjade007/fibonacciSeries | fibonacciSeries/views.py | views.py | py | 1,512 | python | en | code | 0 | github-code | 13 |
32256798783 | from mycroft import MycroftSkill, intent_file_handler
# from mycroft.util import play_wav
from mycroft.skills.audioservice import AudioService
import os
import wave
import struct
import math
import time
#
class SoundTuner(MycroftSkill):
    """Mycroft skill that plays reference tones for tuning instruments."""

    def __init__(self):
        MycroftSkill.__init__(self)

    def initialize(self):
        """Register entity files and precompute the note/instrument tables."""
        self.register_entity_file("instrument.entity")
        self.register_entity_file("string.entity")
        labels = ['A', 'A#', 'B', 'C', 'C#',
                  'D', 'D#', 'E', 'F', 'E#', 'G', 'G#']

        # name is the complete name of a note (label + octave). the parameter
        # n is the number of half-tone from A4 (e.g. D#1 is -42, A3 is -12, A5 is 12)
        def name(n): return labels[n % len(labels)] + str(int((n+(9+4*12))/12))

        # the frequency of a note. the parameter n is the number of half-tones
        # from a4, which has a frequency of 440Hz, and is our reference note.
        def freq(n): return int(440*(math.pow(2, 1/12)**n))

        # a dictionary associating note names to note frequencies
        self.NOTES = {name(n): freq(n) for n in range(-42, 60)}
        # Guitar strings are E2=82.41Hz, A2=110Hz, D3=146.8Hz, G3=196Hz, B3=246.9Hz, E4=329.6Hz
        # Bass strings are (5th string) B0=30.87Hz, (4th string) E1=41.20Hz, A1=55Hz, D2=73.42Hz, G2=98Hz
        # Mandolin & violin strings are G3=196Hz, D4=293.7Hz, A4=440Hz, E5=659.3Hz
        # Viola & tenor banjo strings are C3=130.8Hz, G3=196Hz, D4=293.7Hz, A4=440Hz
        # Cello strings are C2=65.41Hz, G2=98Hz, D3=146.8Hz, A3=220Hz
        self.INSTRUMENT = {
            'guitar': {'LOW E': 'E2', 'A': 'A2', 'D': 'D3', 'G': 'G3', 'B': 'B3', 'HIGH E': 'E4'},
            'mandolin': {'G': 'G3', 'D': 'D4', 'A': 'A4', 'E': 'E5'},
            'violin': {'G': 'G3', 'D': 'D4', 'A': 'A4', 'E': 'E5'},
            'cello': {'C': 'C2', 'G': 'G2', 'D': 'D3', 'A': 'A3'},
            'viola': {'C': 'C3', 'G': 'G3', 'D': 'D4', 'A': 'A4'},
            'banjo': {'C': 'C3', 'G': 'G3', 'D': 'D4', 'A': 'A4'},
            'bass': {'B': 'B0', 'E': 'E1', 'A': 'A1', 'D': 'D2', 'G': 'G2'}}

    @intent_file_handler('tuner.sound.intent')
    def handle_tuner_sound(self, message):
        """Play the requested note; bare note names default to octave 4."""
        message = str.upper(message.data.get("note"))
        response = {'note': message}
        if self.NOTES.get(message):
            self.speak_dialog('tuner.sound', data=response, wait=True)
            self.make_sound(message)
        elif self.NOTES.get(message + '4'):
            self.speak_dialog('tuner.sound', data=response, wait=True)
            self.make_sound(message + '4')
        else:
            self.speak_dialog('can_not_do', data=response, wait=True)

    @intent_file_handler('instrument.intent')
    def handle_instrument(self, message):
        """Play the open-string pitch for the requested instrument and string."""
        instrument = message.data.get('instrument')
        string = message.data.get('string')
        string_lookup = str.upper(string)
        response = {'instrument': instrument, 'string': string}
        self.log.info(message)
        self.log.info(instrument)
        self.log.info(string_lookup)
        if string_lookup in self.INSTRUMENT[instrument]:
            note = self.INSTRUMENT[instrument][string_lookup]
            self.speak_dialog('instrument', data=response, wait=True)
            self.make_sound(note)
        else:
            self.speak_dialog('can_not_do_instrument',
                              data=response, wait=True)

    def make_sound(self, note):
        """Synthesize *note* as a 2 s mono 16-bit tone, play it and clean up."""
        sampleRate = 48000.0  # hertz
        duration = 2.0
        frequency = self.NOTES[note]
        wavef = wave.open('/tmp/sound.wav', 'w')
        wavef.setnchannels(1)  # mono
        wavef.setsampwidth(2)
        wavef.setframerate(sampleRate)
        for i in range(int(duration * sampleRate)):
            # Phase must advance by 2*pi*f/Fs per sample; the previous code
            # used pi*f/Fs, which produced a tone one octave below the
            # requested note.
            value = int(32767.0*math.cos(2*math.pi*frequency *
                                         float(i)/float(sampleRate)))
            data = struct.pack('<h', value)
            wavef.writeframesraw(data)
        wavef.close()
        # NOTE(review): AudioService.play is called unbound with the skill as
        # 'self' (matches the original usage) — confirm against Mycroft API.
        AudioService.play(self, tracks='file:///tmp/sound.wav')
        time.sleep(duration)
        os.remove('/tmp/sound.wav')
def create_skill():
    """Entry point used by Mycroft core to instantiate this skill."""
    return SoundTuner()
| andlo/sound-tuner-skill | __init__.py | __init__.py | py | 4,174 | python | en | code | 2 | github-code | 13 |
29880592767 | """Exporter module."""
import asyncio
import sys
from logging import getLogger
from typing import Any, Dict, List
from prometheus_client import CollectorRegistry, Gauge, start_http_server
from prometheus_juju_exporter.collector import Collector
from prometheus_juju_exporter.config import Config
class ExporterDaemon:
"""Core class of the exporter daemon."""
def __init__(self) -> None:
"""Create new daemon and configure runtime environment."""
self.config = Config().get_config()
self.logger = getLogger(__name__)
self.logger.info("Parsed config: %s", self.config.config_dir())
self._registry = CollectorRegistry()
self.metrics: Dict[str, Gauge] = {}
self.collector = Collector()
self.logger.debug("Exporter initialized")
def _create_metrics_dict(self, gauge_name: str, gauge_desc: str, labels: List[str]) -> None:
"""Create a dict of gauge instances.
:param str gauge_name: the name of the gauge
:param str gauge_desc: the description of the gauge
:param List[str] labels: the label set of the gauge
"""
if gauge_name not in self.metrics:
self.logger.debug("Creating Gauge %s", gauge_name)
self.metrics[gauge_name] = Gauge(
gauge_name, gauge_desc, labelnames=labels, registry=self._registry
)
def update_registry(self, data: Dict[str, Any]) -> None:
"""Update the registry with newly collected values.
:param dict data: the machine data collected by the Collector method
"""
for gauge_name, values in data.items():
self._create_metrics_dict(
gauge_name=gauge_name,
gauge_desc=values["gauge_desc"],
labels=values["labels"],
)
for labels, value in values["labelvalues_update"]:
self.logger.debug("Updating Gauge %s, %s: %s", gauge_name, labels, value)
self.metrics[gauge_name].labels(**labels).set(value)
previous_labels = set()
for metric in self._registry.collect():
for sample in metric.samples:
if sample.name == gauge_name:
previous_labels.add(tuple(sample.labels.values()))
current_labels = set()
for value in values["labelvalues_update"]:
current_labels.add(tuple(value[0].values()))
stale_labels = previous_labels - current_labels
for labels in stale_labels:
self.logger.debug("Deleting labelvalues %s from %s...", labels, gauge_name)
self.metrics[gauge_name].remove(*labels)
async def trigger(self) -> None:
"""Call Collector and configure prometheus_client gauges from generated stats."""
while True:
try:
self.logger.info("Collecting gauges...")
data = await self.collector.get_stats()
self.update_registry(data)
self.logger.info("Gauges collected and ready for exporting.")
await asyncio.sleep(self.config["exporter"]["collect_interval"].get(int) * 60)
except Exception as err: # pylint: disable=W0703
self.logger.error("Collection job resulted in error: %s", err)
sys.exit(1)
def run(self) -> None:
    """Run exporter."""
    self.logger.debug("Running prometheus client http server.")
    listen_port = self.config["exporter"]["port"].get(int)
    start_http_server(listen_port, registry=self._registry)
    try:
        asyncio.run(self.trigger())
    except KeyboardInterrupt as err:
        # Gracefully handle keyboard interrupt
        self.logger.info("%s: Exiting...", err)
        sys.exit(0)
| canonical/prometheus-juju-exporter | prometheus_juju_exporter/exporter.py | exporter.py | py | 3,842 | python | en | code | 1 | github-code | 13 |
10907049075 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '0.1a1'
CHUNKSIZE = 67108864 # 64 MiB size, for hashing in chunks
import sys
import os
import os.path
import argparse
from pathlib import Path
import hashlib
from datetime import datetime
import csv
from tqdm import tqdm
# --- Command-line interface and derivation of source/result paths ---
cli = argparse.ArgumentParser()
cli.add_argument("src_dir", type=str,
                 help="source directory to be scanned")
cli.add_argument('-n', '--nohash', action='store_true',
                 help='do not calculate hashes, only generate file info tree')
cli.add_argument("-o", "--outpath", type=str,
                 help="path or pathname of result file")
clargs = cli.parse_args()
print('')
print("This is MANBAMM's superhash - v"+__version__+" - by M.H.V. Werts, 2022")
print("")
# Timestamp of the start of this run; re-used in the output header.
dtn = datetime.now()
p_src = Path(clargs.src_dir)
if not p_src.is_dir():
    sys.exit("Specified source is not a directory.")
# strict=True: the source directory must exist.
p_src_abs = p_src.resolve(strict=True)
# Tag the default output name with 'noMD5' when hashing is disabled.
md5st = 'noMD5' if clargs.nohash else ''
dts = dtn.strftime('%y%m%d_%H%M%S')
# Default result name: <srcdir>_sh<timestamp>[noMD5].tsv
result_file = p_src_abs.stem+"_sh"+dts+md5st+".tsv"
if clargs.outpath is None:
    p_result = Path(result_file)
else:
    p_out = Path(clargs.outpath)
    if p_out.is_dir():
        # Outpath is a directory: place the default-named file inside it.
        p_result = Path(p_out, result_file)
    else:
        # Outpath is a full pathname: use it verbatim.
        result_file = clargs.outpath
        p_result = Path(result_file)
# strict=False: the result file does not exist yet.
p_result_abs = p_result.resolve(strict=False)
print('Source directory: ', str(p_src))
print('Output file : ', str(p_result))
print('')
# Walk the source tree and write one TSV row per regular file.
with open(p_result, 'w') as fout:
    writer = csv.writer(fout, delimiter='\t', quoting=csv.QUOTE_NONE)
    # Metadata header rows, then the column names.
    writer.writerow(['# superhash-version', __version__])
    writer.writerow(['# superhash-start-timestamp-iso', dtn.isoformat()])
    writer.writerow(['# absolute-path-source-dir',p_src_abs.as_posix()])
    writer.writerow(['# absolute-path-superhash-file',p_result_abs.as_posix()])
    writer.writerow(['# timestamp_iso',
                     'rel_path_posix',
                     'filename',
                     'mtime_iso',
                     'size',
                     'md5digest'])
    # sorts according to root, keeping the subdirs and files in sync
    walklist = sorted(list(os.walk(p_src_abs)))
    for root, subdirs, files in tqdm(walklist):
        checksums = []
        rootrelative = os.path.relpath(root, p_src_abs.parent)
        # enforce storing pathnames as posix
        rootrel_posix = Path(rootrelative).as_posix()
        # sort also the files inside each directory
        for file in tqdm(sorted(files), leave = False):
            filepath = Path(root, file)
            if (filepath.resolve() == p_result_abs):
                # Never hash the output file that is currently being written.
                tqdm.write('... skipping result file itself ('\
                           +str(p_result)+')')
            else:
                fpstat = filepath.stat()
                fpsize = fpstat.st_size
                mtime_iso = datetime.fromtimestamp(fpstat.st_mtime).isoformat()
                # Hash in CHUNKSIZE pieces so very large files don't exhaust RAM.
                with open(filepath, 'rb') as _file:
                    if clargs.nohash:
                        md5digest = ''
                    else:
                        cumhash = hashlib.md5()
                        for chunk in iter(lambda: _file.read(CHUNKSIZE), b''):
                            cumhash.update(chunk)
                        md5digest = cumhash.hexdigest()
                timestamp_iso = datetime.now().isoformat()
                checksums.append([timestamp_iso,
                                  rootrel_posix,
                                  file,
                                  mtime_iso,
                                  fpsize,
                                  md5digest])
        # Flush one directory's worth of rows at a time.
        writer.writerows(checksums)
print('')
print('')
| mhvwerts/MANBAMM-data-management | superhash.py | superhash.py | py | 3,685 | python | en | code | 0 | github-code | 13 |
12895278993 | """
:mod:`Abstract data source <src.system.data_sources.data_source>` for a rider.
"""
from typing import Dict
from src.system.data_sources.data_source.python_dict import DataSourcePythonDict
from src.system.constants import DEFAULT_COL
class BaseRider(DataSourcePythonDict):
    """
    :mod:`Abstract data source <src.system.data_sources.data_source>` for a rider.
    """

    def __init__(self, data: Dict):
        """
        Constructor method. Wraps a single rider's record from an annuity
        model point file in a dict-backed data source.

        :param data: Data for any single rider.
        """
        DataSourcePythonDict.__init__(self=self, data=data)

    @property
    def rider_type(self) -> str:
        """Rider type, used to indicate what kind of rider this is."""
        return self.cache[DEFAULT_COL]['rider_type']

    @property
    def rider_name(self) -> str:
        """Human-readable rider name."""
        return self.cache[DEFAULT_COL]['rider_name']
| chingdaotze/actuarial-model | src/data_sources/annuity/model_points/model_point/riders/base.py | base.py | py | 1,164 | python | en | code | 1 | github-code | 13 |
2856051038 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow.compat.v2 as tf
from valan.framework import common
from valan.framework import hparam
from valan.r2r import constants
from valan.r2r import env_ndh
from valan.r2r import env_ndh_config as env_config
from valan.r2r import env_test
FLAGS = flags.FLAGS
class NDHEnvTest(tf.test.TestCase):
  """Tests for the NDH (Navigation from Dialog History) environment."""

  def setUp(self):
    """Builds an NDHEnv over the checked-in testdata scan."""
    super(NDHEnvTest, self).setUp()
    self.data_dir = FLAGS.test_srcdir + (
        'valan/r2r/testdata')
    self.reward_fn_type = 'distance_to_goal'
    self._env_config = hparam.HParams(
        problem='NDH',
        history='all',
        path_type='trusted_path',
        max_goal_room_panos=4,
        scan_base_dir=self.data_dir,
        data_base_dir=os.path.join(self.data_dir, 'NDH'),
        vocab_file='vocab.txt',
        images_per_pano=36,
        max_conns=14,
        image_encoding_dim=64,
        direction_encoding_dim=256,
        image_features_dir=os.path.join(self.data_dir, 'image_features'),
        instruction_len=50,
        max_agent_actions=6,
        reward_fn_type=self.reward_fn_type,
        reward_fn=env_config.RewardFunction.get_reward_fn(self.reward_fn_type))
    self._runtime_config = common.RuntimeConfig(task_id=0, num_tasks=1)
    self._env = env_ndh.NDHEnv(
        data_sources=['small_split'],
        runtime_config=self._runtime_config,
        env_config=self._env_config)
    # For deterministic behavior in test
    np.random.seed(0)

  def _get_pano_id(self, pano_name, scan_id):
    """Maps a panorama name to its integer id within the given scan."""
    return self._env._scan_info[scan_id].pano_name_to_id[pano_name]

  def testStepToGoalRoom(self):
    """Steps along a golden path under the distance-to-room reward."""
    # Rebuild the env with the room-based reward function.
    self.reward_fn_type = 'distance_to_room'
    self._env_config = hparam.HParams(
        problem='NDH',
        history='all',
        path_type='trusted_path',
        max_goal_room_panos=4,
        scan_base_dir=self.data_dir,
        data_base_dir=os.path.join(self.data_dir, 'NDH'),
        vocab_file='vocab.txt',
        images_per_pano=36,
        max_conns=14,
        image_encoding_dim=64,
        direction_encoding_dim=256,
        image_features_dir=os.path.join(self.data_dir, 'image_features'),
        instruction_len=50,
        max_agent_actions=6,
        reward_fn_type=self.reward_fn_type,
        reward_fn=env_config.RewardFunction.get_reward_fn(self.reward_fn_type))
    self._runtime_config = common.RuntimeConfig(task_id=0, num_tasks=1)
    self._env = env_ndh.NDHEnv(
        data_sources=['small_split'],
        runtime_config=self._runtime_config,
        env_config=self._env_config)
    scan_id = 0  # testdata only has single scan 'gZ6f7yhEvPG'
    _ = self._env.reset()
    golden_path = [
        'ba27da20782d4e1a825f0a133ad84da9',
        '47d8a8282c1c4a7fb3eeeacc45e9d959',  # in the goal room
        '0ee20663dfa34b438d48750ddcd7366c'  # in the goal room
    ]
    # Step through the trajectory and verify the env_output.
    for i, action in enumerate(
        [self._get_pano_id(p, scan_id) for p in golden_path]):
      expected_time_step = i + 1
      expected_heading, expected_pitch = self._env._get_heading_pitch(
          action, scan_id, expected_time_step)
      if i + 1 < len(golden_path):
        expected_oracle_action = self._get_pano_id(golden_path[i + 1], scan_id)
      else:
        expected_oracle_action = constants.STOP_NODE_ID
      # Reward 1 while entering the goal room (first two steps), then 0.
      expected_reward = 1 if i <= 1 else 0
      env_test.verify_env_output(
          self,
          self._env.step(action),
          expected_reward=expected_reward,  # Moving towards goal.
          expected_done=False,
          expected_info='',
          expected_time_step=expected_time_step,
          expected_path_id=318,
          expected_pano_name=golden_path[i],
          expected_heading=expected_heading,
          expected_pitch=expected_pitch,
          expected_scan_id=scan_id,
          expected_oracle_action=expected_oracle_action)
    # Stop at goal pano. Terminating the episode results in resetting the
    # observation to next episode.
    env_test.verify_env_output(
        self,
        self._env.step(constants.STOP_NODE_ID),
        expected_reward=4,  # reached goal and stopped
        expected_done=True,  # end of episode
        expected_info='',
        # observation for next episode.
        expected_time_step=0,
        expected_path_id=1304,
        expected_pano_name='80929af5cf234ae38ac3a2a4e60e4342',
        expected_heading=6.101,
        expected_pitch=0.,
        expected_scan_id=scan_id,
        expected_oracle_action=self._get_pano_id(
            'ba27da20782d4e1a825f0a133ad84da9', scan_id))

  def testGetAllPaths(self):
    """Checks tokenized dialog-history instructions for every history mode."""

    def _get_all_paths(history):
      # Helper: load the small_split paths with the given history setting.
      return env_ndh._get_all_paths_ndh(
          data_sources=['small_split'],
          data_base_dir=os.path.join(self.data_dir, 'NDH'),
          vocab_file='vocab.txt',
          fixed_instruction_len=50,
          history=history,
          path_type='trusted_path')

    # <PAD> is 0, <UNK> is 1, <NAV> is 3, <ORA> is 4, <TAR> is 5.
    all_paths = _get_all_paths('none')
    self.assertEqual(0, all_paths[0]['instruction_len'])
    np.testing.assert_array_equal(all_paths[0]['instruction_token_ids'],
                                  [0] * 50)
    all_paths = _get_all_paths('target')
    self.assertEqual(2, all_paths[0]['instruction_len'])
    np.testing.assert_array_equal(all_paths[0]['instruction_token_ids'],
                                  [5, 66] + [0] * 48)
    all_paths = _get_all_paths('oracle_ans')
    self.assertEqual(9, all_paths[0]['instruction_len'])
    np.testing.assert_array_equal(
        all_paths[0]['instruction_token_ids'],
        [
            4, 87, 91, 86, 97, 121, 66,  # ora_ans
            5, 66,  # target
        ] + [0] * 41)
    all_paths = _get_all_paths('nav_q_oracle_ans')
    self.assertEqual(18, all_paths[0]['instruction_len'])
    np.testing.assert_array_equal(
        all_paths[0]['instruction_token_ids'],
        [
            3, 254, 88, 122, 1, 90, 87, 91, 89,  # nav_q
            4, 87, 91, 86, 97, 121, 66,  # ora_ans
            5, 66,  # target
        ] + [0] * 32)
    all_paths = _get_all_paths('all')
    self.assertEqual(32, all_paths[0]['instruction_len'])
    np.testing.assert_array_equal(
        all_paths[0]['instruction_token_ids'],
        [
            3, 254, 88, 142, 97, 118, 90, 221, 91, 87, 91, 89,  # nav_q
            4, 299,  # ora_ans
            3, 254, 88, 122, 1, 90, 87, 91, 89,  # nav_q
            4, 87, 91, 86, 97, 121, 66,  # ora_ans
            5, 66,  # target
        ] + [0] * 18)
if __name__ == '__main__':
  # Run the test suite with TF2 behavior enabled.
  tf.enable_v2_behavior()
  tf.test.main()
| google-research/valan | r2r/env_ndh_test.py | env_ndh_test.py | py | 6,708 | python | en | code | 69 | github-code | 13 |
74324842577 | # %%
import re
import sys
# %%
def replace_latex_math_mode(text):
    """Convert inline LaTeX math ($...$) to Hatena's [tex: ...] notation.

    Each non-greedy $...$ span becomes '[tex: <body>] ' (note the trailing
    space added by the replacement template).
    """
    return re.sub(r'\$(.*?)\$', r'[tex: \1] ', text)
def replace_extension(filename):
    """Return ``filename`` with a trailing ".md" replaced by "_hatena.md".

    Filenames that do not end in ".md" are returned unchanged.
    """
    if filename.endswith(".md"):
        # BUGFIX: str.replace(".md", ...) rewrote EVERY ".md" occurrence in
        # the name (e.g. "a.md.md" -> "a_hatena.md_hatena.md"); only the
        # final extension should change.
        return filename[:-len(".md")] + "_hatena.md"
    # If the file does not have a ".md" extension, return the original filename
    return filename
# %% GPT
# import re
def rewrite_underscores(expression):
    """Escape underscores and square brackets inside every $...$ span.

    Underscores become ``\\_`` and brackets become ``\\\\[`` / ``\\\\]`` so the
    math survives Hatena's Markdown processing; text outside math mode is
    left untouched.
    """
    for inner in re.findall(r'\$([^$]+)\$', expression):
        escaped = inner.replace('_', r'\_')
        # Brackets get a double backslash (added manually upstream).
        escaped = escaped.replace('[', r'\\[').replace(']', r'\\]')
        expression = expression.replace(f'${inner}$', f'${escaped}$')
    return expression
# %%
# %%
# Convert one Markdown file with LaTeX math into Hatena-blog notation.
args = sys.argv
file_name = args[1]  # e.g. "01.md"

with open(file_name) as f:
    latex_text = f.read()

# Escape underscores/brackets inside $...$, then rewrite to [tex: ...].
replaced_text = rewrite_underscores(latex_text)
replaced_text = replace_latex_math_mode(replaced_text)

# Write the result next to the source as *_hatena.md.
replaced_filename = replace_extension(file_name)
with open(replaced_filename, 'w') as f:
    f.write(replaced_text)
# %%
################
# memo
################
def find_displaymath(x):
    """Return every display-math ($$...$$) span found in x."""
    pattern = re.compile(r"\$\$.+\$\$")
    return pattern.findall(x)
# %%
def find_inlinemath(x):
    """Return all inline $...$ spans; refuse input containing display math."""
    if find_displaymath(x):
        raise ValueError("include display math mode")
    return re.findall(r"\$[^\$]+\$", x)
# %%
# %%
def search_inlinemath(x):
    """Return the first inline $...$ match object; refuse display math."""
    if find_displaymath(x):
        raise ValueError("include display math mode")
    return re.search(r"\$[^\$]+\$", x)
# a = "$ a $, $ \gamma_{0}$, \n $b$"
# %% | Krypf/useful_program_20230616 | markdown_to_hatena.py | markdown_to_hatena.py | py | 2,478 | python | en | code | 0 | github-code | 13 |
40640156881 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Format insar names and images
#
# By Rob Zinke 2019
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Import modules
from datetime import datetime, time
import numpy as np
import matplotlib.pyplot as plt
from osgeo import gdal
####################################
### --- Geographic transform --- ###
####################################
# --- Convert pixel location to map location ---
# Pixels to map coordinates
def px2coords(tnsf, px, py):
    """Convert pixel indices to map coordinates via a GDAL geotransform.

    tnsf follows GDAL order: (left, dx, _, top, _, dy).
    """
    xcoord = tnsf[0] + px * tnsf[1]
    ycoord = tnsf[3] + py * tnsf[5]
    return xcoord, ycoord
# Map coordinates to pixels
def coords2px(tnsf, lon, lat):
    """Convert map coordinates to pixel indices (inverse of px2coords).

    :param tnsf: GDAL geotransform (left, dx, _, top, _, dy)
    :param lon: x map coordinate
    :param lat: y map coordinate
    :return: (px, py) integer pixel indices
    """
    left = tnsf[0]; dx = tnsf[1]
    top = tnsf[3]; dy = tnsf[5]
    px = int((lon - left) / dx)
    # BUGFIX: ycoord = top + py*dy inverts to py = (lat - top)/dy; the old
    # (top - lat)/dy form returned negated row indices for north-up rasters
    # (negative dy).
    py = int((lat - top) / dy)
    return px, py
# --- Simple map extent ---
# Convert geo transform to extent
def transform2extent(tnsf, M, N):
    """Build a matplotlib-style extent (left, right, bottom, top).

    :param tnsf: GDAL geotransform (left, dx, _, top, _, dy)
    :param M: number of rows
    :param N: number of columns
    """
    left, top = tnsf[0], tnsf[3]
    right = left + tnsf[1] * N
    bottom = top + tnsf[5] * M
    return (left, right, bottom, top)
# --- GDAL geographic transform ---
# Format transform data into something useful
class GDALtransform:
    """Unpack a GDAL geotransform (plus raster shape) into named fields."""

    def __init__(self, DS=None, transform=None, shape=None, vocal=False):
        """Initialize from an open GDAL dataset OR an explicit transform.

        :param DS: open GDAL dataset; if given, transform/shape are read
            from it and the explicit arguments are ignored
        :param transform: 6-tuple from data.GetGeoTransform()
        :param shape: (rows, cols) from data.GetRasterBand(#).ReadAsArray().shape
        :param vocal: print a summary of the derived geometry
        """
        if DS is not None:
            transform = DS.GetGeoTransform()
            shape = (DS.RasterYSize, DS.RasterXSize)
        self.m, self.n = shape[0], shape[1]
        self.xstart = transform[0]
        self.xstep = transform[1]
        self.ystart = transform[3]
        self.ystep = transform[5]
        self.xend = self.xstart + self.n * self.xstep
        self.yend = self.ystart + self.m * self.ystep
        # Ordered bounds are step-sign agnostic (dy is usually negative).
        self.xmin = np.min([self.xend, self.xstart])
        self.xmax = np.max([self.xend, self.xstart])
        self.ymin = np.min([self.yend, self.ystart])
        self.ymax = np.max([self.yend, self.ystart])
        self.extent = [self.xmin, self.xmax, self.ymin, self.ymax]
        self.bounds = [self.xmin, self.ymin, self.xmax, self.ymax]
        if vocal is not False:
            print('Image properties: ')
            print('\tNS-dim (m): %i' % self.m)
            print('\tEW-dim (n): %i' % self.n)
            print('\tystart: %f\tyend: %f' % (self.ystart, self.yend))
            print('\txstart: %f\txend: %f' % (self.xstart, self.xend))
            print('\tystep: %f\txstep: %f' % (self.ystep, self.xstep))
################################
### --- Image formatting --- ###
################################
# --- Save image using a template ---
# Wrapper for saving function
def save2tiff(array, method='parameters', template=None):
    """Dispatch saving of ``array`` to GeoTIFF by the requested method.

    :param array: 2D raster array to save
    :param method: 'template' (copy georeferencing from a template image)
        or 'parameters' (build georeferencing from explicit parameters)
    :param template: template dataset, used only for method='template'
    """
    if method in ['template']:
        save2tiff_template(array, template)
    elif method in ['parameters']:
        # BUGFIX: previous call was save2tiff_parameters(array, parameters)
        # — `parameters` was never defined (NameError) and the callee takes
        # no arguments.
        save2tiff_parameters()
# Using a given image
def save2tiff_template(array, template):
    """Placeholder: saving via a template image is not implemented yet."""
    print('Does not work yet')
# Using parameters
def save2tiff_parameters():
    """Placeholder: saving via explicit parameters is not implemented yet."""
    print('Does not work yet')
25683926542 | #!/usr/bin/env python3
#from statistics import NormalDist
# sigma = standarddev/sqrt(n)
# Store 1 array of mu
# Store 1 array of sigma
# search with mu to limit
# In limited space, do below
#NormalDist(mu=2.5, sigma=1).overlap(NormalDist(mu=5.0, sigma=1))
# take average of all overlaps
#[0.1, 0.9, 0.3, 0.3] -> 0.8
# sqrt(sum of squares) for example
# third root (sum of x^3's)
# The higher power, the more you pick up maximums.
# Higher weights values closer to 1 higher
import torch
from transformers import AutoTokenizer, AutoModel
import numpy as np
from statistics import NormalDist
from scipy.stats import entropy
from scipy.sparse import diags
from scipy.spatial.distance import euclidean
import random
from transformer_infrastructure.hf_utils import build_index_flat, build_index_voronoi
from transformer_infrastructure.run_tests import run_tests
from transformer_infrastructure.hf_embed import parse_fasta_for_embed, get_embeddings
import copy
from Bio import SeqIO
from Bio.Align import MultipleSeqAlignment
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from scipy.stats import multivariate_normal
#import tensorflow as tf
from time import time
from sklearn.preprocessing import normalize
import faiss
import pickle
import argparse
import os
import sys
import igraph
from pandas.core.common import flatten
import pandas as pd
from numba import njit
from collections import Counter
import matplotlib.pyplot as plt
import logging
from sklearn.metrics.pairwise import cosine_similarity
# This is in the goal of finding sequences that poorly match before aligning
# SEQSIM
def kl_mvn(m0, S0, m1, S1):
    """
    Kullback-Leibler divergence KL( N(m0,S0) || N(m1,S1) ) between two
    multivariate Gaussians, using the closed form

    KL( (m0, S0) || (m1, S1))
         = .5 * ( tr(S1^{-1} S0) + log |S1|/|S0| +
                  (m1 - m0)^T S1^{-1} (m1 - m0) - N )
    """
    dim = m0.shape[0]
    S1_inv = np.linalg.inv(S1)
    delta = m1 - m0
    # Three terms of the closed form, plus the -N constant.
    trace_part = np.trace(S1_inv @ S0)
    logdet_part = np.log(np.linalg.det(S1) / np.linalg.det(S0))
    maha_part = delta.T @ np.linalg.inv(S1) @ delta
    return .5 * (trace_part + logdet_part + maha_part - dim)
def graph_from_distindex(index, dist):
    """Build a directed, weighted igraph graph from faiss search results.

    ``index[i, j]`` is the j-th neighbour of sequence i and ``dist[i, j]``
    its similarity; negative similarities are clamped to 0.001.
    """
    edges = []
    weights = []
    for i in range(len(index)):
        for j in range(len(index[i])):
            similarity = dist[i, j]
            # The index should not return negative similarity; clamp if so.
            if similarity < 0:
                similarity = 0.001
            edges.append((i, index[i, j]))
            weights.append(similarity)
    print("edge preview", edges[0:15])
    # directed=True prevents the target from being placed first in edges.
    G = igraph.Graph.TupleList(edges=edges, directed=True)
    G.es['weight'] = weights
    return G
# If removing a protein leads to less of a drop in total edgeweight that other proteins
def candidate_to_remove(G, v_names,z = -5):
    """Flag sequences whose summed outgoing edge weight is a z-score outlier.

    For each vertex name, sums the weights of its outgoing edges, then
    z-scores that sum against the mean/std of the other vertices' sums.
    Vertices scoring below ``z`` are returned as removal candidates.

    :param G: igraph graph with an es['weight'] attribute
    :param v_names: vertex names to evaluate
    :param z: z-score threshold (default -5)
    :return: list of vertex names considered outliers (empty for <=3 vertices)
    """
    weights = {}
    num_prots = len(G.vs())
    print("num_prots")
    # Too few sequences for a meaningful z-score.
    if num_prots <=3:
        return([])
    for i in v_names:
        # Sum of outgoing edge weights for vertex i.
        # NOTE(review): G.copy() per vertex looks unnecessary (nothing is
        # mutated) and is O(V*(V+E)) — confirm before simplifying.
        g_new = G.copy()
        vs = g_new.vs.find(name = i)
        weight = sum(g_new.es.select(_source=vs)['weight'])
        weights[i] = weight
    questionable_z = []
    for i in v_names:
        # Split this vertex's total from everyone else's.
        others = []
        for key,value in weights.items():
            if key == i:
                own_value = value
            else:
                others.append(value)
        print(own_value, others)
        seq_z = (own_value - np.mean(others))/np.std(others)
        print("sequence ", i, " zscore ", seq_z)
        # Only strongly negative z-scores (much weaker similarity than the
        # rest) are flagged.
        if seq_z < z:
            questionable_z.append(i)
    print("questionalbe_z", questionable_z)
    return(questionable_z)
def get_seq_groups2(seqs, seq_names, embedding_dict, logging, exclude, do_clustering, seqsim_thresh= 0.75):
    """Explore k-means clusterings of per-sequence embeddings.

    Searches the all-vs-all similarity of the sequence embeddings, builds a
    similarity graph, and (when ``do_clustering``) runs faiss k-means for
    k = 1..19, printing the mean intra-cluster edge weight per cluster and
    per k. Diagnostic/experimental: returns nothing.

    :param seqs: list of sequences
    :param seq_names: list of sequence names (currently unused here)
    :param embedding_dict: dict with a 'sequence_embeddings' array
    :param logging: logger object
    :param exclude: unused flag kept for interface compatibility
    :param do_clustering: run the k-means sweep when True
    :param seqsim_thresh: similarity threshold (currently unused here)
    """
    numseqs = len(seqs)
    k_select = numseqs
    sentence_array = np.array(embedding_dict['sequence_embeddings'])
    # Cap the embedding width at 1024 dimensions.
    if sentence_array.shape[1] > 1024:
        sentence_array = sentence_array[:, :1024]
    s_index = build_index_flat(sentence_array)
    s_distance, s_index2 = s_index.search(sentence_array, k=k_select)
    # BUGFIX: G was referenced below without ever being constructed
    # (NameError); build it from the search results as get_seqsims does.
    G = graph_from_distindex(s_index2, s_distance)
    G = G.simplify(combine_edges="first")  # symmetrical, doesn't matter
    to_exclude = []
    group_hstates_list = []
    cluster_seqnums_list = []
    cluster_names_list = []
    cluster_seqs_list = []
    logging.info("Removing spaces from sequences")
    if do_clustering == True:
        d = sentence_array.shape[1]
        # Sweep k and report mean intra-cluster edge weights.
        for k in range(1, 20):
            kmeans = faiss.Kmeans(d=d, k=k, niter=20)
            kmeans.train(sentence_array)
            D, I = kmeans.index.search(sentence_array, 1)
            print("D", D)
            print("I", I)
            clusters = I.squeeze()
            labels = list(zip(G.vs()['name'], clusters))
            group_hstates_list = []
            cluster_seqnums_list = []
            cluster_names_list = []
            cluster_seqs_list = []
            prev_to_exclude = to_exclude
            means = []
            for clustid in list(set(clusters)):
                print("eval clust", clustid)
                clust_seqs = [x[0] for x in labels if x[1] == clustid]
                print("clust_seqs", clust_seqs)
                # Collect weights of edges fully inside this cluster.
                weightlist = []
                for edge in G.es():
                    if G.vs[edge.target]["name"] in clust_seqs:
                        if G.vs[edge.source]["name"] in clust_seqs:
                            weightlist.append(edge['weight'])
                            print(G.vs[edge.target]["name"], G.vs[edge.source]["name"], edge['weight'])
                print(weightlist)
                print("clust {} mean {}".format(clustid, np.mean(weightlist)))
                means.append(np.mean(weightlist))
            print("k {} overall mean {}".format(clustid, np.mean(means)))
def seq_index_search(sentence_array, k_select, s_index=None):
    """Search for each row's k_select nearest neighbours.

    Builds a flat cosine-similarity index when none is supplied.
    Returns (distances, indices) as produced by faiss.
    """
    if not s_index:
        s_index = build_index_flat(sentence_array, scoretype="cosinesim")
    distances, neighbour_ids = s_index.search(sentence_array, k=k_select)
    return (distances, neighbour_ids)
def get_seqsims(sentence_array, k=None, sentence_index=None):
    '''
    Take numpy array (float32) [[],[]] and calculate k-nearest neighbors either among array or from precomputed index of arrays.
    If k is not provided, returned sequences will be the length of the sentence array if no precomputed index provided, or number of vectors in the index if that is provided.
    Time to return additional k is negligable
    '''
    print("k", k)
    if not k:
        k = sentence_array.shape[0]
    search_start = time()
    if not sentence_index:
        sentence_index = build_index_flat(sentence_array)
    print("Searching index")
    distances, indices = seq_index_search(sentence_array, k, sentence_index)
    print("Index searched in {} seconds".format(time() - search_start))
    graph_start = time()
    G = graph_from_distindex(indices, distances)
    print("Index converted to edges in {} seconds".format(time() - graph_start))
    return (G, sentence_index)
def get_seqsim_args():
    """Parse and return the command-line arguments for the seqsim tool."""
    argp = argparse.ArgumentParser()
    # Required I/O and model arguments.
    argp.add_argument("-i", "--in", dest="fasta_path", type=str, required=True,
                      help="Path to fasta")
    argp.add_argument("-o", "--outfile", dest="out_path", type=str, required=True,
                      help="Path to outfile")
    argp.add_argument("-m", "--model", dest="model_name", type=str, required=True,
                      help="Model name or path to local model")
    # Optional inputs.
    argp.add_argument("-e", "--emb", dest="embedding_path", type=str, required=False,
                      help="Path to embeddings")
    argp.add_argument("-ml", "--minlength", dest="minlength", type=int, required=False,
                      help="If present, minimum length of sequences to search against index")
    argp.add_argument("-ex", "--exclude", dest="exclude", action="store_true",
                      help="Exclude outlier sequences from initial alignment process")
    argp.add_argument("-dx", "--index_means", dest="index_file", required=False,
                      help="Prebuilt index of means")
    argp.add_argument("-dxs", "--index_sigmas", dest="index_file_sigmas", required=False,
                      help="Prebuilt index of sigmas (standard deviations)")
    argp.add_argument("-dxn", "--index_names", dest="index_names_file", required=False,
                      help="Prebuilt index names, One protein name per line, in order added to index")
    argp.add_argument("-ss", "--strategy", dest="strat", type=str, required=False,
                      default="mean", choices=['mean', 'meansig'],
                      help="Whether to search with cosine similarity of mean only, or follow by comparison of gaussians")
    argp.add_argument("-fx", "--fully_exclude", dest="fully_exclude", action="store_true",
                      help="Additionally exclude outlier sequences from final alignment")
    argp.add_argument("-l", "--layers", dest="layers", nargs="+", type=int,
                      help="Which layers (of 30 in protbert) to select")
    argp.add_argument("-hd", "--heads", dest="heads", type=str,
                      help="File will one head identifier per line, format layer1_head3")
    argp.add_argument("-st", "--seqsimthresh", dest="seqsimthresh", type=float,
                      required=False, default=0.75,
                      help="Similarity threshold for clustering sequences")
    argp.add_argument("-s", "--scoretype", dest="scoretype", type=str, required=False,
                      default="cosinesim", choices=["cosinesim", "euclidean"],
                      help="How to calculate initial sequence similarity score")
    argp.add_argument("-k", "--knn", dest="k", type=int, required=False,
                      help="Limit edges to k nearest neighbors")
    argp.add_argument("-p", "--pca_plot", dest="pca_plot", action="store_true",
                      required=False,
                      help="If flagged, output 2D pca plot of amino acid clusters")
    argp.add_argument("-l2", "--headnorm", dest="headnorm", action="store_true",
                      required=False,
                      help="Take L2 normalization of each head")
    return (argp.parse_args())
def kl_gauss(m1, m2, s1, s2):
    """KL divergence between the 1-D Gaussians N(m1, s1) and N(m2, s2)."""
    variance_term = (s1 ** 2 + (m1 - m2) ** 2) / (2 * s2 ** 2)
    return np.log(s2 / s1) + variance_term - 1 / 2
def get_ovl(m1, m2, s1, s2):
    """Overlap coefficient of the 1-D Gaussians N(m1, s1) and N(m2, s2)."""
    first = NormalDist(mu=m1, sigma=s1)
    second = NormalDist(mu=m2, sigma=s2)
    return first.overlap(second)
def get_w2(m1, m2, s1, s2):
    """2-Wasserstein distance between the 1-D Gaussians N(m1,s1), N(m2,s2)."""
    return np.sqrt((m1 - m2) ** 2 + (s1 - s2) ** 2)
@njit
def numba_w2(m1, m2, s1, s2, w2):
    """Elementwise 2-Wasserstein distance of paired 1-D Gaussians.

    Writes into the preallocated output array ``w2`` and returns it;
    jit-compiled with numba for speed over long arrays.
    """
    for i in range(m1.shape[0]):
        w2[int(i)] = np.sqrt((m1[i] - m2[i])**2 + (s1[i] - s2[i])**2)
    return w2
@njit
def numba_ovl(m1, m2, s1, s2, o):
    """Elementwise overlap coefficient of paired 1-D Gaussians, written into o.

    NOTE(review): ``statistics.NormalDist`` is a pure-Python class; numba's
    nopython mode is unlikely to compile this body — confirm this function
    is actually called anywhere before relying on it.
    """
    for i in range(m1.shape[0]):
        o[int(i)] = NormalDist(mu=m1[i], sigma=s1[i]).overlap(NormalDist(mu=m2[i], sigma=s2[i]))
    return(o)
if __name__ == '__main__':
true_start = time()
args = get_seqsim_args()
print("args parsed", time() - true_start)
fasta_path = args.fasta_path
embedding_path = args.embedding_path
minlength = args.minlength
outfile = args.out_path
exclude = args.exclude
fully_exclude = args.fully_exclude
layers = args.layers
heads = args.heads
index_file = args.index_file
index_file_sigmas = args.index_file_sigmas
index_names_file = args.index_names_file
model_name = args.model_name
pca_plot = args.pca_plot
headnorm = args.headnorm
seqsim_thresh = args.seqsimthresh
k = args.k
strat = args.strat
scoretype = args.scoretype
s_index = None
s_sigma_index = None
# Keep to demonstrate effect of clustering or not
#do_clust return(ovl)ering = True
logname = "align.log"
#print("logging at ", logname)
log_format = "%(asctime)s::%(levelname)s::"\
"%(filename)s::%(lineno)d::%(message)s"
logging.basicConfig(filename=logname, level='DEBUG', format=log_format)
if heads is not None:
with open(heads, "r") as f:
headnames = f.readlines()
print(headnames)
headnames = [x.replace("\n", "") for x in headnames]
print(headnames)
else:
headnames = None
logging.info("Check for torch")
logging.info(torch.cuda.is_available())
padding = 0
logging.info("model: {}".format(model_name))
logging.info("fasta: {}".format(fasta_path))
logging.info("padding: {}".format(padding))
faiss.omp_set_num_threads(10)
print("MINLENGTH", minlength)
if minlength:
seq_names, seqs, seqs_spaced = parse_fasta_for_embed(fasta_path, padding = padding, minlength = minlength)
else:
seq_names, seqs, seqs_spaced = parse_fasta_for_embed(fasta_path, padding = padding)
print("seqs parsed", time() - true_start)
if embedding_path:
with open(embedding_path, "rb") as f:
embedding_dict = pickle.load(f)
else:
seqlens = [len(x) for x in seqs]
embedding_dict = get_embeddings(seqs_spaced,
model_name,
seqlens = seqlens,
get_sequence_embeddings = True,
get_aa_embeddings = False,
layers = layers,
padding = padding,
heads = headnames,
strat = strat)
print("embeddings made", time() - true_start)
print("getting sequence similarities")
if index_file:
if not index_names_file:
print("Provide file of index names in order added to index")
exit(1)
else:
with open(index_names_file, "r") as infile:
#index_names = infile.readlines()
#index_names = [x.replace("\n", "").split(",") for x in index_names]
#Read as {idx:proteinID}
df = pd.read_csv(infile, header= None)
df.columns = ['prot', 'idx']
index_names = dict(zip(df.idx,df.prot))
#index_names = index_names.set_index(['idx'])
#print(index_names)
#index_names = index_names.to_dict('index')
#print(index_names)
# Don't use seqnames from input fasta, use index seqnames
start_time = time()
s_index = faiss.read_index(index_file)
if strat == "meansig":
if index_file_sigmas:
s_sigma_index = faiss.read_index(index_file_sigmas)
else:
s_sigma_index = None
sigma_embeddings = np.array(embedding_dict['sequence_embeddings_sigma']).astype(np.float32)
s_sigma_index = build_index_flat(sigma_embeddings, s_sigma_index)
end_time = time()
print("Loaded index(es) in {} seconds".format(end_time - start_time))
else:
if strat == "meansig":
s_sigma_index = None
sigma_embeddings = np.array(embedding_dict['sequence_embeddings_sigma']).astype(np.float32)
s_sigma_index = build_index_flat(sigma_embeddings, s_sigma_index)
index_names = seq_names
#kl = tf.keras.losses.KLDivergence()
# Step 1: Use means to get local area of sequences
sentence_array = np.array(embedding_dict['sequence_embeddings']).astype(np.float32)
if not k:
k = len(seqs)
G, s_index = get_seqsims(sentence_array, k = k, sentence_index = s_index)
print("similarities made", time() - true_start)
print(outfile)
print("#_vertices", len(G.vs()))
print("query_names", len(seq_names))
print("index_names", len(index_names))
named_vertex_list = G.vs()["name"]
print(named_vertex_list)
retrieve_start_time = time()
target_mean_dict = dict([(x, s_index.reconstruct(int(x))) for x in named_vertex_list])
target_sigma_dict = dict([(x, s_sigma_index.reconstruct(int(x))) for x in named_vertex_list])
retrieve_end_time = time()
amount = retrieve_end_time - retrieve_start_time
print("Vectors retrieved from index in ", amount)
vec_kl_gauss = np.vectorize(kl_gauss)
vec_get_ovl = np.vectorize(get_ovl)
vec_get_w2 = np.vectorize(get_w2)
sentence_array = embedding_dict['sequence_embeddings']
#faiss.normalize_L2(sentence_array)
sigma_array = embedding_dict['sequence_embeddings_sigma']
#faiss.normalize_L2(sigma_array)
#sentence_array_l2norm = normalize(sentence_array, norm='l2', axis=1, copy=True)
with open(outfile, "w") as o:
#o.write("source,target,score,overlap,kl,w2_mean,w2_vec,euc_mean,euc_sigma\n")
o.write("source,target,distance,cosinesim,w2_mean,w2_mean_neg_e,w2_mean_neg_e_1_10\n")
e_start = time()
for edge in G.es():
#print(edge)
#print(G.vs()[edge.source], G.vs()[edge.target], edge['weight'])
source_idx = int(G.vs()[edge.source]['name'])
target_idx = int(G.vs()[edge.target]['name'])
#print(source_idx, target_idx)
if source_idx == -1:
continue
if target_idx == -1:
continue
source = seq_names[source_idx]
target = index_names[target_idx]
weight = edge['weight']
d_start = time()
source_mean = sentence_array[source_idx]
source_sigma = sigma_array[source_idx]
#print(source_mean)
#source_mean = vertex_mean_dict[source_idx]
#source_sigma = vertex_sigma_dict[source_idx]
target_mean = target_mean_dict[target_idx]
#print(target_mean)
target_sigma = target_sigma_dict[target_idx]
#print(source_mean)
#print(target_mean)
#print(source_sigma)
#print(target_sigma)
#d_end = time()
d_span = time() -d_start
cosinesim = cosine_similarity([source_mean], [target_mean])
#print(cosinesim)
#print(cosinesim[0][0])
cosinesim = cosinesim[0][0]
##source_mean = s_index.reconstruct(source_idx)
#source_sigma = s_sigma_index.reconstruct(source_idx)
#target_mean = s_index.reconstruct(target_idx)
#target_sigma = s_sigma_index.reconstruct(target_idx)
#print("source_mean", source_mean)
#print("source_sigma", source_sigma)
#
# Do overlaps of each row
#arr =np.array([source_mean, target_mean, source_sigma, target_sigma])
#print(arr)
#o_start = time()
# This is too slow
#overlaps = [NormalDist(mu=m1, sigma=s1).overlap(NormalDist(mu=m2, sigma=s2)) for m1, m2, s1, s2 in zip(source_mean, target_mean, source_sigma, target_sigma)]
#mean_overlap = np.mean(overlaps)
#o_end = time()
#o_span = time() - o_start
#print(overlaps[0:5])
#overlaps = NormalDist(mu=source_mean, sigma=source_sigma).overlap(NormalDist(mu=target_mean, sigma=target_sigma))
###m_start = time()
#print("start kl")
###kls = vec_kl_gauss(source_mean, target_mean, source_sigma, target_sigma)
#print(kls)
###kl_out = 1- np.mean(kls)
#kl = kl_mvn(source_mean, source_sigma, target_mean, target_sigma)
#dim = len(source_mean)
#source_cov = diags(source_sigma, 0).toarray()
#target_cov = diags(target_sigma, 0).toarray()
#source_cov = np.zeros((dim,dim))
#np.fill_diagonal(source_cov, source_sigma) # This is inplace
#target_cov = np.zeros((dim,dim))
#np.fill_diagonal(target_cov, target_sigma) # This is inplace
#np.random.seed(10)
###m_span = time() - m_start
### k_start = time()
#kls = [kl_gauss(m1, s1, m2, s2) for m1, m2, s1, s2 in zip(source_mean, target_mean, source_sigma, target_sigma)]
#ovls = vec_get_ovl(source_mean, target_mean, source_sigma, target_sigma)
#ovl = np.mean(ovls)
###k_span = time() - k_start
#x = np.random.normal(source_mean, source_sigma)
#y = np.random.normal(target_mean, target_sigma)
#x = np.random.default_rng().multivariate_normal(source_mean, source_cov, method = "cholesky", size = 1)
#y = np.random.default_rng().multivariate_normal(target_mean, target_cov, method = "cholesky", size =1)
#print(x)
#print(y)
#print("calc entropy")
#kl_out = 0# entropy(x+ 0.0001,y+ 0.0001)
#print("end kl")
#kl_out = kl(x, y).numpy()
#kl_out = kl_mvn(source_mean, source_cov, target_mean, target_cov)
#rv = multivariate_normal([mu_x, mu_y], [[sigma_x, 0], [0, sigma_y]])
##w2_start = time()
##w2s = vec_get_w2(source_mean, target_mean, source_sigma, target_sigma)
#print("source_mean",source_mean)
#print("target_mean", target_mean)
#print("source_sigma",source_sigma)
#print("target_sigma", target_sigma)
#print("maxes", max(source_mean), max(target_mean), max(source_sigma), max(target_sigma))
#print("w2s", w2s)
##w2_out = 1 - np.mean(w2s)
##w2_span = time() - w2_start
##w2_vect_start = time()
##w2_vect = 1 - (np.sqrt(euclidean(source_mean, target_mean)**2 + euclidean(source_sigma, target_sigma)**2))/len(source_mean)
##w2_vect_span = time() - w2_vect_start
#e_span = time() - e_start
nb_w2_vect_start = time()
nb_w2_vect = np.empty(source_mean.shape[0] , dtype=np.float32)
#print(nb_w2_vect)
nb_w2_vect = numba_w2(source_mean, target_mean,source_sigma, target_sigma, nb_w2_vect)
nb_w2_vect_span = time() - nb_w2_vect_start
#print("nb vect", nb_w2_vect)
#w2_out = 1 - np.mean(nb_w2_vect) # Wrong, not bounded by 1
mean_w2 = np.mean(nb_w2_vect)
w2_out = 1/(1 + mean_w2) # somewhat flips
w2_e_out = np.exp(-mean_w2)
w2_ediv_out = np.exp(-mean_w2/10)
#nb_o_vect_start = time()
#nb_o_vect = np.empty(source_mean.shape[0] , dtype=np.float32)
#print(nb_o_vect)
#nb_o_vect = numba_ovl(source_mean, target_mean,source_sigma, target_sigma, nb_o_vect)
#nb_o_vect_span = time() - nb_o_vect_start
##euc_mean = euclidean(source_mean, target_mean)
##euc_sigma = euclidean(source_sigma, target_sigma)
#print( "ovl", ovl, "kl", kl_out, "avg_w2", w2_out, "nb_avg_w2", nb_w2_vect, "w2_vect", w2_vect, "cossim", edge['weight'], "total_time", e_span, "dict_time", d_span, "vec_overlap time", k_span, "kl_time", m_span, "w2_time", w2_span, "w2_v_time", w2_vect_span, "nb_w2_time", nb_w2_vect_span)
if source == target:
if weight < 0.99:
print("Warning, score for {} and {} should be close to 1, but is {}. check indices".format(source, target, weight))
#continue
#print(source,target,weight,cosinesim,w2_out, ovl)
#o.write("{},{},{:.5f},{:.5f},{:.5f},{:.5f},{:.10f},{},{},{}\n".format(source, target, weight, ovl, kl_out, w2_out, w2_vect, euc_mean, euc_sigma, nb_w2_vect))
o.write("{},{},{:.5f},{:.5f},{:.8f},{:.8f},{:.8f}\n".format(source,target,weight,cosinesim,w2_out,w2_e_out,w2_ediv_out))
e_span = time() - e_start
print("second similarty taken in {} seconds".format(e_span))
print("outfile made", time() - true_start)
# Step 2: A this point take everything about mean similarity threshold and do distribution comparison
# for edge in G.es():
#np.take(embedding_dict['sequence_embeddings'], [source_idx], axis = 0)
#source_sigma =# np.take(embedding_dict['sequence_embeddings_sigma'], [source_idx], axis = 0)
| clairemcwhite/transformer_infrastructure | hf_seqsim.py | hf_seqsim.py | py | 27,653 | python | en | code | 2 | github-code | 13 |
74564285458 | """
_InsertComponent_
MySQL implementation of UpdateWorker
"""
__all__ = []
import time
from WMCore.Database.DBFormatter import DBFormatter
class UpdateWorker(DBFormatter):
    """DAO that refreshes a worker's heartbeat row in ``wm_workers``.

    Always bumps ``last_updated``; optionally also records the worker's
    state, cycle time and outcome when those values are supplied.
    """

    sqlpart1 = """UPDATE wm_workers
                    SET last_updated = :last_updated
                """
    sqlpart3 = """ WHERE name = :worker_name"""

    def execute(self, workerName, state=None, timeSpent=None,
                results=None, conn=None, transaction=False):
        """Run the UPDATE for ``workerName``, binding only the supplied fields."""
        binds = {"worker_name": workerName,
                 "last_updated": int(time.time())}
        # Assemble the optional SET clauses alongside their bind values.
        optional_clauses = []
        if state:
            binds["state"] = state
            optional_clauses.append(", state = :state")
        if timeSpent is not None:
            # cycle_time and outcome are only recorded together.
            binds["cycle_time"] = timeSpent
            optional_clauses.append(", cycle_time = :cycle_time")
            binds["outcome"] = results
            optional_clauses.append(", outcome = :outcome")
        sql = self.sqlpart1 + "".join(optional_clauses) + self.sqlpart3
        self.dbi.processData(sql, binds, conn=conn,
                             transaction=transaction)
        return
| dmwm/WMCore | src/python/WMCore/Agent/Database/MySQL/UpdateWorker.py | UpdateWorker.py | py | 1,071 | python | en | code | 44 | github-code | 13 |
74909554896 | import math
def f(x):
    """Return exp(-x**2) - cos(x), the function whose root is sought."""
    return math.exp(-x ** 2) - math.cos(x)
# Read the bracketing interval [a, b] and the tolerance L from stdin.
a = float(input())
a_salvo = a  # keep the original endpoints for the final report
b = float(input())
b_salvo = b
L = float(input())
# False-position (regula falsi) iteration: x is the root of the secant line
# through (a, f(a)) and (b, f(b)); the sub-interval that keeps the sign
# change is retained each pass.
while True:
    x = (a * f(b) - b * f(a)) / (f(b) - f(a))
    if f(a)*f(b) < 0:
        if abs(f(x)) > L:
            # Replace the endpoint whose function value shares x's sign.
            if f(a)*f(x) > 0:
                a = x
            if f(b)*f(x) > 0:
                b = x
            '''
            if f(a) < 0:
                a = x
            if f(b) < 0:
                b = x
            '''
        else:
            # Converged: print the root estimate, then |f(a0) - f(b0)| for the
            # ORIGINAL interval (a diagnostic value, not the residual at x).
            print(x)
            print(abs(f(a_salvo)-f(b_salvo)))
            break
    else:
        # No sign change means no bracketed root in [a, b].
        print("não há raiz neste intervalo")
        break
| Teuszin/Calculo-Numerico | Listas_do_Lop/Lista_02/Raiz_M_Cordas.py | Raiz_M_Cordas.py | py | 674 | python | pt | code | 0 | github-code | 13 |
19623047190 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
class firstInFirstOut:
    """First-In-First-Out (FIFO) page-replacement simulator."""

    def processData(self, numberFrames, pages):
        """Simulate FIFO page replacement and report the number of page faults.

        Args:
            numberFrames: sequence whose first element is the frame count
                (accepts a plain list as well as the original numpy array).
            pages: iterable of page references, in access order.

        Returns:
            The total number of page faults (also printed as "FIFO <n>").
        """
        # int() generalizes the original `numberFrames[0].astype(int)` so a
        # plain Python list works too; hoisted out of the loop.
        capacity = int(numberFrames[0])
        countPages = 0        # page-fault counter (cold misses included)
        controlFrameExit = 0  # index of the oldest frame, i.e. next eviction
        pagesInMem = []
        for page in pages:
            if page in pagesInMem:
                continue  # page hit: nothing to do
            if len(pagesInMem) < capacity:
                # Memory not full yet: just load the page.
                pagesInMem.append(page)
            else:
                # Evict the oldest page and advance the FIFO pointer.
                pagesInMem[controlFrameExit] = page
                controlFrameExit = (controlFrameExit + 1) % capacity
            countPages += 1
        print("FIFO {}".format(countPages))
        return countPages
20061336875 | """
Computer choice = rock/papers/scissors
User choice = input
Compare
"""
import random
# BUGFIX: the original mixed "papers" (options/prompt) with "paper"
# (validation and win checks), so typing "papers" was rejected as invalid,
# "paper" could never tie, and the computer's "papers" never matched any
# win rule. Everything now uses the canonical "paper".
options = ['rock', 'paper', 'scissors']
computer_choice = random.choice(options)
user_choice = input("Enter rock/paper/scissors : ")
if user_choice not in options:
    print("invalid choice, please enter right one")
elif user_choice == computer_choice:
    print("its a tie")
elif (user_choice, computer_choice) in (
    ("paper", "rock"), ("rock", "scissors"), ("scissors", "paper")
):
    # Each pair lists (winner's choice, loser's choice).
    print('you won!!')
else:
    print(f"you lost, computer choice was {computer_choice}")
from django.shortcuts import get_object_or_404, redirect, render

from .forms import ArticleForm, CommentForm
from .models import Article
# Create your views here.
def index(request):
    """Render the article list page with every stored article."""
    return render(
        request,
        "articles/index.html",
        {"articles": Article.objects.all()},
    )
def create(request):
if request.method == "POST":
article_form = ArticleForm(request.POST, request.FILES)
if article_form.is_valid():
article_form.save()
return redirect("articles:index")
else:
article_form = ArticleForm()
context = {
"article_form": article_form,
}
return render(request, "articles/create.html", context=context)
def detail(request, pk):
    """Render one article's detail page.

    Uses get_object_or_404 so an unknown pk yields an HTTP 404 instead of
    an unhandled Article.DoesNotExist (HTTP 500). An empty CommentForm is
    passed so the template can render the comment input next to the article.
    """
    article = get_object_or_404(Article, pk=pk)
    context = {
        "article": article,
        "comment_form": CommentForm(),
    }
    return render(request, "articles/detail.html", context)
| kimheekimhee/TIL | django/1018/articles/views.py | views.py | py | 974 | python | en | code | 1 | github-code | 13 |
10255541746 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 9 20:05:54 2020
@author: sarroutim2
"""
from torch.nn.utils.rnn import pack_padded_sequence
from torch.optim.lr_scheduler import ReduceLROnPlateau
import argparse
import csv
import h5py
import progressbar
import time
from utils import Vocabulary
from utils import load_vocab
from models import Classifier
from utils import get_loader
from utils import process_lengths
import torch.nn as nn
import torch.nn.functional as F
import os
import logging
import json
import torch
import torch.backends.cudnn as cudnn
from utils import get_glove_embedding
def binary_accuracy(preds, y):
    """Return the fraction of predictions whose arg-max class matches ``y``.

    preds: (batch, n_classes) score tensor; y: (batch,) label tensor.
    The result is a 0-d float tensor in [0, 1].
    """
    predicted_classes = torch.max(preds, 1)[1]
    n_correct = (predicted_classes == y).float().sum()
    return n_correct / y.shape[0]
def create_model(args, vocab, embedding=None):
    """Creates the model.

    Args:
        args: Instance of Argument Parser.
        vocab: Instance of Vocabulary.
        embedding: Optional pre-built embedding weights. BUGFIX: the
            original unconditionally overwrote this parameter (it was a
            dead argument); it is now honored, and GloVe is only loaded
            when no embedding is supplied and ``args.use_glove`` is set.

    Returns:
        A multi class classification model.
    """
    if embedding is None and args.use_glove:
        # Load 300-d GloVe embedding vectors named by args.embedding_name.
        embedding = get_glove_embedding(args.embedding_name,
                                        300,
                                        vocab)
    # Build the models
    logging.info('Creating multi-class classification model...')
    model = Classifier(len(vocab),
                       embedding_dim=args.embedding_dim,
                       embedding=embedding,
                       hidden_dim=args.num_hidden_nodes,
                       output_dim=args.num_output_nodes,
                       num_layers=args.num_layers,
                       bidirectional=args.bidirectional,
                       dropout=args.dropout,
                       rnn_cell=args.rnn_cell)
    return model
def evaluate(model, data_loader, criterion, args):
    """Run one validation pass and return (mean loss, mean accuracy).

    Args:
        model: The classifier; switched to eval mode.
        data_loader: Yields (sentences, labels, qindices) batches.
        criterion: Loss function applied to (predictions, labels).
        args: Unused here; kept for signature compatibility with train().

    Returns:
        Tuple of (average loss, average accuracy) over all batches, or
        (0.0, 0.0) when the loader is empty. (The original accumulated
        these values but never used them.)
    """
    model.eval()
    epoch_loss = 0
    epoch_acc = 0
    n_batches = 0
    # BUGFIX: evaluation now runs under no_grad so no graph is built and
    # no gradients/memory are wasted on the validation pass.
    with torch.no_grad():
        for sentences, labels, qindices in data_loader:
            # Move the mini-batch to the GPU when available.
            if torch.cuda.is_available():
                sentences = sentences.cuda()
                labels = labels.cuda()
                qindices = qindices.cuda()
            lengths = process_lengths(sentences)
            # NOTE(review): lengths are sorted independently of ``sentences``;
            # this assumes batches are already length-sorted (mirrors train())
            # -- confirm upstream.
            lengths.sort(reverse=True)
            predictions = model(sentences, lengths)
            loss = criterion(predictions, labels)
            acc = binary_accuracy(predictions, labels)
            epoch_loss += loss.item()
            epoch_acc += acc.item()
            n_batches += 1
            logging.info('\t Val-Loss: %.4f | Val-Acc: %.2f '
                         % (loss.item(), acc.item() * 100))
    if n_batches:
        return epoch_loss / n_batches, epoch_acc / n_batches
    return 0.0, 0.0
def train(args):
    """Train the classifier end to end.

    Creates the model directory, snapshots the CLI arguments to args.json,
    configures file+console logging, builds (optionally weighted) train/val
    data loaders, then runs ``args.num_epochs`` epochs of optimization,
    validating and checkpointing the model after every epoch.

    Args:
        args: Parsed argparse namespace (see the ``__main__`` block).
    """
    # Create model directory
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)
    # Save the arguments.
    with open(os.path.join(args.model_path, 'args.json'), 'w') as args_file:
        json.dump(args.__dict__, args_file)
    # Config logging (file + console).
    log_format = '%(levelname)-8s %(message)s'
    logfile = os.path.join(args.model_path, 'train.log')
    logging.basicConfig(filename=logfile, level=logging.INFO, format=log_format)
    logging.getLogger().addHandler(logging.StreamHandler())
    logging.info(json.dumps(args.__dict__))
    vocab = load_vocab(args.vocab_path)
    # Build data loader
    logging.info("Building data loader...")
    # Weighted sampling is enabled only when the weight files exist on disk.
    train_sampler = None
    val_sampler = None
    if os.path.exists(args.train_dataset_weights):
        train_weights = json.load(open(args.train_dataset_weights))
        train_weights = torch.DoubleTensor(train_weights)
        train_sampler = torch.utils.data.sampler.WeightedRandomSampler(
            train_weights, len(train_weights))
    if os.path.exists(args.val_dataset_weights):
        val_weights = json.load(open(args.val_dataset_weights))
        val_weights = torch.DoubleTensor(val_weights)
        val_sampler = torch.utils.data.sampler.WeightedRandomSampler(
            val_weights, len(val_weights))
    data_loader = get_loader(args.dataset,
                             args.batch_size, shuffle=True,
                             num_workers=args.num_workers,
                             max_examples=args.max_examples,
                             sampler=train_sampler)
    val_data_loader = get_loader(args.val_dataset,
                                 args.batch_size, shuffle=False,
                                 num_workers=args.num_workers,
                                 max_examples=args.max_examples,
                                 sampler=val_sampler)
    logging.info("Done")
    # Build the model, optionally warm-starting from a saved checkpoint.
    model = create_model(args, vocab)
    if args.load_model is not None:
        model.load_state_dict(torch.load(args.load_model))
    logging.info("Done")
    criterion = nn.CrossEntropyLoss()
    #criterion = nn.BCELoss()
    # Setup GPUs.
    if torch.cuda.is_available():
        logging.info("Using available GPU...")
        model.cuda()
        criterion.cuda()
        torch.backends.cudnn.enabled = True
        cudnn.benchmark = True
    # Parameters to train.
    params = model.parameters()
    learning_rate = args.learning_rate
    optimizer = torch.optim.Adam(params, lr=learning_rate)
    # NOTE(review): the scheduler is created but scheduler.step(...) is never
    # called, so the learning rate never actually decays -- confirm intent.
    scheduler = ReduceLROnPlateau(optimizer=optimizer, mode='min',
                                  factor=0.1, patience=args.patience,
                                  verbose=True, min_lr=1e-7)
    # Train the model.
    total_steps = len(data_loader)
    start_time = time.time()
    n_steps = 0
    #initialize every epoch
    epoch_loss = 0
    epoch_acc = 0
    #set the model in training phase
    for epoch in range(args.num_epochs):
        epoch_loss = 0
        epoch_acc = 0
        model.train()
        for i, (sentences, labels, qindices) in enumerate(data_loader):
            n_steps += 1
            # Set mini-batch dataset.
            if torch.cuda.is_available():
                sentences = sentences.cuda()
                labels = labels.cuda()
                qindices = qindices.cuda()
            lengths = process_lengths(sentences)
            # NOTE(review): lengths are sorted independently of ``sentences``;
            # this assumes batches are already length-sorted -- confirm upstream.
            lengths.sort(reverse = True)
            #resets the gradients after every batch
            optimizer.zero_grad()
            #convert to 1D tensor
            predictions = model(sentences, lengths)
            loss = criterion(predictions, labels)
            #compute the binary accuracy
            acc = binary_accuracy(predictions, labels)
            #backpropagate the loss and compute the gradients
            loss.backward()
            #update the weights
            optimizer.step()
            #loss and accuracy
            epoch_loss += loss.item()
            epoch_acc+= acc.item()
            delta_time = time.time() - start_time
            start_time = time.time()
            logging.info('Epoch [%d/%d] | Step [%d/%d] | Time: %.4f \n'
                         '\t Train-Loss: %.4f | Train-Acc: %.2f'
                         % (epoch, args.num_epochs, i,
                            total_steps, delta_time,
                            loss.item() , acc.item()*100))
        # Validate, then checkpoint this epoch's weights.
        evaluate(model, val_data_loader, criterion, args)
        torch.save(model.state_dict(),
                   os.path.join(args.model_path,
                                'model-tf-%d.pkl' % (epoch+1)))
# Entry point: parse the CLI arguments and launch training.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Session parameters.
    parser.add_argument('--model-path', type=str, default='weights/tf1/',
                        help='Path for saving trained models')
    parser.add_argument('--save-step', type=int, default=None,
                        help='Step size for saving trained models')
    parser.add_argument('--num-epochs', type=int, default=20)
    parser.add_argument('--batch-size', type=int, default=8)
    parser.add_argument('--num-workers', type=int, default=8)
    parser.add_argument('--learning-rate', type=float, default=0.001)
    parser.add_argument('--patience', type=int, default=10)
    parser.add_argument('--max-examples', type=int, default=None,
                        help='For debugging. Limit examples in database.')
    # Data parameters.
    parser.add_argument('--vocab-path', type=str,
                        default='data/processed/vocab.json',
                        help='Path for vocabulary wrapper.')
    parser.add_argument('--dataset', type=str,
                        default='data/processed/dataset_train.hdf5',
                        help='Path for train annotation json file.')
    parser.add_argument('--val-dataset', type=str,
                        default='data/processed/dataset_val.hdf5',
                        help='Path for train annotation json file.')
    parser.add_argument('--train-dataset-weights', type=str,
                        default='data/processed/train_dataset_weights.json',
                        help='Location of sampling weights for training set.')
    parser.add_argument('--val-dataset-weights', type=str,
                        default='data/processed/test_dataset_weights.json',
                        help='Location of sampling weights for training set.')
    parser.add_argument('--load-model', type=str, default=None,
                        help='Location of where the model weights are.')
    # Model parameters
    parser.add_argument('--rnn-cell', type=str, default='LSTM',
                        help='Type of rnn cell (GRU, RNN or LSTM).')
    parser.add_argument('--num-layers', type=int, default=2,
                        help='Number of layers in lstm.')
    parser.add_argument('--num-output-nodes', type=int, default=4,
                        help='Number of labels.')
    parser.add_argument('--num-hidden-nodes', type=int, default=100,
                        help='Number of hidden nodes.')
    parser.add_argument('--bidirectional', action='store_true', default=False,
                        help='Boolean whether the RNN is bidirectional.')
    parser.add_argument('--use-glove', action='store_true', default=True,
                        help='Whether to use GloVe embeddings.')
    parser.add_argument('--use-w2v', action='store_true', default=False,
                        help='Whether to use W2V embeddings.')
    parser.add_argument('--embedding-name', type=str, default='6B',
                        help='Name of the GloVe embedding to use. data/processed/PubMed-w2v.txt')
    parser.add_argument('--embedding-dim', type=int, default=100,
                        help='Embedding size.')
    parser.add_argument('--dropout', type=float, default=0.3,
                        help='Dropout applied to the RNN model.')
    parser.add_argument('--num-att-layers', type=int, default=2,
                        help='Number of attention layers.')
    args = parser.parse_args()
    train(args)
    # NOTE(review): stray comment below appears left over from another project.
    # Hack to disable errors for importing Voca
| sarrouti/multi-class-text-classification-pytorch | train.py | train.py | py | 11,236 | python | en | code | 3 | github-code | 13 |
10273502877 | ################################################################################
## IMPORTS #####################################################################
################################################################################
#import data
import numpy as np
import random
from numpy import asarray as arr
from numpy import asmatrix as mat
from numpy import atleast_2d as twod
################################################################################
################################################################################
################################################################################
################################################################################
## TESTING FUNCTIONS ###########################################################
################################################################################
def cross_validate(X, Y, n_folds, i_fold):
    """
    Split (X, Y) into train/test partitions for n-fold cross validation.

    Parameters
    ----------
    X : numpy array
        N x M array of data points (N data, M features).
    Y : numpy array
        1 x N array of labels matching the rows of X (may be empty).
    n_folds : int
        Total number of data folds.
    i_fold : int
        1-based index of the fold held out as the test partition.

    Returns
    -------
    tuple
        (Xtr, Xte) when Y is empty, otherwise (Xtr, Xte, Ytr, Yte).
    """
    Y = arr(Y).flatten()
    n_points, _ = twod(X).shape
    n_labels = len(Y)
    if n_labels > 0:
        assert n_points == n_labels, 'cross_validate: X and Y must have the same length'
    # Each fold holds floor(N / n_folds) consecutive indices; the final fold
    # is clipped at N.
    fold_size = np.fix(n_points / n_folds)
    te_start = int((i_fold - 1) * fold_size)
    te_end = min(int(i_fold * fold_size), n_points)
    test_range = list(range(te_start, te_end))
    train_range = sorted(set(range(n_points)) - set(test_range))
    splits = (X[train_range, :], X[test_range, :])
    if n_labels > 0:
        splits += (Y[train_range], Y[test_range])
    return splits
def test_randomly(data, labels, mix=0.8, end=0, test=lambda x: 1.0, *args):
"""
Function that performs random tests using data/labels.
Parameters
----------
data : numpy array
N x M array of data points used for training/testing learner.
N = number of data; M = number of features.
labels : numpy array
1 x N array of class/regression labels used for training/testing learner.
mix : float
The percentage of data to use for training (1 - mix = percentage of data
used for testing).
end : int
The number of tests to run.
test : function object
A function that takes at least four arguments (arrays containing data/labels
for testing/training) and performs tests. This function should return an
error value for one experiment.
args : mixed
Any additional arguments needed for testing.
Returns
-------
float
Average error value of all tests performed.
"""
start = 0
end = len(data) if end == 0 else end
avg_err = 0
for i in range(start, end):
indexes = range(len(data))
train_indexes = random.sample(indexes, int(mix * len(data)))
test_indexes = list(set(indexes) - set(train_indexes))
trd,trc = data[train_indexes], labels[train_indexes]
ted,tec = data[test_indexes], labels[test_indexes]
avg_err += test(trd, trc, ted, tec, *args)
return avg_err / end
################################################################################
################################################################################
################################################################################
################################################################################
## MAIN ########################################################################
################################################################################
if __name__ == '__main__':
    # No demo runs automatically; the commented-out block below shows example
    # usage of cross_validate and is kept for manual experimentation.
    pass
# data,classes = data.load_data_from_csv('../data/classifier-data.csv', 4, float)
# data,classes = arr(data), arr(classes)
#
# for i in range(1,6):
#     Xtr,Xte,Ytr,Yte = cross_validate(data, classes, 5, i)
#     print('i =', i)
#     print('len(Xtr)')
#     print(len(Xtr))
#     print('len(Xte)')
#     print(len(Xte))
#     print('len(Ytr)')
#     print(len(Ytr))
#     print('len(Yte)')
#     print(len(Yte))
#     print()
################################################################################
################################################################################
################################################################################
| austinsherron/Python-Machine-Learning | utils/test.py | test.py | py | 4,484 | python | de | code | 0 | github-code | 13 |
16313989232 | """
This script will take multiple fasta files and it will delete samples within them
based on a list of names from a second file. It will return a "cleaned" fasta file
for each of the original files in a new directory.
Simon Uribe-Convers - December 1st, 2017 - http://simonuribe.com
"""
import sys
import os
from Bio import SeqIO
import glob
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage Error, type: python "+sys.argv[0]+" fasta_file_ending (e.g., fasta, fa, aln) list_with_names")
        sys.exit(0)
    # Start from a clean output directory on every run.
    if os.path.exists("Cleaned_Files"):
        os.system("rm -r Cleaned_Files")
    os.mkdir("Cleaned_Files")
    file_ending = sys.argv[1]
    # Names to delete: first tab-separated column of each line. A set gives
    # O(1) membership tests; ``with`` closes the handle (the original leaked it).
    names_subset = set()
    with open(sys.argv[2], "r") as file_with_names:
        for line in file_with_names:
            names_subset.add(line.strip().split("\t")[0])
    for filename in glob.glob("*." + file_ending):
        cleaned_name = filename.split("." + file_ending)[0] + "_cleaned." + file_ending
        # Copy every sequence whose name is NOT on the removal list.
        # BUGFIX: the output handle is now closed (via ``with``) before the
        # cleaned file is re-parsed below; the original re-read it while the
        # write buffer was still unflushed, so the reported count could be low.
        with open(cleaned_name, "w") as output:
            for seq in SeqIO.parse(filename, 'fasta'):
                # Strip the locus-specific suffix so names match the list.
                # Adjust the split token to your own naming convention, or use
                # str(seq.id) directly when names are identical across files.
                seq_name = str(seq.id.split("_Combined")[0])
                sequence = str(seq.seq)
                if seq_name not in names_subset:
                    output.write(">" + seq_name + "\n" + sequence + "\n")
        # Output some information: original vs cleaned sequence counts.
        records = list(SeqIO.parse(filename, "fasta"))
        print("\nWorking on file: %s" %filename)
        print("Sequences in original file: %d" % len(records))
        clean_records = list(SeqIO.parse(cleaned_name, "fasta"))
        print("Sequences in cleaned file: %d" % len(clean_records))
    # Housekeeping
    os.system("mv *_cleaned* ./Cleaned_Files")
    print("\nFinished, the cleaned files are in the 'Cleaned_Files' directory.\n")
| uribe-convers/Genomic_Scripts | Delete_Sequences_in_Multiple_Files_Based_on_Names.py | Delete_Sequences_in_Multiple_Files_Based_on_Names.py | py | 2,376 | python | en | code | 0 | github-code | 13 |
24252937918 | import os
import json
import pandas as pd
import numpy as np
from tqdm import tqdm
import cv2
from .utils_dataset import NpEncoder
# Scene metadata table shared by both builder classes (scene_id, frame counts).
df = pd.read_csv("/data/datasets/KITTI/kitti_scene_infos.csv")
# (width, height) the RGB frames were rescaled to; K is rescaled to match.
TARGET_SIZE = (256, 256)
class KittiBuilder:
    """Builds camera-transform ``.json`` files for one KITTI odometry scene.

    For scene ``seq_nb`` it can emit either an 80/20 train/test split
    (``create_json_files``) or a single file covering every frame
    (``create_complete_file``). Each json payload pairs an image basename
    with its 4x4 camera pose read from the KITTI pose file.
    """

    def __init__(self, in_dir, seq_nb):
        """in_dir: KITTI dataset root; seq_nb: scene id as a string, e.g. "3"."""
        self.in_dir = in_dir
        self.SEQ = seq_nb
        self.scene_name = self.SEQ.zfill(2)
        self.set_root_paths()
        self.set_nb_frames()
        # One row per frame; each row is a flattened 3x4 [R|t] pose matrix.
        self.poseRt = pd.read_csv(
            os.path.join(self.ROOT_PATH_POSE, self.scene_name + ".txt"),
            delimiter=" ",
            header=None,
        )

    def set_root_paths(self):
        """Set the root paths for the rescaled RGB images and camera poses."""
        self.ROOT_PATH_RGB = os.path.join(
            self.in_dir, "RGB_rescaled_256x256", self.scene_name
        )
        self.ROOT_PATH_POSE = os.path.join(self.in_dir, "pose_camera", self.scene_name)

    def set_nb_frames(self):
        """Look up this scene's frame count in the shared metadata table."""
        self.nb_frames = df[df["scene_id"] == int(self.scene_name)][
            "scene_frame_numbers"
        ].to_list()[0]

    def compute_intrinsic_K(self):
        """Return the 3x3 intrinsic matrix K, rescaled to TARGET_SIZE."""
        file_path = os.path.join(self.ROOT_PATH_POSE, "calib.txt")
        calib = pd.read_csv(file_path, delimiter=" ", header=None, index_col=0)
        P2 = np.array(calib.loc["P2:"]).reshape((3, 4))
        self.K, _, _, _, _, _, _ = cv2.decomposeProjectionMatrix(P2)
        # The principal point (K[0,-1], K[1,-1]) is assumed to sit at the image
        # centre, so 2*cx / 2*cy recovers the original resolution for scaling.
        scale_u = TARGET_SIZE[0] / (2 * self.K[0, -1])
        scale_v = TARGET_SIZE[1] / (2 * self.K[1, -1])
        self.K[0] *= scale_u
        self.K[1] *= scale_v
        return self.K

    def get_extrinsic(self, viewIdx: int) -> np.array:
        """Return the 4x4 homogeneous extrinsic matrix of frame ``viewIdx``."""
        Rt = np.array(self.poseRt.iloc[viewIdx]).reshape((3, 4))
        Rt4x4 = np.eye(4)
        Rt4x4[:3] = Rt
        return Rt4x4

    def _split_payload(self, frame_indices):
        """Build the json payload skeleton for ``frame_indices`` (poses unfilled)."""
        return {
            "in_dir": self.ROOT_PATH_RGB,
            "selected_samples_id": frame_indices,
            "nb_frames": len(frame_indices),
            "cameraLoc": "Cam2",
            "stereoLR": None,
            # Key spelling ("intrisic") kept as-is: downstream readers use it.
            "intrisic_matrix": self.compute_intrinsic_K().tolist(),
            "infos": [
                {"img_basename": str(idx).zfill(6) + ".png", "transform_matrix": None}
                for idx in frame_indices
            ],
        }

    def build_json(self, train_or_test: str):
        """Fill the extrinsic matrices into the train or test payload in place."""
        data = self.train_data if train_or_test == "train" else self.test_data
        for i, idx_frame in tqdm(enumerate(data["selected_samples_id"])):
            RT = self.get_extrinsic(idx_frame)
            # Guard against any desync between payload order and frame order.
            assert (
                os.path.basename(data["infos"][i]["img_basename"])
                == str(idx_frame).zfill(6) + ".png"
            )
            data["infos"][i]["transform_matrix"] = RT.tolist()

    def save_json(self):
        """Write the train and test payloads next to the scene's RGB images."""
        train_path = os.path.join(self.ROOT_PATH_RGB, "transforms_train.json")
        test_path = os.path.join(self.ROOT_PATH_RGB, "transforms_test.json")
        # ``with`` flushes and closes the handles (the original leaked them).
        with open(train_path, "w") as f_train:
            json.dump(self.train_data, f_train, cls=NpEncoder)
        with open(test_path, "w") as f_test:
            json.dump(self.test_data, f_test, cls=NpEncoder)

    def create_complete_file(self):
        """Write ``transforms_completeScene.json`` covering every frame."""
        arr = np.arange(self.nb_frames)
        self.data = self._split_payload(list(arr))
        for i, idx_frame in tqdm(enumerate(arr)):
            RT = self.get_extrinsic(idx_frame)
            assert (
                os.path.basename(self.data["infos"][i]["img_basename"])
                == str(idx_frame).zfill(6) + ".png"
            )
            self.data["infos"][i]["transform_matrix"] = RT.tolist()
        filename_json = os.path.join(
            self.ROOT_PATH_RGB, "transforms_completeScene.json"
        )
        with open(filename_json, "w") as f:
            json.dump(self.data, f, cls=NpEncoder)

    def create_json_files(self):
        """Shuffle the frames into an 80/20 train/test split and write both files."""
        print(f"Start building the json files for the scene {self.scene_name}...")
        # Same train/test strategy as used in the Baseline paper.
        arr = np.arange(self.nb_frames)
        np.random.shuffle(arr)
        split = int(0.8 * self.nb_frames)
        self.train_framesIdx = sorted(arr[:split])
        # BUGFIX: the original sliced ``arr[split + 1:]``, silently dropping one
        # frame from both splits; the test set now starts right after train.
        self.test_framesIdx = sorted(arr[split:])
        self.train_data = self._split_payload(self.train_framesIdx)
        self.test_data = self._split_payload(self.test_framesIdx)
        self.build_json(train_or_test="train")
        self.build_json(train_or_test="test")
        self.save_json()
class KittiBuilderNPY:
    """Stacks every RGB frame of every KITTI scene into one big ``.npy`` array."""

    def __init__(self, in_dir):
        """Load the scene metadata, report totals, then build the array."""
        self.indir = in_dir
        df = pd.read_csv("/data/datasets/KITTI/kitti_scene_infos.csv")
        self.scene_info = df.set_index("scene_id")["scene_frame_numbers"].to_dict()
        self.scene_list = list(self.scene_info.keys())
        self.scene_number = len(self.scene_list)
        self.totFrames = np.sum([v for _, v in self.scene_info.items()])
        print(f"Retrieved scene dictionnary: {self.scene_info}")
        print(f"Total number of frames to store: {self.totFrames}")
        self.build_npy(out_dir="/data/datasets/NVS_Skip")

    def build_npy(self, out_dir):
        """Read every frame (BGR -> RGB) into one array and save it as .npy."""
        data_img_npy = np.zeros((self.totFrames, 256, 256, 3))
        frame_pos = 0  # running index into the output array
        for scene in tqdm(self.scene_list):
            rootPath = os.path.join(
                self.indir, "RGB_rescaled_256x256", str(scene).zfill(2), "image_2"
            )
            print(f"Current scene processed: {scene}")
            # Sorted listing keeps frames in temporal order; hidden files skipped.
            imgsName = sorted(
                l for l in os.listdir(rootPath) if not l.startswith(".")
            )
            for imgName in imgsName:
                # [:, :, ::-1] flips OpenCV's BGR channel order to RGB.
                img = cv2.imread(os.path.join(rootPath, imgName))[:, :, ::-1]
                data_img_npy[frame_pos] = img
                frame_pos += 1
        # Save the entire data array.
        outPath = os.path.join(out_dir, "kitti_imageOurs.npy")
        print(f"Saving the .npy at {outPath}...")
        np.save(outPath, data_img_npy)
if __name__ == "__main__":
    # kittiBuilder = KittiBuilderNPY(in_dir= '/data/datasets/KITTI')
    # Build the complete-scene transform file for KITTI odometry scenes 0..10.
    for seq_id in map(str, range(11)):
        scene_builder = KittiBuilder(in_dir="/data/datasets/KITTI", seq_nb=seq_id)
        scene_builder.create_complete_file()
| gaetan-landreau/epipolar_NVS | dataset/kitti_builder.py | kitti_builder.py | py | 8,203 | python | en | code | 0 | github-code | 13 |
# Pillai Lab
# Daniel Castaneda Mogollon
# This code reports the number of snps, indels, and divergence from a reference
# sequence against query sequences. It takes a reference sequence from a .fasta
# file and gets the difference from every other sequence. The reference
# sequence must have 'reference' in its header. It compares nucleotides
# A,U,G,C,T,N and indels as '-'. The input file must be previously aligned and
# be in a .fasta format. It only asks the user for the path where the file is
# and a name for the output file.
import os

print("This code has the purpose of printing the number of snps found between a reference vs a query sequence(s).")
print("The user must input an aligned file of said sequences, and label the reference as '>reference'.")

headers = []
sequences = []
nucleotides = []
reference_header = ''
reference_sequence = ''

# Opening the MSA aligned file: '>' lines are headers, the rest are sequences.
file_analyze = input("Please type the path to your aligned file (must be in .txt or .fasta format): ")
with open(file_analyze) as f:
    content = f.readlines()
    for lines in content:
        if lines.startswith(">"):
            headers.append(lines.replace("\n", ""))
        else:
            sequences.append(lines.upper().replace("\n", ""))

# Getting the reference sequence, assuming its header contains 'reference'.
i = 0
for item in headers:
    if "reference" in item:
        # Getting the reference sequence from the header index.
        reference_sequence = sequences[i]
        reference_header = headers[i]
        # Stop at the first reference: the original kept looping, which left
        # the index stale if a second header also contained 'reference'.
        break
    else:
        i = i + 1

# Making sure the sequences have the same size (considering indels as -).
for item in sequences:
    nucleotides.append(len(item))
first_sequence_length = nucleotides[0]
for seq_length in nucleotides:
    # BUG FIX: the original tested `len(item)` (the length of the LAST
    # sequence, left over from the loop above) instead of each stored length,
    # so differently-sized sequences were never detected.
    if seq_length != first_sequence_length:
        print("The sequences are different size. Exiting the program now.")
        exit()
    else:
        continue
print("Sequences are the same size. Analyzing snps . . .")
print(reference_sequence)

# Getting an output file as csv.
f_out_name = input("Please name your output file: ")
f_out = open(f_out_name + '.csv', 'a')
snps_list = []
indels_list = []
DNA_nucleotides = ['A', 'T', 'G', 'C', 'N', 'U']
snp_counter = 0
indel_counter = 0
# Counting SNPs (mismatching nucleotides) and indels ('-' in exactly one of
# the two aligned sequences) for every sequence against the reference.
for sequence in sequences:
    for j in range(0, len(reference_sequence), 1):
        if (reference_sequence[j] in DNA_nucleotides) and (sequence[j] in DNA_nucleotides):
            if reference_sequence[j] != sequence[j]:
                snp_counter = snp_counter + 1
            else:
                # In this case the nucleotides match each other.
                continue
        else:
            if (reference_sequence[j] == '-') and (sequence[j] == '-'):
                continue
            elif (reference_sequence[j] == '-' and sequence[j] != '-'):
                indel_counter = indel_counter + 1
            elif (reference_sequence[j] != '-' and sequence[j] == '-'):
                indel_counter = indel_counter + 1
    snps_list.append(snp_counter)
    indels_list.append(indel_counter)
    snp_counter = 0
    indel_counter = 0

# Printing the snps and indels into the output file.
f_out.write("Sequence name,SNPs,Indels,Divergence(%),\n")
divergence_list = []
for m in range(0, len(headers), 1):
    divergence_list.append(((snps_list[m] + indels_list[m]) / len(sequences[m]) * 100))
    f_out.write(headers[m] + ',' + str(snps_list[m]) + ',' + str(indels_list[m]) + ',' + str(divergence_list[m]) + ',\n')
# Flush and release the output file (the original never closed it).
f_out.close()
| dcm9123/pillai_lab | snp_finder.py | snp_finder.py | py | 3,372 | python | en | code | 0 | github-code | 13 |
40526187068 | from math import *
from stepper import *
# Motion-scale constants, full-step mode.
# NOTE(review): the names look swapped — STEP_PER_MM holds 0.0125 (mm moved
# per motor step) and MM_PER_STEP holds 80 (steps per mm). The code uses them
# consistently in that swapped sense (positions * STEP_PER_MM -> mm,
# mm-distances * MM_PER_STEP -> steps), so behavior is correct. TODO confirm.
STEP_PER_MM=0.0125
MM_PER_STEP=80
# Same lead expressed for a TMC2225 driver at 1/32 microstepping (10x finer),
# used by the Z axis in set_z().
STEP_TMC2225_32_PER_MM=0.00125
MM_TMC2225_32_PER_STEP=800
#2GT 2mm per gear, 20 gear , 20*2=40mm a cycle
#1.8 per step, so 200 steps a cycle. then 0.2mm/step
# Set to 1 to print per-step position traces in draw_actually_positon().
trace_flg=0
def plat_plot_show():
    """No-op placeholder for a plot-display hook (presumably a matplotlib
    show() in a desktop build — TODO confirm)."""
    pass
def plat_plot(x, y, para):
    """No-op placeholder for plotting point (x, y); *para* carries a
    matplotlib-style format string ('b.' / 'y.') at the call sites."""
    pass
def axis_accm_step(acc, delta):
    """Accumulate *delta* into *acc* and split off the whole-step part.

    Returns (new_acc, step): *step* is the integer number of motor steps to
    emit now (truncated toward zero, so 0 while |acc| < 1) and *new_acc*
    keeps the fractional remainder so sub-step amounts carry over to the
    next call instead of being lost.
    """
    acc += delta
    step = int(acc)   # whole steps ready to emit (0 when |acc| < 1)
    acc -= step       # keep only the fractional remainder
    return acc, step
def set_z(sz, ez):
    """Move the Z stepper *sz* from its current position to *ez* (mm).

    The move is split into c = floor(|8*dz|) micro-moves; fractional steps
    are carried between iterations via axis_accm_step(). Returns *ez*.
    """
    acc_z = 0
    # Current position in steps, converted to mm.
    zc = sz.position()
    z = float(zc)*STEP_TMC2225_32_PER_MM
    dz = ez - z;
    if 1:
        print(">c:%d z:%f->%f dz:%f" % (zc, z, ez, dz))
    # Already within one step of the target: nothing to do.
    if abs(dz) < STEP_TMC2225_32_PER_MM:
        return ez
    c = floor(abs(8*dz) );
    if c > 0 :
        # mm per micro-move, then scaled to steps per micro-move.
        del_z = (dz) / c
        del_z = del_z*MM_TMC2225_32_PER_STEP
        for i in range(0, c) :
            # Emit only whole steps; acc_z keeps the fractional remainder.
            acc_z, step = axis_accm_step(acc_z, del_z)
            sz.step(step)
            zc = sz.position()
            z = float(zc)*STEP_TMC2225_32_PER_MM
            if 1:
                print(">>c:%d z:%f" % (zc, z))
    return ez
class corexy():
    """CoreXY kinematics driver for two steppers.

    In CoreXY, motor positions (a, b) relate to cartesian (x, y) by
    x = (a+b)/2 and y = (a-b)/2 (times the step scale); conversely a move of
    (dx, dy) requires motor deltas (dx+dy, dx-dy) — see set_xy().
    """
    def __init__(self, sx, sy):
        # sx/sy are the two Stepper objects driving the CoreXY belt.
        self.sx=sx
        self.sy=sy
        # Commanded cartesian position (mm); starts at the origin.
        self.x=0
        self.y=0 # the logical x,y position tracked alongside the motors
        # Fractional-step accumulators for each motor (see accm_step()).
        self.acc_a=0;
        self.acc_b=0
        self.sx.enable()
        self.sy.enable()
        print("corexy init")
    def actually_positon(self):
        """Return (a, b, x, y): raw motor step counts and the cartesian
        position (mm) derived from them via the CoreXY transform."""
        a=self.sx.position()
        b=self.sy.position()
        x=float((a+b)/2)*STEP_PER_MM
        y=float((a-b)/2)*STEP_PER_MM
        return a,b,x,y
    def draw_actually_positon(self,plot_flg):
        """Plot the current position; 'b.' when drawing, 'y.' when moving."""
        a,b,x,y=self.actually_positon()
        if trace_flg:
            print("a:%d b:%d x:%f ,y:%f" % (a,b,x,y))
        if plot_flg:
            plat_plot(x,y,'b.')
        else :
            plat_plot(x,y,'y.')
    def direction(self,delta):
        """Sign of *delta* as +1/-1 (0 maps to -1). Currently unused here."""
        if delta> 0:
            return 1
        else :
            return -1
    def accm_step(self,acc,delta):
        """Accumulate *delta* into *acc* and return (remainder, whole_steps);
        same carry logic as the module-level axis_accm_step()."""
        acc+=delta
        step=0
        if 0==int(acc):
            pass # less than one step accumulated: emit nothing
        else :
            step=int(acc)
            acc-=int(acc)
        return acc,step
    def set_xy(self,ex,ey,plot_flg):
        """Move linearly from the tracked (self.x, self.y) to (ex, ey),
        interleaving A/B motor steps; plot_flg selects draw vs move dots."""
        if 1:
            a,b,x,y=self.actually_positon()
            print(">a:%d b:%d x:%f ,y:%f" % (a,b,x,y))
        # Start from the commanded position, not the measured one.
        x=self.x
        y=self.y
        dx = ex - x;
        dy = ey - y;
        # Number of interpolation slices: 8 per mm of straight-line distance.
        c = floor(8 * sqrt(dx * dx + dy * dy));
        if c >0 :
            # CoreXY: motor A sees dx+dy, motor B sees dx-dy (mm per slice),
            # then scaled to steps per slice.
            del_a= (dx+dy) /c
            del_b= (dx-dy) /c
            del_a= del_a*MM_PER_STEP
            del_b= del_b*MM_PER_STEP
            for i in range(0,c) :
                #self.set_xy(x + (i * dx / c), y + (i * dy / c));
                self.acc_a,step=self.accm_step(self.acc_a,del_a)
                self.sx.step(step)
                self.draw_actually_positon(plot_flg)
                #
                self.acc_b,step=self.accm_step(self.acc_b,del_b)
                self.sy.step(step)
                self.draw_actually_positon(plot_flg)
        self.x=ex
        self.y=ey
        if 1:
            a,b,x,y=self.actually_positon()
            print(">>a:%d b:%d x:%f y:%f ex:%f ey:%f" % (a,b,x,y ,abs(ex-x),abs(ey-y)))
        return ex,ey
    def move_to(self,x,y):
        """Travel to (x, y) without drawing (plot_flg=0)."""
        return self.set_xy(x,y,0)
    def draw_to(self,x,y):
        """Travel to (x, y) while drawing (plot_flg=1)."""
        return self.set_xy(x,y,1)
    def get_xy(self):
        """Return the measured cartesian position (mm) from the motors."""
        a,b,x,y=self.actually_positon()
        return x,y
def draw_rect(cxy, x, y, w, h):
    """Trace the axis-aligned rectangle with corner (x, y), width *w* and
    height *h*: travel to the start corner, then draw the four sides."""
    outline = ((x, y + h), (x + w, y + h), (x + w, y), (x, y))
    cxy.move_to(x, y)
    for corner_x, corner_y in outline:
        cxy.draw_to(corner_x, corner_y)
def draw_cycle(cxy, x, y, r):
    """Trace a full circle of radius *r* centred on (x, y), approximated by
    314 chords: the angle is swept in 0.02 rad increments (2*314 hundredths
    of a radian is roughly 2*pi)."""
    cxy.move_to(x + r, y)
    for hundredths in range(0, 628, 2):
        print(hundredths)
        angle = hundredths / 100
        cxy.draw_to(x + r * cos(angle), y + r * sin(angle))
if __name__ == '__main__':
    # Wire up the two CoreXY motors and draw a quick test pattern.
    motor_x = Stepper(14, 12, 13, speed=10000, name="x")
    motor_y = Stepper(4, 5, 16, speed=10000, name="y")
    plotter = corexy(motor_x, motor_y)
    draw_rect(plotter, 10, 10, 10, 10)
    draw_cycle(plotter, 20, 20, 20)
    plat_plot_show()
34862561619 | import time
def test_time(func, test_times=100):
def wrapper(*args, **kwargs):
# 计时
start = time.process_time()
for i in range(test_times):
result = func(*args, **kwargs)
elapsed = (time.process_time() - start)
print(func.__name__, ":")
print("Time used:", elapsed)
print("Result:", result)
return result
return wrapper
| GMwang550146647/leetcode | fundamentals/test_time.py | test_time.py | py | 411 | python | en | code | 0 | github-code | 13 |
#Vanshika Shah
#UCID: vns25
#Section 003
#! /usr/bin/env python3
# Minimal HTTP/1.1 file server supporting GET with If-Modified-Since
# (conditional GET); answers 200 / 304 / 404.
import sys
import socket
import codecs
import datetime, time
from datetime import timezone
import os
# Read server IP address and port from command-line arguments
serverIP = sys.argv[1]
serverPort = int( sys.argv[2] )
# Max bytes read per request; assumes the whole request arrives in one
# recv() call (fine for this assignment-scale server).
dataLen = 1000000
# Create a TCP welcoming socket
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Assign server IP address and port number to socket
serverSocket.bind( (serverIP, serverPort) )
#Listen for incoming connection requests
serverSocket.listen(1)
print('The server is ready to receive on port: ' + str(serverPort) + '\n')
# loop forever listening for incoming connection requets
while True:
    connectionSocket, address = serverSocket.accept()
    # Receive GET request and ignore headers
    data = ""
    if_mod_time = ""
    headers_recv = connectionSocket.recv(dataLen).decode().splitlines()
    for header in headers_recv:
        if "GET" in header:
            # Request target with the leading '/' stripped, used as filename.
            filename = header.split()[1][1:]
            data = data + header + "\r\n"
        elif "Host" in header:
            data = data + header + "\r\n"
        elif "If-Modified-Since" in header:
            # len("If-Modified-Since: ") == 19, so [19:] is the date value.
            if_mod_time = header[19:] + "\r\n"
            data = data + header + "\r\n"
    data = data + "\r\n"
    # get current time in UTC and convert to string in HTTP format
    t = datetime.datetime.utcnow()
    date = t.strftime("%a, %d %b %Y %H:%M:%S GMT\r\n")
    status1 = "HTTP/1.1 200 OK\r\n"
    status2 = "HTTP/1.1 304 Not Modified\r\n"
    status3 = "HTTP/1.1 404 Not Found\r\n"
    # check if file exists https://careerkarma.com/blog/python-check-if-file-exists/#:~:text=To%20check%20if%2C%20in%20Python,exists()%20checks%20for%20both.
    if not os.path.isfile(filename):
        response = status3 + "Date: " + date + "Content-Length: " + "0\r\n" + "\r\n"
    else:
        # determine file's modification time
        secs = os.path.getmtime( filename )
        t = time.gmtime( secs )
        last_mod_time = time.strftime( "%a, %d %b %Y %H:%M:%S GMT\r\n", t )
        #get body
        # NOTE(review): files are decoded as UTF-8; binary files would raise
        # here, and Content-Length below is the on-disk byte size while the
        # body is re-encoded from str — these can disagree for multibyte
        # content. Confirm only ASCII/UTF-8 text files are served.
        file = codecs.open(filename, "r", "utf-8") # https://www.kite.com/python/answers/how-to-open-an-html-file-in-python#:~:text=Use%20codecs.,file%20in%20read%2Donly%20mode.
        body = file.read()
        #get content length https://stackoverflow.com/questions/6591931/getting-file-size-in-python and content type
        content_len = str ( os.path.getsize(filename) ) + "\r\n"
        content_type = "text/html; charset=UTF-8\r\n"
        #conditional GET
        if if_mod_time != "":
            # if not modified on server
            if if_mod_time == last_mod_time:
                response = status2 + "Date: " + date + "\r\n"
            else:
                response = status1 + "Date: " + date + "Last-Modified: " + last_mod_time + "Content-Length: " + content_len + "Content-Type: " + content_type + "\r\n" + body
        else:
            response = status1 + "Date: " + date + "Last-Modified: " + last_mod_time + "Content-Length: " + content_len + "Content-Type: " + content_type + "\r\n" + body
    # Echo back to client
    connectionSocket.send( response.encode() )
    connectionSocket.close()
| vns25/Computer-Networks | HW4/httpserver.py | httpserver.py | py | 3,265 | python | en | code | 0 | github-code | 13 |
6086116959 | import json
import os
import time
import uuid
from os.path import join, exists
import cclib
from rdkit.Chem.rdDistGeom import EmbedMolecule
from rdkit.Chem.rdForceFieldHelpers import MMFFOptimizeMolecule, UFFOptimizeMolecule
from rdkit.Chem.rdmolfiles import MolFromSmiles, MolToXYZBlock, MolToSmiles
from rdkit.Chem.rdmolops import AddHs
from rdkit.Chem.rdmolops import RemoveHs, RemoveStereochemistry
from .evaluation import EvaluationStrategy, EvaluationError
def check_identical_geometries(xyz_path, pre_optim_smiles, post_optim_smiles_path):
    """
    Check that the geometry stored at *xyz_path* still encodes the same
    molecule as *pre_optim_smiles*.
    :param xyz_path: path to the XYZ file
    :param pre_optim_smiles: SMILES before geometrical optimization
    :param post_optim_smiles_path: path where the post-optimization SMILES is
    temporarily written
    :return: True iff the SMILES is unchanged by the optimization
    """
    # OpenBabel converts the XYZ geometry back to SMILES (output silenced).
    conversion_cmd = ("obabel -ixyz " + xyz_path + " -osmi -O " + post_optim_smiles_path
                      + " > /dev/null 2> /dev/null")
    os.system(conversion_cmd)
    # Canonical RDKit SMILES read back from the converted file.
    post_optim_smi = load_obabel_smi(post_optim_smiles_path)
    return pre_optim_smiles == post_optim_smi
def obabel_mmff94_xyz(smiles, **kwargs):
    """
    Returns the string of the XYZ file obtained performing the MMFF94 molecular mechanics optimization of the given
    SMILES using obabel.
    Writing temporary files in $MM_WORKING_DIR if defined or otherwise in /tmp
    :param smiles : input SMILES
    :param kwargs : unused here (kept for signature parity with the RDKit variants)
    :return : XYZ string of optimized geometry, success (whether the MM optimization was successful and the smiles has
    stayed identical after optimization)
    """
    working_dir = os.environ["MM_WORKING_DIR"] if "MM_WORKING_DIR" in os.environ else "/tmp"
    # Computing RDKIT canonical SMILES
    smi_canon = MolToSmiles(MolFromSmiles(smiles))
    # PID prefix keeps parallel workers from clobbering each other's files.
    filename_smiles = str(os.getpid()) + "_" + smi_to_filename(smi_canon)
    # Computing files paths
    smi_path = join(working_dir, filename_smiles + ".smi")
    xyz_path = join(working_dir, filename_smiles + ".xyz")
    post_MM_smi_path = join(working_dir, filename_smiles + ".post_MM.smi")
    try:
        # Writing smiles to file
        with open(smi_path, "w") as f:
            f.write(smi_canon)
        # Converting SMILES to XYZ after computing MM (Obabel MMFF94); obabel
        # output is silenced.
        command_obabel = "obabel -ismi " + smi_path + " -oxyz -O " + xyz_path + " --gen3d"
        os.system(command_obabel + " > /dev/null 2> /dev/null")
        # Reading XYZ string
        with open(xyz_path, "r") as f:
            xyz_str = f.read()
        # Success if the post MM smiles is identical the pre MM smiles
        success = check_identical_geometries(xyz_path, smi_canon, post_MM_smi_path)
    except Exception as e:
        # Any failure (bad SMILES, obabel missing, unreadable file) yields
        # (None, False) rather than propagating.
        success = False
        xyz_str = None
    finally:
        # Removing files
        remove_files([smi_path, xyz_path, post_MM_smi_path])
    return xyz_str, success
def rdkit_mm_xyz(smiles, ff="MMFF94", **kwargs):
    """
    Returns the string of the XYZ file obtained performing the MMFF94 or UFF molecular mechanics optimization of the
    given SMILES using RDKit.
    Writing temporary files in $MM_WORKING_DIR if defined or otherwise in /tmp
    :param smiles: input_SMILES
    :param ff: whether to use MMFF94 force field ("MMFF94") or UFF force field ("UFF")
    :param kwargs: must contain "max_iterations" (max number of MM iterations)
    :return : XYZ string of optimized geometry, success (whether the MM optimization was successful and the smiles has
    stayed identical after optimization)
    """
    working_dir = os.environ["MM_WORKING_DIR"] if "MM_WORKING_DIR" in os.environ else "/tmp"
    # Converting the molecule to RDKit object
    mol = MolFromSmiles(smiles)
    smi_canon = MolToSmiles(MolFromSmiles(smiles))
    # Setting paths (PID prefix keeps parallel workers apart)
    filename_smiles = str(os.getpid()) + "_" + smi_to_filename(smi_canon)
    xyz_path = join(working_dir, filename_smiles + '.xyz')
    post_MM_smi_path = join(working_dir, filename_smiles + '.smi')
    # Computing geometry
    try:
        # Adding implicit hydrogens
        mol = AddHs(mol)
        # MM optimization
        EmbedMolecule(mol)
        if ff == "MMFF94":
            value = MMFFOptimizeMolecule(mol, maxIters=kwargs["max_iterations"])
        elif ff == "UFF":
            value = UFFOptimizeMolecule(mol, maxIters=kwargs["max_iterations"])
        # NOTE(review): for any other ff value, `value` is unbound and the
        # resulting NameError is swallowed by the except below, so the call
        # returns (None, False) — confirm this fallback is intended.
        # Success if returned value is null
        success_RDKIT_output = value == 0
        # Computing XYZ from optimized molecule
        xyz_str = MolToXYZBlock(mol)
        # Writing optimized XYZ to file
        with open(xyz_path, "w") as f:
            f.writelines(xyz_str)
        # Success if the optimization has converged and the post MM smiles is identical the pre MM smiles
        success = success_RDKIT_output and check_identical_geometries(xyz_path, smi_canon, post_MM_smi_path)
    except Exception as e:
        success = False
        xyz_str = None
    finally:
        # Removing files
        remove_files([post_MM_smi_path, xyz_path])
    return xyz_str, success
def rdkit_mmff94_xyz(smiles, **kwargs):
    """
    Returns the string of the XYZ file obtained performing the MMFF94 molecular mechanics optimization of the given
    SMILES using RDKit.
    Writing temporary files in $MM_WORKING_DIR if defined or otherwise in /tmp
    :param smiles: input_SMILES
    :param kwargs: must contain "max_iterations" (max number of MM iterations)
    :return : XYZ string of optimized geometry, success (whether the MM optimization was successful and the smiles has
    stayed identical after optimization)
    NOTE : DEPRECATED FUNCTION. Kept here for backwards compatibility. Now it is better to call rdkit_mm_xyz using
    the ff="MMFF94" parameter.
    """
    # Deprecated thin wrapper: delegate to rdkit_mm_xyz so the MMFF94 logic
    # lives in a single place. The previous body duplicated rdkit_mm_xyz's
    # MMFF94 path line for line; behavior is unchanged.
    return rdkit_mm_xyz(smiles, ff="MMFF94", **kwargs)
def delete_file(file):
    """
    Code from https://github.com/Cyril-Grl/AlphaSMILES
    Remove *file* from disk when it exists as a regular file; silently do
    nothing otherwise.
    :param file: the file to delete
    :type file: str
    :return: None
    """
    file_is_present = os.path.isfile(file)
    if file_is_present:
        os.remove(file)
def remove_files(files_list):
    """Delete every path in *files_list*, ignoring entries that do not
    exist (delegates each deletion to delete_file)."""
    for path in files_list:
        delete_file(path)
def smi_to_filename(smi):
    """Encode a SMILES string into a filesystem-safe name by replacing each
    path-unsafe character with a unique '_xx_' token (inverse of
    filename_to_smi). All keys are single characters and no replacement
    contains a key, so a one-pass translate matches the former chain of
    str.replace calls exactly."""
    escape_table = str.maketrans({
        "(": "_po_",
        ")": "_pf_",
        "/": "_s_",
        "\\": "_as_",
        "@": "_at_",
        "#": "_sh_",
        "=": "_eq_",
    })
    return smi.translate(escape_table)
def filename_to_smi(filename):
    """Decode a filename produced by smi_to_filename back into the original
    SMILES string. Substitutions run in the same fixed order as before, so
    the result is byte-for-byte identical."""
    substitutions = (
        ("_po_", "("),
        ("_pf_", ")"),
        ("_s_", "/"),
        ("_as_", "\\"),
        ("_at_", "@"),
        ("_sh_", "#"),
        ("_eq_", "="),
    )
    for token, symbol in substitutions:
        filename = filename.replace(token, symbol)
    return filename
def load_obabel_smi(smi_path):
    """
    Converting a OpenBabel SMILES into a canonical aromatic RDKit SMILES
    (stereochemistry and explicit hydrogens are stripped in the process).
    :param smi_path: path of a .smi file whose first line is the SMILES
    :return: RDKit canonical SMILES string
    """
    # Extracting smiles (only the first line of the file is used)
    with open(smi_path, "r") as f:
        new_smi = f.readline()
    # Loading converged mol
    new_mol = MolFromSmiles(new_smi)
    # Removing stereo information
    RemoveStereochemistry(new_mol)
    # Removing hydrogens
    new_mol = RemoveHs(new_mol)
    # Converting to SMILES; the double round-trip forces RDKit's canonical
    # aromatic form.
    smi_rdkit = MolToSmiles(MolFromSmiles(MolToSmiles(new_mol)))
    return smi_rdkit
def write_input_file(opt_input_path, xyz_path, smi, n_jobs, dft_base="3-21G*", dft_method="B3LYP", dft_mem_mb=512):
    """Write a Gaussian 09 geometry-optimization input file for *smi*.

    The atom block is taken from *xyz_path* (everything after the two XYZ
    header lines); the route section requests an OPT with Hirshfeld
    population analysis. Output goes to *opt_input_path*.
    """
    # Atom coordinates: skip the atom-count and comment lines of the XYZ file.
    with open(xyz_path, "r") as xyz:
        position = "".join(xyz.readlines()[2:])
    input_lines = [
        "%Chk=" + smi_to_filename(smi) + "\n",
        "%NProcShared=" + str(n_jobs) + "\n",
        "%mem=" + str(dft_mem_mb) + "MB\n",
        "#P " + dft_method + "/" + dft_base + " opt Symmetry=(NoInt,NoGrad,None) gfprint pop=(full,HirshfeldEE)\n",
        "\n" + smi + "\n\n",
        "0 1\n",
        position + "\n\n\n",
    ]
    with open(opt_input_path, "w") as inp:
        inp.writelines(input_lines)
class SharedLastComputation:
    """
    Object that can be shared by several OPTEvaluationStrategy instances and that contains the values of the last
    DFT computation. It allows to only perform one calculation in case of the evaluation of a combination of
    OPTEvaluationStrategy instances.
    """
    def __init__(self):
        # Aromatic SMILES of the last evaluated molecule (None until the
        # first computation completes).
        self.smiles = None
        # Orbital energies from the last computation; all None initially.
        self.homo = None
        self.lumo = None
        self.gap = None
        self.homo_m1 = None
def compress_log_file(log_path):
    """
    Compress the log output file in place: log_path -> log_path.gz, the
    original file being removed (same outcome as the previous
    `os.system("gzip -f " + log_path)` call).
    Using the stdlib gzip module avoids both the dependency on an external
    gzip binary and shell injection through SMILES-derived file names.
    :param log_path: path to the log file
    :return:
    """
    import gzip
    import shutil
    # `gzip -f` tolerated a missing file (non-zero exit, no exception);
    # keep that best-effort behavior.
    if not os.path.isfile(log_path):
        return
    with open(log_path, "rb") as src, gzip.open(log_path + ".gz", "wb") as dst:
        shutil.copyfileobj(src, dst)
    os.remove(log_path)
class OPTEvaluationStrategy(EvaluationStrategy):
    """
    Evaluation strategy running a DFT optimization using Gaussian 09 to assess HOMO or LUMO energies.
    The DFT computation is only ran if the SMILES is identical after a molecular mechanics (MM) optimization using
    OpenBabel.
    The DFT computation is considered a success only if the molecule has kept the same SMILES.
    A cache of already performed DFT computations can be provided. It must be a JSON file containing an entry for each
    already computed aromatic canonical SMILES. Each molecule must be represented as a dictionary containing "homo"
    and/or "lumo" keys with the associated value.
    OpenBabel must be installed in a folder referenced with the $OPT_LIBS environment variable. It must
    be set according to the following path. $OPT_LIBS/obabel/openbabel-2.4.1/bin/obabel
    The $OPT_LIBS environment variable must also contain a script named $OPT_LIBS/dft.sh, starting a Gaussian
    optimization of the input file in parameter.
    """
    def __init__(self, prop, n_jobs=1, working_dir_path="/tmp/", cache_files=None, MM_program="obabel_mmff94",
                 cache_behaviour="retrieve_OPT_data", remove_chk_file=True, shared_last_computation=None,
                 dft_base="3-21G*", dft_method="B3LYP", dft_mem_mb=512):
        """
        Initialization of the DFT evaluation strategy
        :param prop: key of the property to be assessed. Can be "homo", "lumo", "gap" or "homo-1"
        :param n_jobs: number of jobs for gaussian optimization
        :param working_dir_path: directory in which computation files will be stored
        :param cache_files: list of JSON file containing a cache of former computations
        :param MM_program: program used to compute MM. Options are :
            - "obabel" or "obabel_mmff94" for MMFF94 optimization using OpenBabel
            - "rdkit" or "rdkit_mmff94" for MMFF94 optimization using RDKit
            - "rdkit_uff" for UFF optimization using RDKit
        :param cache_behaviour : configuration of the behaviour when cache files are given. "retrieve_OPT_data"
        (default): if the molecule is known in the cache, no DFT computation is made and values are retrieved.
        "compute_again_delete_files": DFT computation are made for all molecules but DFT files are removed for molecules
        that are already in cache.
        :param remove_chk_file: whether the G09 CHK file is removed after DFT computation (default:True)
        :param shared_last_computation: SharedLastComputation instance to share the values of the last computation
        values with several OPTEvaluationStrategy instances
        :param dft_base: base of G09 DFT computation (default : "3-21G*")
        :param dft_method method of G09 DFT computation (default : "B3LYP")
        :param dft_mem_mb memory assigned to each DFT calculation in MB (default : 512)
        """
        super().__init__()
        self.prop = prop
        self.n_jobs = n_jobs
        self.scores = None
        self.shared_last_computation = shared_last_computation
        self.MM_program = MM_program
        if cache_files is None:
            self.cache_files = []
        else:
            self.cache_files = cache_files
        self.cache = {}
        # Reversing the list so that values of first files are taken primarily if there exists an intersection of
        # SMILES keys
        self.cache_files.reverse()
        # Loading cache
        for cache_file in self.cache_files:
            with open(cache_file, "r") as f:
                cache = json.load(f)
                self.cache.update(cache)
        # Creating the root directory if does not exist
        os.makedirs(working_dir_path, exist_ok=True)
        # Computing a unique identifier for the current instance
        computed_uuid = str(uuid.uuid4())
        # Computing the working directory path by concatenating the working directory root and the uuid
        self.working_dir_path_uuid = join(working_dir_path, computed_uuid)
        self.cache_behaviour = cache_behaviour
        self.remove_chk_file = remove_chk_file
        self.dft_base = dft_base
        self.dft_method = dft_method
        self.dft_mem_mb = dft_mem_mb
        print("DFT MM " + str(self.MM_program))
        print(str(len(self.cache.keys())) + " molecules in cache")
    def keys(self):
        """Return the list of property keys this strategy evaluates."""
        return [self.prop]
    def is_in_cache(self, smi):
        """Whether *smi* (aromatic canonical SMILES) has a cache entry."""
        return smi in self.cache
    def get_cache_value(self, prop, smi):
        """Return (value, success) for property *prop* of cached molecule
        *smi*. Success comes from the entry's "success" flag when present,
        otherwise from the presence of a non-None value; a missing *prop*
        always yields (None, False)."""
        # Computing the success value if the "success" key in in the entry
        if "success" in self.cache[smi]:
            success = self.cache[smi]["success"]
        # Otherwise the success is computed as whether the property is not None
        else:
            success = prop in self.cache[smi] and self.cache[smi][prop] is not None
        if prop in self.cache[smi]:
            value = self.cache[smi][prop]
        else:
            value = None
            success = False
        return value, success
    def remove_evaluation_files(self, post_opt_smi_path, xyz_path, opt_input_path, chk_path,
                                log_path, is_in_cache):
        """
        Removing files created during the MM + DFT computation.
        The existence of the files is checked before removal.
        CHK file is removed iff. self.remove_chk_file is True
        Log file is removed iff. molecule is in cache and self.cache_behaviour is set to "compute_again_delete_files"
        :param post_opt_smi_path: path to the file containing the SMILES after DFT optimization (.smi)
        :param xyz_path: path to the file containing the XYZ data after MM optimization (.xyz)
        :param opt_input_path: path to the input of G09 (.inp)
        :param chk_path: path to the CHK file generated by G09 (.chk)
        :param log_path: path to the G09 LOG path (.log)
        :param is_in_cache: whether the molecule is known in the cache
        """
        remove_files([post_opt_smi_path, xyz_path, opt_input_path])
        # Removing CHK file if self.remove_chk_file is set to True
        if self.remove_chk_file:
            remove_files([chk_path])
        # Removing log path if solution is in cache and self.cache_behaviour is set to "compute_again_delete_files"
        if self.cache_behaviour == "compute_again_delete_files" and is_in_cache:
            remove_files([log_path])
    def evaluate_individual(self, individual, to_replace_idx=None, file_prefix=""):
        """
        Compute the requested DFT property ("homo", "lumo", "gap" or
        "homo-1") for *individual*. Values are taken from the cache or the
        shared last computation when available; otherwise MM + Gaussian 09
        DFT is run. Returns (score, [score]); raises EvaluationError on any
        failure.

        Code from https://github.com/Cyril-Grl/AlphaSMILES (Cyril Grelier)
        MIT License
        Copyright (c) 2019 Cyril-Grl
        Permission is hereby granted, free of charge, to any person obtaining a copy
        of this software and associated documentation files (the "Software"), to deal
        in the Software without restriction, including without limitation the rights
        to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
        copies of the Software, and to permit persons to whom the Software is
        furnished to do so, subject to the following conditions:
        The above copyright notice and this permission notice shall be included in all
        copies or substantial portions of the Software.
        THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
        IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
        FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
        AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
        LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
        OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
        SOFTWARE.
        """
        super().evaluate_individual(individual, to_replace_idx)
        # Extracting SMILES
        smi = individual.to_aromatic_smiles()
        # Converting the smiles to file name compatible
        filename = file_prefix + smi_to_filename(smi)
        # Computing paths
        post_opt_smi_path = join(self.working_dir_path_uuid, filename + ".opt.smi")
        xyz_path = join(self.working_dir_path_uuid, filename + ".xyz")
        opt_input_path = join(self.working_dir_path_uuid, filename + "_OPT.inp")
        opt_log_path = join(self.working_dir_path_uuid, filename + "_OPT.log")
        chk_path = join(self.working_dir_path_uuid, smi + ".chk")
        # Computing whether the solution is known in the cache
        ind_is_in_cache = self.is_in_cache(smi)
        # If already calculated and cache behaviour is set to "retrieve_OPT_data", loading from cache
        if ind_is_in_cache and self.cache_behaviour == "retrieve_OPT_data":
            homo_cache, success_homo_cache = self.get_cache_value("homo", smi)
            homo_m1_cache, success_homo_m1_cache = self.get_cache_value("homo-1", smi)
            lumo_cache, success_lumo_cache = self.get_cache_value("lumo", smi)
            gap_cache, success_gap_cache = self.get_cache_value("gap", smi)
            if self.prop == "gap":
                success = success_homo_cache and success_lumo_cache and success_gap_cache
                score = gap_cache
                scores = [gap_cache]
            elif self.prop == "homo":
                success = success_homo_cache
                score = homo_cache
                scores = [homo_cache]
            elif self.prop == "homo-1":
                success = success_homo_m1_cache
                score = homo_m1_cache
                scores = [homo_m1_cache]
            elif self.prop == "lumo":
                success = success_lumo_cache
                score = lumo_cache
                scores = [lumo_cache]
            if not success:
                raise EvaluationError("DFT failure in cache for " + smi)
            else:
                return score, scores
        # Case in which the computation has just been performed by another OPTEvaluationStrategy instance that
        # shares the same SharedLastComputation instance
        elif self.shared_last_computation is not None and individual.to_aromatic_smiles() == self.shared_last_computation.smiles:
            # Returning score
            if self.prop == "homo":
                return self.shared_last_computation.homo, [self.shared_last_computation.homo]
            elif self.prop == "lumo":
                return self.shared_last_computation.lumo, [self.shared_last_computation.lumo]
            elif self.prop == "gap":
                return self.shared_last_computation.gap, [self.shared_last_computation.gap]
            elif self.prop == "homo-1":
                return self.shared_last_computation.homo_m1, [self.shared_last_computation.homo_m1]
        # If score never computed or cache behaviour is set to "compute_again_delete_files", starting DFT
        else:
            # Creating the working directory if does not exist
            os.makedirs(self.working_dir_path_uuid, exist_ok=True)
            print("computing dft for " + str((individual.to_aromatic_smiles())))
            try:
                # Performing Obabel MM
                if self.MM_program == "obabel" or self.MM_program == "obabel_mmff94":
                    # Converting SMILES to XYZ after computing MM (RDKit MMFF94)
                    xyz_str, success_MM = obabel_mmff94_xyz(smi)
                # Performing RDkit MM
                elif self.MM_program == "rdkit" or self.MM_program == "rdkit_mmff94":
                    # Converting SMILES to XYZ after computing MM (RDKit MMFF94)
                    xyz_str, success_MM = rdkit_mm_xyz(smi, ff="MMFF94", max_iterations=500)
                elif self.MM_program == "rdkit_uff":
                    # Converting SMILES to XYZ after computing MM (RDKit MMFF94)
                    xyz_str, success_MM = rdkit_mm_xyz(smi, ff="UFF", max_iterations=500)
                if success_MM:
                    # Writing optimized XYZ to file
                    with open(xyz_path, "w") as f:
                        f.writelines(xyz_str)
                    # Creating input file for OPT
                    write_input_file(opt_input_path, xyz_path, smi, self.n_jobs, dft_base=self.dft_base,
                                     dft_method=self.dft_method, dft_mem_mb=self.dft_mem_mb)
                    # Calculate OPT in the working directory
                    command_opt = "cd " + self.working_dir_path_uuid + "; " + join(os.environ["OPT_LIBS"],
                                                                                  "dft.sh") + " " + opt_input_path
                    print("Starting OPT")
                    start = time.time()
                    os.system(command_opt)
                    stop = time.time()
                    print("Execution time OPT: " + repr(int(stop - start)) + "s")
                    # Checking that normal termination occurred
                    with open(opt_log_path, "r") as log:
                        last_line = log.readlines()[-1]
                    # if the OTP end up well
                    if "Normal termination" in last_line:
                        # Extracting the smiles from the log file
                        command_obabel = "obabel -ilog " + opt_log_path + " -ocan -O " + post_opt_smi_path
                        os.system(command_obabel)
                        post_opt_smi_rdkit = load_obabel_smi(post_opt_smi_path)
                        # If before/after SMILES are identical
                        if smi == post_opt_smi_rdkit:
                            # Parsing orbital energies from the G09 log with cclib
                            with open(opt_log_path, "r") as log:
                                data = cclib.io.ccread(log, optdone_as_list=True)
                                print("There are %i atoms and %i MOs" % (data.natom, data.nmo))
                                homos = data.homos
                                energies = data.moenergies
                            if len(homos) == 1:
                                homo = energies[0][homos[0]]
                                lumo = energies[0][homos[0] + 1]
                                homo_m1 = energies[0][homos[0] - 1]
                                gap = abs(homo - lumo)
                                # Removing files
                                self.remove_evaluation_files(post_opt_smi_path, xyz_path, opt_input_path, chk_path,
                                                             opt_log_path, is_in_cache=ind_is_in_cache)
                                # Compressing log file
                                compress_log_file(opt_log_path)
                                # Saving values in SharedLastComputation instance if defined
                                if self.shared_last_computation is not None:
                                    self.shared_last_computation.smiles = individual.to_aromatic_smiles()
                                    self.shared_last_computation.homo = homo
                                    self.shared_last_computation.lumo = lumo
                                    self.shared_last_computation.gap = gap
                                    self.shared_last_computation.homo_m1 = homo_m1
                                # Returning score
                                if self.prop == "homo":
                                    return homo, [homo]
                                elif self.prop == "lumo":
                                    return lumo, [lumo]
                                elif self.prop == "gap":
                                    return gap, [gap]
                                elif self.prop == "homo-1":
                                    return homo_m1, [homo_m1]
                            else:
                                raise EvaluationError("DFT error : |homos| > 1 for " + smi)
                        else:
                            raise EvaluationError(
                                "DFT error : Different SMILES : " + smi + " " + post_opt_smi_rdkit)
                    else:
                        raise EvaluationError("DFT error : Error during OPT for " + smi)
                else:
                    raise EvaluationError("MM error")
            except Exception as e:
                print(e)
                # Removing files
                self.remove_evaluation_files(post_opt_smi_path, xyz_path, opt_input_path, chk_path, opt_log_path,
                                             is_in_cache=ind_is_in_cache)
                if exists(opt_log_path):
                    compress_log_file(opt_log_path)
                raise EvaluationError("DFT caused exception " + str(e))
| jules-leguy/EvoMol | evomol/evaluation_dft.py | evaluation_dft.py | py | 26,818 | python | en | code | 48 | github-code | 13 |
# Listen on the microphone for the phrase below, send the audio to the
# Wit.ai recognizer, and report whether the recognized text matches.
import speech_recognition as sr

r = sr.Recognizer()
speech = sr.Microphone()
word = "hello this is a test"

recog = None  # recognized text; stays None when recognition fails
with speech as source:
    print("say hello this is a test")
    # Calibrate for ambient noise. adjust_for_ambient_noise returns None,
    # so the original's `audio = r.adjust_for_ambient_noise(source)`
    # binding was dead and is dropped here.
    r.adjust_for_ambient_noise(source)
    audio = r.listen(source)
    try:
        recog = r.recognize_wit(audio, key = "6Y4KLO4YTWDQSYQXGONPHAVB3IRSWFRN")
    except sr.UnknownValueError:
        print("could not understand audio")
    except sr.RequestError as e:
        print("Could not request results ; {0}".format(e))

# BUG FIX: the original compared `recog` unconditionally, which raised a
# NameError whenever one of the exception branches above had run.
if recog is not None:
    if(recog.lower() == word.lower()):
        print("Match! \nYou said "+recog)
    else:
        print("Fail :( you said "+recog)
def lyrics_to_frequencies(lyrics):
    """Return a dict mapping each word in *lyrics* to its occurrence count."""
    counts = {}
    for token in lyrics:
        counts[token] = counts.get(token, 0) + 1
    return counts
# Lyrics of "The Rains of Castamere", one word per list element.
# BUG FIX: the original was missing two commas ('he' 'spoke' and 'lord' 'of'),
# which Python's implicit string concatenation silently fused into the bogus
# tokens 'hespoke' and 'lordof', corrupting the word-frequency counts.
rains = ['And', 'who', 'are', 'you', 'the', 'proud', 'lord', 'said',
         'that', 'I', 'must', 'bow', 'so', 'low',
         'Only', 'a', 'cat', 'of', 'a', 'different', 'coat',
         "that's", 'all', 'the', 'truth', 'I', 'know',
         'In', 'a', 'coat', 'of', 'gold', 'or', 'a', 'coat', 'of', 'red',
         'a', 'lion', 'still', 'has', 'claws',
         'And', 'mine', 'are', 'long', 'and', 'sharp', 'my', 'lord',
         'as', 'long', 'and', 'sharp', 'as', 'yours',
         'And', 'so', 'he', 'spoke', 'and', 'so', 'he', 'spoke',
         'that', 'lord', 'of', 'Castamere',
         'And', 'now', 'the', 'rains', 'weep', "o'er", 'his', 'hall',
         'with', 'no', 'one', 'there', 'to', 'hear',
         'Yes', 'now', 'the', 'rains', 'weep', "o'er", 'his', 'hall',
         'and', 'not', 'a', 'soul', 'to', 'hear']
# Word-frequency dictionary for the lyrics defined above.
castamere = lyrics_to_frequencies(rains)
def most_common_words(freqs):
values = freqs.values()
best = max(values)
words = []
for k in freqs:
if freqs[k] == best:
words.append(k)
return (words, best) | MysticSaiyan/MITx-6.00.1x | Python Modules/dictionary.py | dictionary.py | py | 1,148 | python | en | code | 0 | github-code | 13 |
70828874579 | # !/usr/bin/python
# -*- coding:utf-8 -*-
import sys
import time
import logging
from PyQt6.QtWidgets import (QWidget, QDialog, QHBoxLayout, QVBoxLayout, QTabWidget, QGridLayout, QLabel, QLineEdit, QTextEdit, QFileDialog, QToolTip, QPushButton, QApplication)
from PyQt6.QtGui import QFont
from PyQt6.QtCore import (QSize, QRect, QObject, pyqtSignal, QThread)
from ieee_conference_spider import IEEESpider
class LogHandler(logging.Handler):
    """Logging handler that mirrors each record to stdout and to the GUI.

    ``parent`` must expose ``print_log(text)`` (here: PaperCollector), which
    appends the formatted message to the log text box.
    """
    def __init__(self, parent):
        super().__init__()
        self.parent = parent
    def emit(self, record):
        try:
            # Fix: format the record once instead of twice.
            msg = self.format(record)
            print(msg)
            self.parent.print_log(msg)
            # Keep the GUI responsive while log messages stream in.
            QApplication.processEvents()
        except Exception:
            # logging convention: emit() must never raise.
            self.handleError(record)
class SpiderThread(QObject):
    """Worker object that runs the IEEE spider; lives in a background QThread."""
    # Emitted when a crawl finishes so the GUI can re-enable its controls.
    _spider_finish = pyqtSignal()
    def __init__(self):
        super().__init__()
        #self.flag_running = False
        self.ieee_spider = IEEESpider()
    def __del__(self):
        print('>>> __del__')
    def run(self, conference_ID, save_filename, logger):
        """Crawl one conference and save the results to *save_filename*.

        Invoked via a signal (PaperCollector._start_spider) so it executes in
        the worker thread rather than the GUI thread.
        """
        self.ieee_spider.flag_running = True
        self.ieee_spider.get_article_info(conference_ID, save_filename, logger)
        self._spider_finish.emit()
        #self.flag_running = False
class PaperCollector(QWidget):
_start_spider = pyqtSignal(str, str, logging.Logger)
def __init__(self):
super().__init__()
self.initUI()
#sys.stdout = LogStream(newText=self.onUpdateText)
self.spiderT = SpiderThread()
self.thread = QThread(self)
self.spiderT.moveToThread(self.thread)
self._start_spider.connect(self.spiderT.run) # 只能通过信号槽启动线程处理函数
#self.spiderT._log_info.connect(self.print_log)
self.spiderT._spider_finish.connect(self.finish_collect_paper)
def spiderUI(self):
"""
Define the UI playout of spider page.
"""
# input the conference ID and saved file name
self.conferenceID_label = QLabel('IEEE conference ID: ')
self.conferenceID_edit = QLineEdit()
self.conferenceID_button = QPushButton('Search')
self.conferenceID_button.clicked.connect(IEEESpider.search_conferenceID)
self.saveFile_label = QLabel('Save to: ')
self.saveFile_edit = QLineEdit()
self.saveFile_button = QPushButton('Browse')
self.saveFile_button.clicked.connect(self.get_save_file_name)
# button to start crawing
self.startCrawling_button = QPushButton('Start')
self.startCrawling_button.setToolTip('Click and wait for collecting published paper data ^o^')
self.startCrawling_button.clicked.connect(self.start_collect_paper)
self.stopCrawling_button = QPushButton('Stop')
self.stopCrawling_button.setToolTip('Click to stop collecting data ^_^')
self.stopCrawling_button.clicked.connect(self.stop_collect_paper)
# print log
self.process = QTextEdit(readOnly=True)
self.process.setFont(QFont("Source Code Pro",9))
self.spider_grid = QGridLayout()
self.spider_grid.setSpacing(10)
self.spider_grid.addWidget(self.conferenceID_label, 1, 0)
self.spider_grid.addWidget(self.conferenceID_edit, 1, 1)
self.spider_grid.addWidget(self.conferenceID_button, 1, 2)
self.spider_grid.addWidget(self.saveFile_label, 2, 0)
self.spider_grid.addWidget(self.saveFile_edit, 2, 1)
self.spider_grid.addWidget(self.saveFile_button, 2, 2)
self.spider_grid.addWidget(self.startCrawling_button, 3, 0)
self.spider_grid.addWidget(self.stopCrawling_button, 3, 2)
self.spider_grid.addWidget(self.process, 4, 0, 3, 3)
self.spider_widget = QWidget()
self.spider_widget.setLayout(self.spider_grid)
def analyzerUI(self):
"""
Define the UI playout of analyzer page.
"""
self.btnn = QPushButton('TEST')
self.analyzer_grid = QGridLayout()
self.analyzer_grid.setSpacing(10)
self.analyzer_grid.addWidget(self.btnn, 1, 0)
self.analyzer_widget = QWidget()
self.analyzer_widget.setLayout(self.analyzer_grid)
def reservedUI(self):
"""
Define the UI playout of analyzer page.
"""
self.image = QTextEdit(readOnly=True)
self.image.setFont(QFont("Source Code Pro",9))
self.reserved_grid = QGridLayout()
self.reserved_grid.setSpacing(10)
self.reserved_grid.addWidget(self.image, 1, 0)
self.reserved_widget = QWidget()
self.reserved_widget.setLayout(self.reserved_grid)
def sidebarUI(self):
"""
Define the UI playout of sidebar.
"""
self.sidebar_btn_1 = QPushButton('Collector', self)
self.sidebar_btn_1.clicked.connect(self.sidebar_button_1)
self.sidebar_btn_2 = QPushButton('Analyzer', self)
self.sidebar_btn_2.clicked.connect(self.sidebar_button_2)
self.sidebar_btn_3 = QPushButton('Reserved', self)
self.sidebar_btn_3.clicked.connect(self.sidebar_button_3)
sidebar_layout = QVBoxLayout()
sidebar_layout.addWidget(self.sidebar_btn_1)
sidebar_layout.addWidget(self.sidebar_btn_2)
sidebar_layout.addWidget(self.sidebar_btn_3)
sidebar_layout.addStretch(5)
sidebar_layout.setSpacing(20)
self.sidebar_widget = QWidget()
self.sidebar_widget.setLayout(sidebar_layout)
def sidebar_button_1(self):
self.right_widget.setCurrentIndex(0)
def sidebar_button_2(self):
self.right_widget.setCurrentIndex(1)
def sidebar_button_3(self):
self.right_widget.setCurrentIndex(2)
def initUI(self):
"""
Define the overall UI playout.
"""
QToolTip.setFont(QFont('Times', 10))
self.sidebarUI()
self.spiderUI()
self.analyzerUI()
self.reservedUI()
# 多个标签页
self.right_widget = QTabWidget()
self.right_widget.tabBar().setObjectName("mainTab")
self.right_widget.addTab(self.spider_widget, '')
self.right_widget.addTab(self.analyzer_widget, '')
self.right_widget.addTab(self.reserved_widget, '')
# 隐藏了标签部件的标签并初始化显示页面
self.right_widget.setCurrentIndex(0)
self.right_widget.setStyleSheet('''QTabBar::tab{width: 0; height: 0; margin: 0; padding: 0; border: none;}''')
# overall layout
main_layout = QHBoxLayout()
main_layout.addWidget(self.sidebar_widget)
main_layout.addWidget(self.right_widget)
main_layout.setStretch(0, 40)
main_layout.setStretch(1, 200)
self.setLayout(main_layout)
#self.setLayout(self.sprder_grid)
self.setGeometry(300, 300, 850, 300)
self.setWindowTitle('IEEE paper collector (by Glooow)')
self.show()
def get_save_file_name(self):
"""
Retrive the name of csv file to save.
"""
self.save_file_name = QFileDialog.getSaveFileName(self, '选择保存路径', '', 'csv(*.csv)') # (file_name, file_type)
self.saveFile_edit.setText(self.save_file_name[0])
def start_collect_paper(self):
if self.thread.isRunning():
return
self.startCrawling_button.setEnabled(False)
self.startCrawling_button.setToolTip('I\'m trying very hard to collect papers >_<')
# 先启动QThread子线程
#self.spiderT.flag_running = True
self.thread.start()
# 发送信号,启动线程处理函数
# 不能直接调用,否则会导致线程处理函数和主线程是在同一个线程,同样操作不了主界面
global logger
self._start_spider.emit(self.conferenceID_edit.text(), self.saveFile_edit.text(), logger)
def finish_collect_paper(self):
self.startCrawling_button.setEnabled(True)
self.startCrawling_button.setToolTip('Click and wait for collecting published paper data ^o^')
self.spiderT.ieee_spider.flag_running = False
self.thread.quit()
def stop_collect_paper(self):
if not self.thread.isRunning():
return
self.spiderT.ieee_spider.flag_running = False
time.sleep(15)
self.thread.quit() # 退出
#self.thread.wait() # 回收资源
#self.show_dialog('stop!')
def print_log(self, s):
self.process.append(s)
def show_dialog(self, info):
"""
Pop up dialogs for debug.
"""
hint_dialog = QDialog()
hint_dialog.setWindowTitle('Hint info')
#hint_dialog.setWindowModality(PyQt6.QtCore.Qt.NonModal)
hint_info = QLabel(info, hint_dialog)
hint_info.adjustSize()
padding = 20
max_width = 360
# set the maximum width
if hint_info.size().width() > max_width:
hint_info.setGeometry(QRect(0, 0, max_width, 80))
hint_info.setWordWrap(True)
hint_info.move(padding, padding)
hint_dialog.resize(hint_info.size() + QSize(padding*2, padding*2))
hint_dialog.exec()
logger = None
def main():
    """Build the Qt application, wire GUI logging, and enter the event loop."""
    global logger
    app = QApplication(sys.argv)
    window = PaperCollector()
    # Route log records into the GUI's text box via LogHandler.
    handler = LogHandler(window)
    formatter = logging.Formatter(
        fmt="%(asctime)s [%(levelname)s] : %(message)s",
        datefmt="%Y/%m/%d %H:%M:%S",
    )
    handler.setFormatter(formatter)
    logger = logging.getLogger("logger")
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    sys.exit(app.exec())
if __name__ == '__main__':
    main()
| Glooow1024/paper_collector | main.py | main.py | py | 9,685 | python | en | code | 0 | github-code | 13 |
41267518876 | '''
@author Piero Orderique
@date 12 Jan 2021
This file is for testing linalg module
'''
from linalg import Matrix
mat1 = Matrix([
[1, 1, 2],
[3, 5, 8],
[3, 0, 4],
])
mat2 = Matrix([
[7, 0, 6],
[9, 8, 7],
[3, 9, 5],
])
print(mat1 + mat2) | pforderique/Python-Scripts | RandomScripts/Math/Linear_Algebra_Library/test_runner.py | test_runner.py | py | 266 | python | en | code | 1 | github-code | 13 |
30999315241 | from random import Random
from time import time
from math import cos
from math import pi
from inspyred import ec
import inspyred
from inspyred.ec import terminators
import math
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.delaunay as triang
import matplotlib.patches as patch
from traveling_santa_evo_tsp import TSP
class EVO(object):
    def __init__(self, data,route):
        """Precompute the pairwise distance matrix for the given cities.

        data:  iterable of rows where columns 1 and 2 hold the x/y
               coordinates of a city (column 0 is ignored here).
        route: reference tour, kept for later comparison in solve().
        """
        self.route = route
        self.points = []
        for i, p in enumerate(data):
            # Coordinates arrive as strings/objects; coerce to integers.
            self.points.insert(i, (np.int_(p[1]) ,np.int_(p[2]) ) )
        # Full NxN Euclidean distance matrix (symmetric, zero diagonal).
        self.weights = [[0 for _ in range(len(self.points))] for _ in range(len(self.points))]
        for i, p in enumerate(self.points):
            for j, q in enumerate(self.points):
                self.weights[i][j] = math.sqrt((p[0] - q[0])**2 + (p[1] - q[1])**2)
def calc_path_lenght(self,path):
total = 0
for c in path:
total += self.weights[c[0]][c[1]]
return total
def calc_path_duplicates(self,route0, route1):
duplicates = 0
for inx0, edge0 in enumerate(route0):
for inx1, edge1 in enumerate(route1):
if( (edge1[0] == edge0[0] and edge1[1] == edge0[1]) or (edge1[1] == edge0[0] and edge1[0] == edge0[1]) ):
duplicates += 1
return duplicates
def solve(self, display=True):
prng = Random()
prng.seed(time())
problem = TSP(self.weights,self.route)
ea = ec.EvolutionaryComputation(prng)
ea.selector = ec.selectors.tournament_selection
ea.variator = [ec.variators.partially_matched_crossover,
ec.variators.inversion_mutation]
ea.replacer = ec.replacers.generational_replacement
ea.terminator = ec.terminators.generation_termination
final_pop = ea.evolve(generator=problem.generator,
evaluator=problem.evaluator,
bounder=problem.bounder,
maximize=problem.maximize,
pop_size=200,
max_generations=60,
tournament_size=10,
num_selected=100,
num_elites=1)
best = max(ea.population)
self.tour = []
for i, p in enumerate(best.candidate):
self.tour.insert(i, (best.candidate[i-1] , p) )
print('Best Solution:')
total = 0
for c in self.tour:
total += self.weights[c[0]][c[1]]
print('Distance: {0}'.format(total))
print('Tour: ' , self.tour)
print('Route: ' , self.route )
return ea | roosnic1/twotsp | traveling_santa_evo.py | traveling_santa_evo.py | py | 2,776 | python | en | code | 4 | github-code | 13 |
34950411455 | import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col, monotonically_increasing_id
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format,to_timestamp,dayofweek,from_unixtime
import pyspark.sql.functions as f
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID']=config['AWS_CREDS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY']=config['AWS_CREDS']['AWS_SECRET_ACCESS_KEY']
def create_spark_session():
    """Create (or reuse) a SparkSession with the hadoop-aws package for S3A access."""
    spark = SparkSession \
        .builder \
        .config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
        .getOrCreate()
    return spark
def process_song_data(spark, input_data, output_data):
    """Build the songs and artists dimension tables from the raw song JSON.

    Reads song_data JSON from *input_data* (public Udacity S3 bucket) and
    writes both dimensions as parquet under *output_data*.
    """
    # Pattern covers the nested song_data/<A>/<B>/<C>/<file>.json layout.
    song_data = os.path.join(input_data, "song_data/*/*/*/*.json")
    song_data_df = spark.read.json(song_data)
    # songs dimension
    songs_table = song_data_df.select('artist_id', 'duration', 'song_id', 'title', 'year').dropDuplicates()
    # Partition by year and artist; "overwrite" replaces any previous output.
    output_songs_table = songs_table.write.partitionBy("year", "artist_id").parquet(output_data + "output-songs", "overwrite")
    # artists dimension
    # Fix: the original selected 'artist_latitude' twice and dropped longitude.
    artists_table = song_data_df.select('artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude').dropDuplicates()
    output_artists_table = artists_table.write.parquet(output_data + "output-artists", "overwrite")
def process_log_data(spark, input_data, output_data):
    """Build the users and time dimensions plus the songplays fact table.

    Reads event-log JSON from *input_data*, keeps only 'NextSong' plays,
    and writes users, time, and songplays tables as parquet to *output_data*.
    """
    # Local import keeps the fix self-contained; needed for the ts cast below
    # (the original referenced TimestampType without ever importing it).
    from pyspark.sql.types import TimestampType

    # Fix: the original built a tuple here instead of joining the path.
    log_data = os.path.join(input_data, "log_data/*.json")
    log_df = spark.read.json(log_data)
    # Only 'NextSong' events represent actual song plays.
    log_df = log_df.filter(log_df['page'] == 'NextSong')

    # users dimension
    # Fix: the original copy-pasted the artist columns; the log's user
    # attributes are userId/firstName/lastName/gender/level.
    users_table = log_df.select('userId', 'firstName', 'lastName', 'gender', 'level').dropDuplicates()
    output_users_table = users_table.write.parquet(output_data + "output-users", "overwrite")

    # 'ts' is epoch milliseconds; convert to a formatted datetime string.
    get_datetime = udf(lambda x : datetime.fromtimestamp(x/1000.0).strftime('%Y-%m-%d %H:%M:%S'))
    log_df = log_df.withColumn("date_timestamp",get_datetime(col("ts")))

    # time dimension, derived from the parsed timestamp
    time_table = log_df.select('date_timestamp')
    time_table = time_table.select('date_timestamp',
                           hour('date_timestamp').alias('hour'),
                           dayofmonth('date_timestamp').alias('day'),
                           weekofyear('date_timestamp').alias('week'),
                           month('date_timestamp').alias('month'),
                           year('date_timestamp').alias('year'))
    time_table = time_table.write.partitionBy("year", "month").parquet(output_data + "output-times", "overwrite")

    # songplays fact table: join songs to log events on title/length/artist
    song_df = os.path.join(input_data, "song_data/*/*/*/*.json")
    song_df = spark.read.json(song_df)
    songplays_table = song_df.join(log_df, (song_df.title == log_df.song) & (song_df.duration == log_df.length) & (song_df.artist_name == log_df.artist))
    # songplay_id is a synthetic surrogate key.
    songplays_table = songplays_table.withColumn("songplay_id",monotonically_increasing_id())
    # Fix: minutes are 'mm' in Spark datetime patterns ('MM' is the month).
    tsFormat = "yyyy/MM/dd HH:mm:ss z"
    songplays_table = songplays_table.withColumn(
        'start_time', to_timestamp(date_format((col("ts")/1000).cast(dataType=TimestampType()),tsFormat),tsFormat)).select("songplay_id","start_time",col("userId").alias("user_id"),"level","song_id","artist_id",col("sessionId").alias("session_id"),col("artist_location").alias("location"),"userAgent",month(col("start_time")).alias("month"),year(col("start_time")).alias("year"))
    songplays_table = songplays_table.write.partitionBy("year", "month").parquet(os.path.join(output_data,"songplays"),"overwrite")
def main():
    """ETL entry point: create the Spark session and run both pipelines."""
    spark = create_spark_session()
    # Source: public Udacity bucket; destination: project output location.
    input_data = "s3a://udacity-dend/"
    output_data = "s3/buckets/output-fact-table-data/"
    process_song_data(spark, input_data, output_data)
    process_log_data(spark, input_data, output_data)
if __name__ == "__main__":
    main()
| MaryamAlMansour/Wrangling-with-Spark | home/etl.py | etl.py | py | 5,323 | python | en | code | 0 | github-code | 13 |
72320848657 | from django.http import HttpResponse
from rest_framework.renderers import JSONRenderer
from django.urls import reverse_lazy
from django.views.generic import ListView, DetailView, CreateView
from django.views.generic.base import TemplateView
from ToDo.serializer import TaskSerrializer
from ToDo.models import Task, Categories
class HomePageView(TemplateView):
    # Static landing page.
    template_name = 'base.html'
class TaskDetailView(DetailView):
    # Detail page for a single Task.
    model = Task
    template_name = 'task_detail.html'
class CategoriesDetailView(DetailView):
    # Detail page for a single Categories record.
    model = Categories
    template_name = 'categories_detail.html'
class TaskListView(ListView):
    # Task list split into expired and ongoing tasks.
    model = Task
    template_name = 'task_list.html'
    context_object_name = "tasks"
    def get_queryset(self):
        # Returns a dict of two querysets (not a single queryset) so the
        # template can render expired and ongoing tasks as separate sections.
        queryset = {'expire_list': Task.objects.expire_task().all(),
                    'going_list': Task.objects.going_task().all()}
        return queryset
class TaskCreateView(CreateView):
    # Creation form for Task; redirects back to a fresh form on success.
    model = Task
    template_name = 'task_new.html'
    fields = '__all__'
    success_url = reverse_lazy('task_new')
class CategoryListView(ListView):
    # Category list split into empty and non-empty categories.
    model = Categories
    template_name = 'categories.html'
    context_object_name = "categories"
    def get_queryset(self):
        # NOTE(review): uses 'Categories.object' (singular) — presumably the
        # model declares a custom manager under that name; verify in models.py.
        queryset = {'empty_list': Categories.object.empty().all(),
                    'non_empty_list': Categories.object.non_empty().all()}
        return queryset
def task_detail(request):
    """Return every Task serialized as a JSON HTTP response.

    Note: Django's URL dispatcher passes the request positionally, so
    renaming the misspelled 'resquest' parameter is safe for callers.
    """
    tasks = Task.objects.all()
    serializer = TaskSerrializer(tasks, many=True)
    json_data = JSONRenderer().render(serializer.data)
    return HttpResponse(json_data, content_type='application/json')
| AshtiNematian/To_Do_List | Reminder/Reminder/ToDo/views.py | views.py | py | 1,621 | python | en | code | 0 | github-code | 13 |
6910666626 | import sys
import os
import time
from pathlib import Path
from abstract_component import NotificationMessage
# add parent directory to import space, so we can keep directory structure
current = os.path.dirname(os.path.realpath(__file__))
parent = os.path.dirname(current)
sys.path.append(parent)
from sirene.player import SoundPlayer
from luwiBlaulicht.blaulicht import BlueLightSwitch
from api_adapter import ApiAdapter
from Moritz_CamTest.cam_shit import CameraAnalyst
from luwiPowerSkript.waterpump import WaterPump
# TODO: only temporary to "compile"
from katy_intersectionsApi.intersection_guide import IntersectionGuide, Direction
class GlobalController:
home_id = 0
def __init__(self):
self.busy =False
self.api_adapter = ApiAdapter(ip="192.168.171.91", port=5000)
self.sound_player = SoundPlayer("/home/jens/repo/sirene")
self.sound_player.connect_bt()
self.blue_light = BlueLightSwitch()
self.line_analyst = CameraAnalyst(self)
self.intersection_guide = IntersectionGuide()
self.pump_ctl = WaterPump()
self.needs_privileges = False
self.cached_message = None
self.cached_value = None
def notify_on_forcestop(self):
self.api_adapter.send_stop_request()
self.cached_message = NotificationMessage.FORCE_STOP
print(f"stopped")
def notify_on_center(self):
if self.cached_message is None or self.cached_message != NotificationMessage.CENTER:
self.api_adapter.send_center_request()
print(f"center")
self.cached_message = NotificationMessage.CENTER
def notify_on_left(self, steering_value : int = -1):
if self.cached_message is None or self.cached_message != NotificationMessage.LEFT or self.cached_value != steering_value:
if steering_value == -1:
self.api_adapter.send_left_request()
else:
self.api_adapter.send_left_request(steering_value)
print(f"left {steering_value}")
self.cached_message = NotificationMessage.LEFT
self.cached_value = steering_value
def notify_on_right(self, steering_value : int = -1):
if self.cached_message is None or self.cached_message != NotificationMessage.RIGHT or self.cached_value != steering_value:
if steering_value == -1:
self.api_adapter.send_right_request()
else:
self.api_adapter.send_right_request(steering_value)
print(f"right {steering_value}")
self.cached_message = NotificationMessage.RIGHT
self.cached_value = steering_value
def notify_on_destination_reached(self):
self.reach_destination()
self.cached_message = NotificationMessage.DESTINATION_REACHED
print(f"reached")
def notify_on_intersection(self):
self.intersection_guide.find_intersection()
direction = self.intersection_guide.get_current_direction()
self.turn_after_intersection(direction)
def turn_after_intersection(self, direction):
# TODO: check if this is mechanically o.k.
if direction == Direction.LEFT:
self.api_adapter.send_left_request()
elif direction == Direction.RIGHT:
self.api_adapter.send_right_request()
def reach_destination(self):
print("reach dest")
self.api_adapter.send_stop_request()
if self.needs_privileges:
self.sound_player.stop()
self.blue_light.stop()
# self.line_analyst.stop()
self.busy = True
print("foo")
self.pump_ctl.start_pumping_water()
# simulate recognition of extinguished fire:
print("exstinguishing...")
time.sleep(3)
print("fire dead")
self.pump_ctl.stop_pumping_water()
self.u_turn()
self.intersection_guide.reach_dest()
self.set_destination_home()
self.busy = False
self.start_drive_to_destination(self.home_id)
else:
print("reached fire station")
def u_turn(self):
# TODO: implement mechanical u_turn
print("not implemented yet")
input("press any key to signalize you manually uturned the car.")
def start_drive_to_destination(self, destination_id=1):
if self.needs_privileges:
self.blue_light.start()
self.sound_player.start()
self.line_analyst.start()
print("goo")
self.api_adapter.send_go_request()
def set_destination_home(self):
self.needs_privileges = False
def set_destination_fire(self):
self.needs_privileges = True
# Module-level singleton controller; constructed (and hardware initialised)
# as soon as this module runs.
ctl = GlobalController()
def start(destination_id=1):
    """Dispatch the robot to the fire at *destination_id* with privileges on."""
    ctl.set_destination_fire()
    ctl.start_drive_to_destination(destination_id)
# Kick off a mission immediately on import/run.
start()
| lwilfert/05FeuerRoboter | katy_mainControl/global_controller.py | global_controller.py | py | 4,880 | python | en | code | 0 | github-code | 13 |
34799486412 | import os
def grab_images_from_video(video_path="", save_dir="", filename=""):
    """Extract one frame every 2 seconds from *video_path* into *save_dir*.

    Frames are written as <filename>_image-0001.jpg, _image-0002.jpg, ...
    """
    # ffmpeg notes (translated): '-r N' grabs N frames per second;
    # '-vf fps=1/20' grabs one frame every 20 seconds.
    import subprocess
    # Fix: pass an argument list instead of a concatenated shell string, so
    # paths containing spaces or shell metacharacters cannot break (or
    # inject into) the command the old os.system() call built.
    subprocess.run(
        ['ffmpeg', '-i', video_path, '-f', 'image2', '-q:v', '2',
         '-vf', 'fps=fps=1/2',
         save_dir + '/' + filename + '_image-%4d.jpg'])
# Batch-extract frames from every recording in the input directory.
video_path = '/home/xiehuaiqi/Videos/vlc_video_recording/'
save_path = '/home/xiehuaiqi/Pictures/皮带跑偏0610/'
for name in os.listdir(video_path):
    print(name)
    # Use the basename (without extension) as the frame-file prefix.
    file = name.split('.')[0]
    grab_images_from_video(video_path + name, save_path, file)
| Xiehuaiqi/python_script | cut_video/ffmpeg_cut.py | ffmpeg_cut.py | py | 555 | python | en | code | 0 | github-code | 13 |
27718378832 | import math
import numpy as np
def dot_product(input):
    """Weighted sum of (value, weight) pairs, squashed through the sigmoid."""
    weighted_sum = sum(value * weight for value, weight in input)
    return sigm(weighted_sum)
def sigm(x):
    """Numerically stable logistic sigmoid: 1 / (1 + e**-x).

    Fix: the original 'math.e ** (-x)' raises OverflowError for large
    negative x (around x < -709). Branching on the sign keeps the exponent
    non-positive, so exp() can never overflow.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)
    return z / (1 + z)
def count_error(expected, predicted):
    # Signed error: positive when the network over-predicts.
    return predicted - expected
def predict(point, weights_h, weights_o):
    """Forward pass: two hidden sigmoid units feeding one sigmoid output.

    Returns (hidden_activations, output_activation).
    """
    hidden = []
    for unit in range(2):
        pairs = [[point[0], weights_h[2 * unit]], [point[1], weights_h[2 * unit + 1]]]
        hidden.append(dot_product(pairs))
    output = dot_product([[hidden[0], weights_o[0]], [hidden[1], weights_o[1]]])
    return hidden, output
def backpropagation(error, outO, outH, weightsO, weightsH, input, learn_const):
    """One gradient step, updating the weight lists in place.

    Output-layer weights are adjusted first; the hidden-layer update then
    uses the freshly updated output weights (same order as before).
    Returns the (mutated) weightsO and weightsH lists.
    """
    for j in (0, 1):
        weightsO[j] -= learn_const * (outH[j] * error)
    for j in (0, 1):
        for i in (0, 1):
            weightsH[i + 2 * j] -= learn_const * (input[i] * error * weightsO[j])
    return weightsO, weightsH
def start_prog(epochs):
    """Train the 2-2-1 network on XOR for *epochs* passes, then print results.

    NOTE(review): with no bias terms a 2-2-1 sigmoid net struggles on XOR,
    and the epochs=1 call below will not converge — raise epochs to see
    meaningful predictions.
    """
    inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
    xored = [0, 1, 1, 0]
    learn_const = 0.1
    # Random initial weights: 4 hidden (2 inputs x 2 units), 2 output.
    weightsH = [np.random.uniform() for i in range(4)]
    weightsO = [np.random.uniform() for i in range(2)]
    for epoch in range(epochs):
        for index, point in enumerate(inputs):
            # Forward pass, signed error, then in-place weight update.
            outsH, out = predict(point, weightsH, weightsO)
            error = count_error(xored[index], out)
            backpropagation(error, out, outsH, weightsO, weightsH, point, learn_const)
    for index, point in enumerate(inputs):
        # print(predict(point, biases, weightsH, weightsO))
        print("Point {} is expeted to be {} and was predicted as {}".format(point, xored[index],
                                                                            predict(point, weightsH, weightsO)[1]))
start_prog(1)
| MarekUlip/CollegePythonScripts | Nonconventional-algs/navy_backpropag2.py | navy_backpropag2.py | py | 1,874 | python | en | code | 0 | github-code | 13 |
73389482577 | from prototype import Search, State, Action
from typing import Callable, Optional, Any, List, Union
import time
import random
import math
import pickle
from multiprocessing import Manager, Process
'''
A Prototype of a Node
'''
class Node:
    """A node in the Monte-Carlo search tree.

    Stores the state it represents, a back-pointer to its parent, the
    children reached by each action, how often it has been visited, and
    the utilities accumulated through backpropagation.
    (Cleanup: removed a stray dead triple-quoted duplicate of __str__ and a
    commented-out placeholder return.)
    """
    def __init__(self, state:State, parent=None):
        self.state = state
        self.parent = parent
        # Maps action -> Node(stateAfterAction, self).
        self.children = {}
        self.numVisits = 0
        self.utilities = None
    def isLeaf(self)->bool:
        # Unexpanded nodes are leaves; a terminal state also counts as a
        # leaf even if it has expanded children.
        return len(self.children)==0 or self.state.isTerminal()
    def __str__(self, level=0):
        """Render this subtree, one node per line, indented by depth."""
        ret = "\t"*level+repr(self)+"\n"
        for action, node in self.children.items():
            ret += node.__str__(level+1)
        return ret
    def __repr__(self):
        """One-line summary: utilities, visit count, and child actions."""
        s=[]
        s.append("rewards: "+str(self.utilities))
        s.append("numVisits: "+str(self.numVisits))
        s.append("children/actions: " + str(list(self.children.keys())))
        return str(self.__class__.__name__)+": {"+", ".join(s)+"}"
'''
A Monte Carlo Tree Search Object.
It samples the search space and expands the search tree according to promising nodes.
Less promising nodes are visited from time to time.
'''
class MCTS(Search):
def __init__(self,
selectionPolicy:Callable[[Node],Node],
expansionPolicy:Callable[[State], List[Action]],
rollOutPolicy:Callable[[State],Any],
utilitySumFunc:Callable[[Any, Any], Any]=sum,
utilityIdx:Optional[List[int]]=None
):
'''
selectionPolicy: Given the current node, which child node should be selected to traverse to?
expansionPolicy: Given the current (leaf) node, which child node should be expanded (grown) first?
rollOutPolicy: Given the current node/state, how should a playout be completed? What's the sequence of action to take?
utilitySumFunc: function used to sum two rewards. The default is sum()
utilityIdx: Applicable if the utilities are encoded with multiple elements, each representing different agents' utility
For example utility =(0,1,1). utilityIdx:=2 means that only utility[utilityIdx] is considered.
'''
self.selectionPolicy = selectionPolicy
self.expansionPolicy = expansionPolicy # function that returns a seq of actions
self.rollOutPolicy = rollOutPolicy
self.utilitySumFunc = utilitySumFunc
self.utilityIdx = utilityIdx
def search(self,
state:State,
maxIteration:Callable=(lambda: 1000000),
maxTimeSec:Callable=(lambda: 1000),
simPerIter:Callable=(lambda:1),
breakTies:Callable[[List[Action]],Action]=random.choice
)->Action:
'''
Search for the best action to take given a state.
The search is stopped when the maxIteration or maxTimeSec is hitted.
Args:
simPerIter: number of simulation(rollouts) from the chosen node.
breakTies: Function used to choose an node from multiple equally good node.
'''
self.root = Node(state, None)
self.simPerIter = simPerIter()
maxTime = maxTimeSec()
self.timeMax = time.time()+maxTime
self.maxIter = maxIteration()
self.breakTies = breakTies
# Spawn a process to IDS for an action
# Kill the process when time is up and return the latest action found
with Manager() as manager:
# Using a queue to share objects
q = manager.Queue()
p = Process(target=self._search, args=[q])
p.start()
# Usage: join([timeout in seconds])
p.join(maxTime)
if p.is_alive():
p.terminate()
p.join()
# Get the latest chosen action
action = None
while not q.empty(): action = q.get()
# If the search doesn't give any action, choose the first available action as the default
if not action:
action = self.expansionPolicy(state)[0]
print("Fail to search for an action - return the first possible action found.")
#print("Player take", state.getCurrentPlayerSign(), " action ", action)
return action
def _search(self, queueOfActions):
# Loop while have remaining iterations or time
iterCnt = 0
while iterCnt<self.maxIter and time.time()<self.timeMax:
self.oneIteration()
iterCnt+=1
########## Select the Best Action in this iteration #######
########## Select the best action based on its expected utilities ##########
if not self.root.children: continue
bestExpectedUtilities, bestActions = float('-inf'), []
epsilon = 0.00001 # Prevent numeric overflow
# The sequence of action follows the expansion policy used
for action, child in self.root.children.items():
if not child.utilities:
childUtilities=0
else:
childUtilities = sum([child.utilities[idx] for idx in self.utilityIdx]) if self.utilityIdx else sum(child.utilities)
expectedUtilities = childUtilities/(child.numVisits+epsilon)
if expectedUtilities>bestExpectedUtilities:
bestActions = [action]
bestExpectedUtilities = expectedUtilities
elif expectedUtilities==bestExpectedUtilities:
bestActions.append(action)
action = self.breakTies(bestActions)
queueOfActions.put(action)
####### End Selecting the Best Action in this iteration #######
# Nothing to return, just end the execution
return
def oneIteration (self)->None:
'''
Perform one iteration of leaf node selection, expansion (if applicable), simulation, and backpropagation.
Only expand a node if it was visited before. Otherwise, perform simulation on the node that wasn't visited.
Simulation is performed `self.simPerIter` times
'''
node = self.selection()
# If the node was visited, and expandable (not terminal)
if node.numVisits>0 and not node.state.isTerminal():
node = self.expansion(node)
for i in range(self.simPerIter):
utility = self.simulation(node)
self.backpropagation(node, utility, self.utilitySumFunc)
def selection(self)->Node:
'''
Select and returns a leaf node.
Traverse from the root node to the leaf node, following self.selectionPolicy
'''
# Select a leaf node starting from the root node
node = self.root
depth = 0
while not node.isLeaf():
node = self.selectionPolicy(node, depth)
depth+=1
return node
def expansion(self, node:Node)->Node:
'''
Fully expands a node and return one of its child node.
Expands a node following self.expansionPolicy.
Returns the first children node.
'''
# Fully expand the tree ahead of time
actions = self.expansionPolicy(node.state)
for action in actions:
# Add a new state to the tree
stateAfterAction = node.state.takeAction(action)
newNode = Node(stateAfterAction, node)
node.children[action] = newNode
# Choose the firstAction newNode to return
return node.children[actions[0]]
def simulation(self, node:Node)->Any:
'''
Returns the rewards received from this simulation
'''
return self.rollOutPolicy(node.state)
def backpropagation(self, node:Node, utility:Any, utilitySumFunc:Callable=sum)->None:
'''
BackPropagate results to parent nodes.
Update a node's Utility and Number of being visited.
utilitySumFunc: function used to sum two utilities. The default is sum()
'''
while node:
node.numVisits+=1
if node.utilities:
node.utilities = utilitySumFunc(node.utilities,utility)
else:
node.utilities = utility
node = node.parent
def linearExpansion(state:State)->List[Action]:
    '''
    Default expansion policy: expand children in exactly the order the
    state reports its legal actions (no reordering heuristic).
    '''
    return state.getActions()
def randomRollout(state:State)->Any:
    '''
    Starting from the provided state, randomly take actions
    until the terminal state.
    Returns the terminal state utility.
    '''
    # Deep copy via a pickle round-trip (chosen here over copy.deepcopy for
    # rollout speed) so the in-place takeAction(preserveState=False) calls
    # below cannot corrupt the caller's state object.
    state = pickle.loads(pickle.dumps(state))
    while not state.isTerminal():
        actions = state.getActions()
        '''
        n = len(actions)
        t = int(str(time.time())[-1])
        chosenIdx = t%n
        action = actions[chosenIdx]
        '''
        action = random.choice(actions)
        # preserveState=False mutates in place instead of copying each step.
        state = state.takeAction(action, preserveState=False)
    return state.getUtility()
class UCB:
    '''
    Given a parent node, returns a child node according to the UCB1 quantity.

    utilityIdx: Applicable if the utilities are encoded with multiple elements,
        each representing a different agent's utility.
        For example utility=(0,1,1). utilityIdx:=[2] means that only
        utility[2] is considered.
    explorationConstant: Weight of the UCB1 exploration term.
    breakTies: Function used to choose one node from multiple equally good nodes.
    '''
    def __init__(self,
                 utilityIdx:Optional[List[int]]=None,
                 explorationConstant:Union[float, int]=math.sqrt(2),
                 breakTies:Callable[[List[Action]],Action]=random.choice):
        # Fixed: __init__ was annotated as returning Node; it returns None.
        self.utilityIdx = utilityIdx
        self.explorationConstant = explorationConstant
        self.breakTies = breakTies

    def __call__(self, node:Node, depth:int)->Node:
        '''Select the child of `node` maximizing UCB1; ties broken by breakTies.'''
        bestUCB, bestChildNodes = float('-inf'), []
        # Small epsilon avoids division by zero for never-visited children.
        epsilon = 0.00001
        # The sequence of actions follows the expansion policy used.
        for _, child in node.children.items():
            if not child.utilities:
                childUtilities = 0
            else:
                # Shift the utilityIdx so that each player maximizes its own gain.
                numPlayers = len(child.utilities)
                # No shift at depth 0, numPlayers, 2*numPlayers, ...
                shift = depth % numPlayers
                if self.utilityIdx:
                    shiftedUtilityIdx = [(idx + shift) % numPlayers for idx in self.utilityIdx]
                    childUtilities = sum([child.utilities[idx] for idx in shiftedUtilityIdx])
                else:
                    childUtilities = sum(child.utilities)
            childExpectedUtility = childUtilities / (child.numVisits + epsilon)
            ucb = childExpectedUtility + self.explorationConstant * math.sqrt(
                math.log(node.numVisits) / (child.numVisits + epsilon))
            if ucb > bestUCB:
                bestChildNodes = [child]
                bestUCB = ucb
            elif ucb == bestUCB:
                bestChildNodes.append(child)
        return self.breakTies(bestChildNodes)
22334437985 | #Leetcode 226. Invert Binary Tree
#BFS
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution1:
    def invertTree(self, root: TreeNode) -> TreeNode:
        """Invert a binary tree iteratively with a breadth-first queue."""
        if root is None:
            return None
        pending = deque()
        pending.append(root)
        while pending:
            node = pending.popleft()
            # Mirror this node's subtrees with a single tuple swap.
            node.left, node.right = node.right, node.left
            if node.left:
                pending.append(node.left)
            if node.right:
                pending.append(node.right)
        return root
#DFS
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution2:
    def invertTree(self, root: TreeNode) -> TreeNode:
        """Invert a binary tree by recursively swapping each node's children."""
        if root is None:
            return None
        inverted_right = self.invertTree(root.right)
        inverted_left = self.invertTree(root.left)
        root.left = inverted_right
        root.right = inverted_left
        return root
# Advent of Code 2018, day 1: frequency calibration.
# Fixed: the golfed original leaked the file handle and appended to the
# list it was iterating to fake an endless cycle of the input.
from itertools import cycle

with open('input') as fh:
    changes = [int(line) for line in fh]

# Part 1: net frequency after a single pass over the input.
print(sum(changes))

# Part 2: first cumulative frequency reached twice, cycling the input
# as many times as needed. As in the original, the starting frequency 0
# is not recorded as "seen".
frequency = 0
seen = set()
for change in cycle(changes):
    frequency += change
    if frequency in seen:
        break
    seen.add(frequency)
print(frequency)
| halvarsu/advent-of-code | python/day1/day1golf.py | day1golf.py | py | 123 | python | en | code | 0 | github-code | 13 |
34652749503 |
# Store the names of all .txt files from the assignment file list in a
# SQLite database and echo each stored name to the console.
import sqlite3 as sql

# Connect, creating db_strings.db on first run.
conn = sql.connect('db_strings.db')

# Create the table if it does not exist yet. The connection context
# manager commits on success, so no explicit commit() is needed.
with conn:
    cur = conn.cursor()
    cur.execute("CREATE TABLE IF NOT EXISTS tbl_string( \
                ID INTEGER PRIMARY KEY AUTOINCREMENT, \
                col_strings TEXT \
                )")

# File list from the assignment.
fileList = ('information.docx', 'Hello.txt', 'myImage.png', \
            'myMovie.mpg', 'World.txt', 'data.pdf', 'myPhoto.jpg')

# Insert every .txt file name in one transaction (the original committed
# per row) and print it.
with conn:
    cur = conn.cursor()
    for name in fileList:
        if name.endswith('.txt'):
            cur.execute("INSERT INTO tbl_string (col_strings) VALUES (?)", (name,))
            print(name)

# Close the connection.
conn.close()
| markedin/Python-Projects | DatabaseSubmissionAssignment/dbSubAssignment.py | dbSubAssignment.py | py | 1,049 | python | en | code | 0 | github-code | 13 |
42642146444 | from mwpyeditor.core import mwglobals
from mwpyeditor.core.mwrecord import MwRecord
class MwLIGH(MwRecord):
"""A light (LIGH) record: model/icon/carry stats, light radius, RGB
colour and behaviour flags decoded from the LHDT subrecord."""
def __init__(self):
MwRecord.__init__(self)
# Identification and presentation fields.
self.id_ = ''
self.model = ''
self.name = None
self.icon = None
# Numeric LHDT fields, filled in by load().
self.weight = 0.0
self.value = 0
self.time = 0
self.radius = 0
# Light colour, one 0-255 channel each.
self.red = 0
self.green = 0
self.blue = 0
# Behaviour flags decoded from the LHDT bitmask.
self.dynamic = False
self.can_carry = False
self.negative = False
self.flicker = False
self.fire = False
self.off_default = False
self.flicker_slow = False
self.pulse = False
self.pulse_slow = False
self.sound_id = None
self.script = None
def load(self):
"""Populate this record's fields from its subrecords and register the
record in the global object-id table."""
self.id_ = self.parse_string('NAME')
self.model = self.parse_string('MODL')
self.name = self.parse_string('FNAM')
self.icon = self.parse_string('ITEX')
# LHDT layout: float weight, uint value, int time, uint radius at fixed
# offsets, then three single-byte colour channels and a flag mask at 20.
self.weight = self.parse_float('LHDT')
self.value = self.parse_uint('LHDT', start=4)
self.time = self.parse_int('LHDT', start=8)
self.radius = self.parse_uint('LHDT', start=12)
self.red = self.parse_uint('LHDT', start=16, length=1)
self.green = self.parse_uint('LHDT', start=17, length=1)
self.blue = self.parse_uint('LHDT', start=18, length=1)
# Decode the individual flag bits from the mask.
flags = self.parse_uint('LHDT', start=20)
self.dynamic = (flags & 0x1) == 0x1
self.can_carry = (flags & 0x2) == 0x2
self.negative = (flags & 0x4) == 0x4
self.flicker = (flags & 0x8) == 0x8
self.fire = (flags & 0x10) == 0x10
self.off_default = (flags & 0x20) == 0x20
self.flicker_slow = (flags & 0x40) == 0x40
self.pulse = (flags & 0x80) == 0x80
self.pulse_slow = (flags & 0x100) == 0x100
self.sound_id = self.parse_string('SNAM')
self.script = self.parse_string('SCRI')
# Make the record findable by its id.
mwglobals.object_ids[self.id_] = self
def wiki_entry(self):
# Wiki-markup table row; the colour cell background is the light's RGB hex.
# NOTE(review): presumably targets a UESP-style wiki table - confirm format.
return (f"""|-\n
|[[File:TD3-icon-light-{self.icon}.png]]\n
|{{{{Small|{self.id_}}}}}\n
|{mwglobals.decimal_format(self.weight)}||{self.value}||{{{{BG|#
{self.red:02X}{self.green:02X}{self.blue:02X}}}}}|'''{self.radius}'''||{self.time}""")
def record_details(self):
# Human-readable field dump for display/inspection.
return MwRecord.format_record_details(self, [
("|Name|", '__str__'),
("\n|Model|", 'model'),
("\n|Icon|", 'icon'),
("\n|Weight| {:.2f}", 'weight'),
("\n|Value|", 'value'),
("\n|Time|", 'time'),
("\n|Radius|", 'radius'),
("\n|Color|", 'red'), (", {}", 'green'), (", {}", 'blue'),
("\n|Dynamic|", 'dynamic', False),
("\n|Can Carry|", 'can_carry', False),
("\n|Negative|", 'negative', False),
("\n|Flicker|", 'flicker', False),
("\n|Fire|", 'fire', False),
("\n|Off by Default|", 'off_default', False),
("\n|Flicker Slow|", 'flicker_slow', False),
("\n|Pulse|", 'pulse', False),
("\n|Pulse Slow|", 'pulse_slow', False),
("\n|Sound ID|", 'sound_id'),
("\n|Script|", 'script')
])
def __str__(self):
# Carryable lights show their in-game name; fixtures are id-only.
return f"{self.name} [{self.id_}]" if self.can_carry else f"[{self.id_}]"
def diff(self, other):
# Field-by-field comparison against another LIGH record.
return MwRecord.diff(self, other, ['model', 'name', 'icon', 'weight', 'value', 'time', 'radius', 'red', 'green',
'blue', 'dynamic', 'can_carry', 'negative', 'flicker', 'fire', 'off_default',
'flicker_slow', 'pulse', 'pulse_slow', 'sound_id', 'script'])
| Dillonn241/MwPyEditor | mwpyeditor/record/mwligh.py | mwligh.py | py | 3,744 | python | en | code | 4 | github-code | 13 |
33524122338 | #backup.py
#-*- coding:utf-8 -*-
import subprocess
from dumpXml import dumpXml
# 1. get xml & parsing
def getXml():
    """Dump the current device UI via adb/uiautomator, parse the XML, and
    return the parsed view list (each entry is also printed).

    Requires a connected device reachable through `adb`.
    """
    def _adb(cmd):
        # Run one adb command and return its stdout bytes.
        proc = subprocess.Popen(
            cmd,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        out, _err = proc.communicate()
        return out

    # Ask uiautomator to write the hierarchy to /sdcard/window_dump.xml,
    # then read the dump back directly (removed dead commented-out
    # `adb pull` variant from the original).
    _adb("adb shell uiautomator dump")
    out = _adb("adb shell cat /sdcard/window_dump.xml")

    # Attributes we do not care about when summarizing a view node.
    omit = ['index', 'class', 'package', 'checkable', 'checked', 'clickable', 'enabled', 'focusable', 'focused', 'scrollable', 'long-clickable', 'password', 'selected', 'bounds', ]
    parsedList = parseXml(out, omit)
    for entry in parsedList:
        print(entry + '\n')
    return parsedList
# input: .txt xml file, attributes to be omitted
# output: .txt parsed xml file
# parse the given xml
def parseXml(xml, omit):
    """Split raw uiautomator XML bytes on '<' tags, keep only View nodes,
    and drop every attribute whose text contains a word from `omit`.

    Returns a list of trimmed attribute strings, one per view node.
    """
    fragments = xml.decode('utf-8').split('<')
    parsedList = []
    for fragment in fragments:
        # Keep only fragments whose class attribute names a view.
        if not isViewClass(fragment):
            continue
        kept = ''
        for attribute in fragment.split(' '):
            if not any(word in attribute for word in omit):
                kept += attribute + " "
        parsedList.append(kept)
    # Strip the leading 'node ' prefix and, for self-closing tags, the tail.
    return [entry[5:-3] if entry[-2] == ">" else entry[5:] for entry in parsedList]
### boolean function, whether a class of p is view(or View)
### input: xml file
### output: boolean
def isViewClass(p):
    """Return True if the fragment's class="..." attribute names a view.

    `p` is one '<'-delimited XML fragment. Fragments without a class
    attribute, or with an unterminated value, are not view nodes.
    """
    marker = 'class="'
    if marker not in p:
        return False
    class_value = p.split(marker, 1)[1]
    end = class_value.find('"')
    if end == -1:
        # Fixed: the original crashed with an unbound `index` when the
        # closing quote was missing; treat that as "not a view".
        return False
    return 'view' in class_value[:end] or 'View' in class_value[:end]
def camelCaseBreak():
# TODO: not implemented yet; placeholder return value.
return 1
def removeStopWords():
# TODO: not implemented yet; placeholder return value.
return 1
def stemming():
# TODO: not implemented yet; placeholder return value.
return 1
def computeTf():
# TODO: not implemented yet; placeholder return value.
return 1
getXml() | sjoon2455/smartMonkey_login | backup.py | backup.py | py | 2,324 | python | en | code | 0 | github-code | 13 |
28250288576 | # https://www.udemy.com/course/100-days-of-code/learn/lecture/19658862#overview
# Day 11 - Blackjack game
############### Blackjack Project #####################
#Difficulty Normal 😎: Use all Hints below to complete the project.
#Difficulty Hard 🤔: Use only Hints 1, 2, 3 to complete the project.
#Difficulty Extra Hard ðŸ˜: Only use Hints 1 & 2 to complete the project.
#Difficulty Expert 🤯: Only use Hint 1 to complete the project.
############### Our Blackjack House Rules #####################
## The deck is unlimited in size.
## There are no jokers.
## The Jack/Queen/King all count as 10.
## The Ace can count as 11 or 1.
## Use the following list as the deck of cards:
## cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
## The cards in the list have equal probability of being drawn.
## Cards are not removed from the deck as they are drawn.
## The computer is the dealer.
# Solo attempt
# If the player score is above 21, they lose ("bust")
# If the score of the dealer is < 17, they must take another card
# If in the end, the player and dealer have the same score, it's a draw
# Ace is counted as 11 below 21, or 1 if the count ends up above 21
# The player is given 2 cards
# The dealer has one card visible, another hidden to the player
# The player can ask to be given another card, or can stand (stop with their current score)
# The dealer reveal their second card, if score < 17, they must take another card, hidden unless the player did stand
# Repeat 2 previous phases
import random
def blackjack():
"""Play one interactive game of Blackjack between the user and a dealer.

House rules (see the comment block above): unlimited deck, J/Q/K count
as 10, an Ace counts as 11 and is demoted to 1 when the hand busts,
the dealer draws while below 17. Each hand is a dict holding its
cards, running score, and blackjack status.
"""
# NOTE(review): the helper parameter name `dict` shadows the builtin.
def add_card(dict, nb):
# Adds nb of cards to dict, and updates the status of dict
hand = dict["cards"]
hand.extend(random.choices(cards, k=nb))
score = sum(hand)
dict["score"] = score
if score == 21:
dict["blackjack"] = True
def swap_ace(dict):
# Swaps an Ace value of 11 to a 1 and updates the status of dict
hand = dict["cards"]
ace_index = hand.index(11) # After correction, not necessary, could be remplaced with a .remove(11)
hand[ace_index] = 1 # then an .append(1)
score = sum(hand)
dict["score"] = score
if score == 21:
dict["blackjack"] = True
def end_game(dict):
# Checks whether the game continues or an end state is reached
# (demoting Aces one by one while the hand is over 21).
hand = dict["cards"]
while True:
score = dict["score"]
if score > 21:
contain_ace = False
if 11 in hand:
contain_ace = True
if contain_ace:
swap_ace(dict)
else:
return True
elif score == 21:
return True
else:
return False
# Unlimited deck: cards are drawn with replacement from this list.
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
player = {
"cards": [],
"score": 0,
"blackjack": False,
}
dealer = {
"cards": [],
"score": 0,
"blackjack": False,
}
# Opening deal: two cards each.
add_card(player, 2)
add_card(dealer, 2)
game_over = False
while not game_over:
# The dealer's last card stays hidden from the player.
dealer_cards_copy = dealer["cards"].copy()
dealer_cards_copy.pop()
print(f"This is your hand: {player['cards']}")
print(f"This is the dealer's hand: {dealer_cards_copy}")
game_over = end_game(player) or end_game(dealer)
if game_over:
if player["score"] > 21:
print("Your score is above 21. You lose.")
return
elif player["blackjack"] and dealer["blackjack"]:
print(f"This is the dealer's hand: {dealer['cards']}")
print("It's a draw.")
return
elif player["blackjack"]:
# Some clarifications may be needed, can the dealer try to get a blackjack as well ?
# if so, the code here should instead jump to the if player_stood part
print(f"This is the dealer's hand: {dealer['cards']}")
print("You win.")
return
elif dealer["blackjack"]:
print(f"This is the dealer's hand: {dealer['cards']}")
print("You lose.")
return
input_check = True
player_stood = False
while input_check:
draw_more_input = input(
"Type 'y' if you want to draw 1 more card, else type 'n' to stop with your current hand: ")
if draw_more_input == "y":
# Will loop back to the beginning until an end state or the player chooses to stand
add_card(player, 1)
end_game(player)
input_check = False
elif draw_more_input == "n":
player_stood = True
input_check = False
else:
print("Wrong input.")
# Only active if the player decide to stand, passing the turn to the dealer
if player_stood:
while dealer["score"] < 17:
add_card(dealer, 1)
print(f"This is the dealer's hand: {dealer['cards']}")
game_over = end_game(player) or end_game(dealer)
if game_over:
if dealer["score"] > 21:
print("The dealer score is above 21. You win.")
return
elif dealer["blackjack"]:
print("You lose.")
return
# Final score check
game_over = True
if player["score"] == dealer["score"]:
print(f"This is your hand: {player['cards']}")
print(f"This is the dealer's hand: {dealer['cards']}")
print("It's a draw.")
return
elif player["score"] > dealer["score"]:
print(f"This is your hand: {player['cards']}")
print(f"This is the dealer's hand: {dealer['cards']}")
print("You win.")
return
else:
print(f"This is your hand: {player['cards']}")
print(f"This is the dealer's hand: {dealer['cards']}")
print("You lose.")
return
16791698368 | """ Network formation model with preference parameter
@Author: Daniel Roncel Díaz
Script with functions to run load data and create plots.
"""
import numpy as np
import statistics
import math
from collections import Counter
import pickle
import matplotlib.pyplot as plt
## Utils
# Series colours: index 0 = minority class (orange), index 1 = majority class (blue).
base_colors = ['orange', 'blue']
def flatten(t):
    """Concatenate a sequence of sequences into a single flat list.

    e.g. [[1, 2, 3], [4, 5, 6]] --> [1, 2, 3, 4, 5, 6]
    """
    flat = []
    for sub in t:
        flat.extend(sub)
    return flat
def get_folder_name(n, m, m0, c0, p0, p0i, p1i, beta):
    """
    Given the parameters of the model, return the name of the folder
    that contains (or will contain) it.
    """
    # Interleave labels and values; joining with '_' reproduces the
    # 'n_<n>_m_<m>_...' naming scheme exactly.
    pieces = ['n', str(n), 'm', str(m), 'm0', str(m0), 'c0', str(c0),
              'p0', str(p0), 'p0i', str(p0i), 'p1i', str(p1i), 'beta', str(beta)]
    return '_'.join(pieces)
def load_simulation_data(n, m, n_simulations, folder_name):
"""
Given the directory where the results of an experiment were saved (see
compute_simulation function in model.py) returns a list with the degrees
of the nodes of each class for each experiment.
Parameters:
n: Number of nodes of the output graph.
m: Edges attached to the new node in each step.
n_simulations: Number of simulations of the experiment.
folder_name: Path of the directory to save the graph and the node classes.
Return:
G: Example of networkx graph.
degrees_majority: List of lists of the degrees of the nodes of the majority class
for each simulation.
degrees_minority: List of lists of the degrees of the nodes of the minority class
for each simulation.
degree_growth_majority: Mean degree of the majority class at each timestep.
degree_growth_minority: Mean degree of the minority class at each timestep.
sorted_degree_per_class: List of one list for each simulation of the class of each
node sorted by degree non-descending.
"""
# Per-simulation pickles produced by compute_simulation: graphs, node
# classes, and per-timestep class degrees.
G_simulation = pickle.load(open(folder_name + '/G_simulation.pickle', 'rb'))
c_simulation = pickle.load(open(folder_name + '/c_simulation.pickle', 'rb'))
k_majority_simulation = pickle.load(open(folder_name + '/k_majority_simulation.pickle', 'rb'))
k_minority_simulation = pickle.load(open(folder_name + '/k_minority_simulation.pickle', 'rb'))
# degrees_majority[i] = list of the degrees of the nodes of the majority class
# at the end of the i-th experiment. Analogous definition for degrees_minority[i]
degrees_majority = []
degrees_minority = []
# save at index i the degree of each class at timestep i
degree_growth_majority = np.zeros(n)
degree_growth_minority = np.zeros(n)
# sorted_degree_per_class[i][j] = 0 if the j-th node with largest degree in the
# i-th simulation belongs to the majority class.
# sorted_degree_per_class[i][j] = 1 otherwise.
# sorted_degree_per_class[i][0] is the class of the node with largest degree
# in the i-th simulation
sorted_degree_per_class = np.zeros((n_simulations, n))
for i in range(n_simulations):
G, c, k_majority, k_minority = G_simulation[i], c_simulation[i], k_majority_simulation[i], k_minority_simulation[i]
nodes_degree = G.degree()
# list with the degrees of only the nodes of the majority class
d_majority = sorted([degree for node, degree in nodes_degree if c[node] == 0])
# analogous for the minority class
d_minority = sorted([degree for node, degree in nodes_degree if c[node] == 1])
# store them
degrees_majority.append(d_majority)
degrees_minority.append(d_minority)
# currently, degree_growth_majority[i] = sum of the degree of the majority class at timestep i
# accross all simulations
degree_growth_majority += np.array(k_majority)
degree_growth_minority += np.array(k_minority)
## sorted_degree_per_class[i] = 0 if the i-th node with larger degree is from the majority class. 1 otherwise.
node_degree = sorted( dict(G.degree()).items(), key=lambda item: item[1], reverse=True)
degree_per_class = np.array([c[nd[0]] for nd in node_degree])
# NOTE(review): `[i:,]` assigns rows i..end; each row still ends up holding
# its own simulation because later iterations overwrite, but `[i, :]`
# would express the intent directly - confirm.
sorted_degree_per_class[i:,] = degree_per_class
# Compute the mean degree at each timestep
degree_growth_majority = degree_growth_majority / n_simulations
degree_growth_minority = degree_growth_minority / n_simulations
return G, degrees_majority, degrees_minority, degree_growth_majority, degree_growth_minority, sorted_degree_per_class
def load_data_for_plots(n, m, m0, c0, p0, p1, p0i, p1i, beta, n_simulations, folder_name):
"""
Load the data of several experiments at the same time to simplify creating the plots.
Parameters:
n: Number of nodes of the output graph
m: Edges attached to the new node in each step
m0: Number of nodes of the initial graph
c0: List with the probability of a node to belong to the majority class in each experiment.
p0: List with the probability of a node of having prior preference for the majority class
in each experiment.
p1: List with the probability of a node of having prior preference for the minority class
in each experiment.
p0i: List with the ratio of nodes with prior preference for the majority class that finally
have no preference for any class in each experiment.
p1i: List with the ratio of nodes with prior preference for the minority class that finally
have no preference for any class in each experiment.
beta: Parameter to calibrate the probability of attracting an edge.
n_simulations: Number of simulations of the experiment.
folder_name: Path of the directory to save the graph and the node classes.
Returns:
avg_degree_majority: Average normalized degree of the majority class of each experiment.
avg_degree_minority: Average normalized degree of the minority class of each experiment.
std_degree_majority: Standard deviation of the normalized degree of the majority class
of each experiment.
std_degree_minority: Standard deviation of the normalized degree of the minority class
of each experiment.
degree_dist_majority: Data for the degree distribution plot of the majority class.
degree_dist_minority: Data for the degree distribution plot of the minority class.
degree_growth_majority_experiment: List of lists with the average degree of the majority class in
each timestep of each experiment.
degree_growth_minority_experiment: List of lists with the average degree of the minority class in
each timestep of each experiment.
avg_degree_majority_list: List of lists of the average degree of the majority class on each
simulation of each experiment.
avg_degree_minority_list: List of lists of the average degree of the minority class on each
simulation of each experiment.
results: Percentage of nodes of the minority class within the top nodes with largest degree.
step: Defines which percentage of nodes with largest degree has been used to create 'results' variable.
"""
# avg_degree_majority[i] = mean value of majority_degree / (majority_degree + minority_degree)
# across all the simulations of the i-th experiment.
# Analogous definition for avg_degree_minority[i]
avg_degree_majority = []
avg_degree_minority = []
# std_degree_majority[i] = standard deviation of the degree of the majority class in the i-th
# experiment. Analogous definition for std_degree_minority[i].
std_degree_majority = []
std_degree_minority = []
# degree_dist_majority[i][j] = mean number of nodes of the majority class with degree j in the i-th experiment
# Analogous definition for degree_dist_minority
degree_dist_majority = []
degree_dist_minority = []
# degree_growth_majority_experiment[i][j] = Mean degree of the majority class in the i-th experiment at timestep j.
# Analogous for the degree_growth_minority_experiment.
degree_growth_majority_experiment = []
degree_growth_minority_experiment = []
# avg_degree_majority_list[i][j] = average degree of the majority class in the j-th simulation of the i-th experiment.
# analogous definition of avg_degree_minority_list.
avg_degree_majority_list = []
avg_degree_minority_list = []
# e.g. if steps[0] = 0.1 --> we will measure the percentage of nodes in the minority class in the 10%
# nodes with largest degree
step = list(np.linspace(0, 1, 11))[1:]
# results[i] = for the specified value of h, returns the percentage of nodes in the minority class
# with largest degree within the top defined by variable 'step'
results = np.zeros((len(p0), len(step)))
for i in range(len(p0)):
# get the full path where the results of the i-th experiment are stored
final_path = folder_name + '/' + get_folder_name(n=n, m=m, m0=m0, c0=c0[i], p0=p0[i], p0i=p0i[i], p1i=p1i[i], beta=beta)
## Get the degrees of the simulations
_, degrees_majority, degrees_minority, degree_growth_majority, degree_growth_minority, sorted_degree_per_class = load_simulation_data(n, m, n_simulations, final_path)
degree_growth_majority_experiment.append(degree_growth_majority)
degree_growth_minority_experiment.append(degree_growth_minority)
# lists of the average degree of the majority class on each simulation of this experiment
k_majority = []
# lists of the average degree of the minority class on each simulation of this experiment
k_minority = []
# For each simulation, compute the mean average degree of each class.
for j in range(n_simulations):
k = sum(degrees_majority[j]) + sum(degrees_minority[j])
k_majority.append( sum(degrees_majority[j]) / k)
k_minority.append( sum(degrees_minority[j]) / k)
# Compute the mean degree of the majority class across all simulations of this experiment.
avg_degree_majority.append(statistics.mean(k_majority))
# Compute the mean degree of the minority class across all simulations of this experiment.
avg_degree_minority.append(statistics.mean(k_minority))
avg_degree_majority_list.append(k_majority)
avg_degree_minority_list.append(k_minority)
# Estimate standard deviation of the mean degree of each class
std_degree_majority.append(statistics.stdev(k_majority))
std_degree_minority.append(statistics.stdev(k_minority))
##Data for the degree distribution plot
# merge all the degrees obtained by nodes of the majority class in all the simulations
# of this experiment
degrees_majority = flatten(degrees_majority)
max_majority = max(degrees_majority)
ctr_max = Counter(degrees_majority)
# data_majority[i] = number of nodes with degree i in some simulation
data_majority = [ctr_max[i] if i in ctr_max else 0 for i in range(max_majority)]
degree_dist_majority.append(data_majority)
# merge all the degrees obtained by nodes of the minority class in all the simulations
# of this experiment
degrees_minority = flatten(degrees_minority)
max_minority = max(degrees_minority)
ctr_max = Counter(degrees_minority)
# data_minority[i] = number of nodes with degree i in some simulation
data_minority = [ctr_max[i] if i in ctr_max else 0 for i in range(max_minority)]
degree_dist_minority.append(data_minority)
# sorted_degree_per_class[i][j] = mean number of nodes of the minority class between the (j+1)-th nodes
# with largest degree in this experiment.
sorted_degree_per_class = np.cumsum(sorted_degree_per_class,axis=1).mean(axis=0)
# results[i][j] = mean number of nodes of the minority class in the (step[j]*n) top nodes with largest degree
for j in range(len(step)):
results[i, j] = sorted_degree_per_class[math.floor(step[j]*n) - 1] / math.floor(step[j] * n)
return avg_degree_majority, avg_degree_minority, std_degree_majority, std_degree_minority, degree_dist_majority, degree_dist_minority, degree_growth_majority_experiment, degree_growth_minority_experiment, results, step, avg_degree_majority_list, avg_degree_minority_list
## Functions for plot
def class_degree_barplot(avg_degree_minority, avg_degree_majority, std_degree_minority, std_degree_majority, c0, c1, p0, p1, p0i, p1i, figsize=(16,5)):
"""One bar subplot per experiment showing the normalized degree of each
class with error bars; dashed lines mark the class sizes c0/c1."""
# NOTE(review): `global` is unnecessary here, base_colors is only read.
global base_colors
# Average degree barplot
fig, axes = plt.subplots(1, len(c0), figsize=figsize, constrained_layout=True)
fig.suptitle('Grau de cada classe', fontsize=15)
for i in range(len(c0)):
x = ['Minoritària', 'Majoritària']
y = [avg_degree_minority[i], avg_degree_majority[i]]
error = [std_degree_minority[i], std_degree_majority[i]]
axes[i].bar(x,
y,
yerr=error,
align='center',
alpha=0.8,
ecolor='black',
capsize=10,
color=base_colors)
#axes[i].set_title('P_0=%.2f, P_1=%.2f' % (p0[i], p1[i]))
#axes[i].set_title('C_0=%.2f, C_1=%.2f' % (c0[i], c1[i]))
axes[i].set_title('C_0=%.2f, P_0=%.2f, P_0i=%.2f, P_1i=%.2f' % (c0[i], p0[i], p0i[i], p1i[i]))
axes[i].set_ylabel('K_i / K')
axes[i].set_ylim(0.0, 1.0)
# Reference lines
axes[i].axhline(c0[i], linestyle='--', color='b')
axes[i].axhline(c1[i], linestyle='--', color='y')
axes[i].grid()
plt.show()
def class_abs_degree_barplot(avg_degree_minority, avg_degree_majority, std_degree_minority, std_degree_majority, c0, c1, p0, p1, p0i, p1i, figsize=(16,5)):
"""Variant of class_degree_barplot with absolute (non-normalized) degree
on the y axis."""
# NOTE(review): `global` is unnecessary here, base_colors is only read.
global base_colors
# Average degree barplot
fig, axes = plt.subplots(1, len(c0), figsize=figsize, constrained_layout=True)
fig.suptitle('Grau de cada classe', fontsize=15)
for i in range(len(c0)):
x = ['Minoritària', 'Majoritària']
y = [avg_degree_minority[i], avg_degree_majority[i]]
error = [std_degree_minority[i], std_degree_majority[i]]
axes[i].bar(x,
y,
yerr=error,
align='center',
alpha=0.8,
ecolor='black',
capsize=10,
color=base_colors)
#axes[i].set_title('P_0=%.2f, P_1=%.2f' % (p0[i], p1[i]))
#axes[i].set_title('C_0=%.2f, C_1=%.2f' % (c0[i], c1[i]))
axes[i].set_title('C_0=%.2f, P_0=%.2f, P_0i=%.2f, P_1i=%.2f' % (c0[i], p0[i], p0i[i], p1i[i]))
axes[i].set_ylabel('K_i')
#axes[i].set_ylim(0.0, 50000)
# Reference lines
# NOTE(review): c0/c1 are class fractions, drawn here on an absolute
# degree axis - confirm these reference lines are meaningful.
axes[i].axhline(c0[i], linestyle='--', color='b')
axes[i].axhline(c1[i], linestyle='--', color='y')
axes[i].grid()
plt.show()
def degree_distribution(degree_dist_minority, degree_dist_majority, c0, c1, p0, p1, p0i, p1i, figsize=(16,5)):
"""One log-log subplot per experiment with the degree distribution of
both classes (minority in base_colors[0], majority in base_colors[1])."""
fig, axes = plt.subplots(1, len(c0), figsize=figsize, constrained_layout=True)
fig.suptitle('Distribució de grau de cada classe', fontsize=16)
for i in range(len(c0)):
# Plot the dist. of the minority class
degree_hist = np.array(degree_dist_minority[i], dtype=float)
# NOTE(review): counts are divided by the number of degree bins, not by
# the number of nodes - confirm this is the intended normalization.
degree_prob = degree_hist / len(degree_dist_minority[i])
axes[i].loglog(np.arange(degree_prob.shape[0]),degree_prob,'.', color=base_colors[0], alpha=0.8)
# Plot the dist. of the majority class
degree_hist = np.array(degree_dist_majority[i], dtype=float)
degree_prob = degree_hist / len(degree_dist_majority[i])
axes[i].loglog(np.arange(degree_prob.shape[0]),degree_prob,'.', color=base_colors[1], alpha=0.8)
#axes[i].set_title('P_0=%.2f, P_1=%.2f' % (p0[i], p1[i]))
#axes[i].set_title('C_0=%.2f, C_1=%.2f' % (c0[i], c1[i]))
axes[i].set_title('C_0=%.2f, P_0=%.2f, P_0i=%.2f, P_1i=%.2f' % (c0[i], p0[i], p0i[i], p1i[i]))
axes[i].grid()
axes[i].set_xlabel('k')
axes[i].set_ylabel('prob(k)')
def degree_growth_plot(degree_growth_minority_experiment, degree_growth_majority_experiment, c0, c1, p0, p1, p0i, p1i, m0, n, figsize=(16,5)):
"""One subplot per experiment with the mean degree of each class as a
function of the timestep t (majority first, then minority)."""
fig, axes = plt.subplots(1, len(c0), figsize=figsize, constrained_layout=True)
fig.suptitle('Evolució del grau de cada classe', fontsize=16)
for i in range(len(c0)):
axes[i].plot([j for j in range(n)] ,degree_growth_majority_experiment[i], color=base_colors[1], linewidth=3, alpha=0.8)
axes[i].plot([j for j in range(n)] ,degree_growth_minority_experiment[i], color=base_colors[0], linewidth=3, alpha=0.8)
#axes[i].set_title('P_0=%.2f, P_1=%.2f' % (p0[i], p1[i]))
#axes[i].set_title('C_0=%.2f, C_1=%.2f' % (c0[i], c1[i]))
axes[i].set_title('C_0=%.2f, P_0=%.2f, P_0i=%.2f, P_1i=%.2f' % (c0[i], p0[i], p0i[i], p1i[i]))
axes[i].set_xlabel('t')
axes[i].set_ylabel('K_i')
axes[i].grid()
def minority_in_top_d(results, step, c0, c1, p0, p1, p0i, p1i):
    """Plot, for every experiment, the fraction of minority-class nodes
    among the top d% highest-degree nodes as a function of d."""
    n_experiments = len(p0)
    for exp_idx in range(n_experiments):
        plt.plot(step, results[exp_idx, :], alpha=0.8)
    plt.xlabel("% d")
    plt.ylabel("Num. of nodes of the minority class in the top d% with larger degree")
    plt.legend(["P_0=%.1f P_1=%.1f" % (k, v) for k, v in zip(p0, p1)])
    plt.axis([0, 1, 0, 0.7])
    # Reference line at the minority share of the LAST experiment (the
    # original reused the leaked loop variable here).
    plt.axhline(y=c1[n_experiments - 1], xmin=0, xmax=1, linestyle='--', color='k')
    plt.grid()
70165357779 | #!/usr/bin/env python
from __future__ import print_function
import sys
# WRONG
# Sum of 24632 numbers that cannot be expressed as the sum of two abundant numbers: 346398923
def is_perfect(num):
# Proper divisors sum exactly to num (e.g. 6, 28).
return sum(divisors(num)) == num
def is_abundant(num):
# Proper divisors sum to more than num (smallest is 12).
return sum(divisors(num)) > num
def is_deficient(num):
# Proper divisors sum to less than num.
return sum(divisors(num)) < num
def divisors(num):
    """Return the proper divisors of num in ascending order.

    Optimized from O(num) trial division to O(sqrt(num)) factor pairs;
    this function is called for every candidate up to 28123.
    """
    if num <= 1:
        return []
    divs = [1]
    i = 2
    # Every divisor i <= sqrt(num) pairs with num // i.
    while i * i <= num:
        if num % i == 0:
            divs.append(i)
            partner = num // i
            if partner != i and partner != num:
                divs.append(partner)
        i += 1
    return sorted(divs)
def factor(num):
# Recursively collect factors of num into a set.
# NOTE(review): `num / i` is true division on Python 3 and yields floats
# (the file otherwise uses Python 2 idioms like xrange); the result can
# also contain composite cofactors - confirm before relying on it.
factors = set()
i = 2
while i < num:
if num % i == 0:
factors.add(i)
factors.add(num / i)
num = num / i
factors = set.union(factors, factor(num))
i += 1
return factors
def is_sum_of_abundant(num):
    """Return True if num can be written as the sum of two abundant numbers.

    Fixed: the original only tried pairs of *factors* of num, but the two
    abundant summands need not divide num - this is the bug behind the
    WRONG total noted at the top of this file. The smallest abundant
    number is 12, so only summands in [12, num // 2] need checking.
    """
    for a in range(12, num // 2 + 1):
        if is_abundant(a) and is_abundant(num - a):
            return True
    return False
def main():
    """Demo the helper predicates, then sum every number that cannot be
    expressed as the sum of two abundant numbers (Project Euler 23)."""
    print("Is 12 perfect -> %s" % is_perfect(12))
    print("Is 12 abundant -> %s" % is_abundant(12))
    print("Is 12 deficient -> %s" % is_deficient(12))
    print("Is 24 sum of abundant -> %s" % is_sum_of_abundant(24))
    print("Is 25 sum of abundant -> %s" % is_sum_of_abundant(25))
    print("Is 20 sum of abundant -> %s" % is_sum_of_abundant(20))
    no_abundant_sums = []
    # 28123 is the classical bound above which every integer is the sum of
    # two abundant numbers. Fixed: `xrange` is Python 2 only and raised
    # NameError under Python 3 despite the print_function import.
    for i in range(1, 28123):
        if not is_sum_of_abundant(i):
            no_abundant_sums.append(i)
    print("Sum of %d numbers that cannot be expressed as the sum of two abundant numbers: %d" %
          (len(no_abundant_sums), sum(no_abundant_sums)))
main() | jarretraim/euler_py | 21-30/23.py | 23.py | py | 1,627 | python | en | code | 0 | github-code | 13 |
38711940671 | from tkinter import *
from tkinter import colorchooser
import random
tk=Tk()
canvas=Canvas(tk, width=500, height=500)
canvas.pack()
def func1():
# Draw five outline arcs with growing sweep angles (45 to 359 degrees).
canvas.create_arc(10,10, 200, 80, extent=45, style=ARC)
canvas.create_arc(10,80, 200, 160, extent=90, style=ARC)
canvas.create_arc(10,10, 200, 240, extent=135, style=ARC)
canvas.create_arc(10,10, 200, 320, extent=180, style=ARC)
canvas.create_arc(10,10, 200, 400, extent=359, style=ARC)
def func2():
# Draw one filled four-point polygon.
canvas.create_polygon(200,10,240,30, 120, 100, 140,120)
def func3():
    """Draw 100 random outline rectangles, then one rectangle in a
    user-chosen colour and one in green."""
    # Merged the two nearly identical nested helpers from the original;
    # fill_color=None reproduces the unfilled (outline-only) variant.
    def random_rectangle(width, height, fill_color=None):
        # Random top-left corner plus a random extent inside the bounds.
        x1 = random.randrange(width)
        y1 = random.randrange(height)
        x2 = x1 + random.randrange(width)
        y2 = y1 + random.randrange(height)
        if fill_color is None:
            canvas.create_rectangle(x1, y1, x2, y2)
        else:
            canvas.create_rectangle(x1, y1, x2, y2, fill=fill_color)

    for i in range(0, 100):
        random_rectangle(200, 200)
    # Fixed: askcolor() was called twice, popping the dialog twice and
    # discarding the first choice.
    c = colorchooser.askcolor()
    random_rectangle(400, 400, c[1])
    random_rectangle(400, 400, 'green')
# Wire each drawing routine to its own button.
btn=Button(tk, text="draw arc",command=func1)
btn.pack()
btn2=Button(tk, text="draw polygon", command=func2)
btn2.pack()
btn3=Button(tk, text="draw rectangle", command=func3)
btn3.pack()
# NOTE(review): btn4 has no command= handler, so clicking it does nothing —
# presumably it was meant to trigger a colored-rectangle routine; confirm.
btn4=Button(tk, text="draw color rectangle ")
btn4.pack()
| VigularIgnat/python | project ph/ark kolo duga.py | ark kolo duga.py | py | 1,445 | python | en | code | 0 | github-code | 13 |
16402762030 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: wxnacy@gmail.com
"""
命令补全
"""
from prompt_toolkit.completion import Completer
from prompt_toolkit.completion import Completion
from wapi.common import constants
from wapi.common.loggers import create_logger
from wapi.common.args import ArgumentParser
from .base import BaseCompleter
from .filesystem import ExecutableCompleter
from .word import WordCompleter as WapiWordCompleter
from wapi.argument import ArgumentParserFactory
from wpy.argument import CommandArgumentParser
class CommandCompleter(BaseCompleter):
    """Completer for top-level wapi commands and their arguments.

    Completion order: command names first, then config-function words, then
    argument-value completions, then path/module fallbacks.
    """
    logger = create_logger("CommandCompleter")

    def __init__(self, argparser, wapi):
        self.argparser = argparser
        self.wapi = wapi
        self.path_completer = ExecutableCompleter()

    def yield_words(self, words):
        """Yield completions built from a list of words (or Completion kwargs dicts)."""
        if words and isinstance(words[0], dict):
            words = [Completion(**o) for o in words]
        _completer = WapiWordCompleter(words)
        yield from self.yield_completer(_completer)

    def get_completions(self, document, complete_event):
        """Yield completions for the current prompt document.

        Errors are logged and swallowed so a completer bug never kills the
        interactive prompt.
        """
        super().get_completions(document, complete_event)
        self.document = document
        self.complete_event = complete_event
        try:
            self.argparser = ArgumentParserFactory.build_parser(document.text)
            self.logger.info('completer argparser %s', self.argparser.cmd)
            arg = self.argparser.parse_args(document.text)
            if arg.cmd == 'env':
                self.argparser.set_wapi(self.wapi)
            self.logger.info('args %s', arg)
            cmd = self.first_word
            all_cmds = list(ArgumentParserFactory.get_cmd_names())
            # Complete the command name itself.
            if cmd not in all_cmds:
                yield from self.yield_words(all_cmds)
                return
            word_for_completion = self.word_for_completion
            # Custom completion words supplied by the config function.
            words = self.wapi.config.get_function().get_completion_words(
                word_for_completion)
            if words:
                yield from self.yield_completer(WapiWordCompleter(words))
                return
            # Complete the value that follows an argument flag.
            words = self.argparser.get_completions_after_argument(self.wapi,
                word_for_completion)
            if words:
                yield from self.yield_words(words)
                return
            # Fall back to path completion or the parser's own suggestions.
            if word_for_completion in ('--config', '--root'):
                yield from self.yield_completer(self.path_completer)
            else:
                words = self.argparser.get_completions_after_cmd(arg,
                    word_for_completion)
                yield from self.yield_words(words)
        # BUG FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors.
        except Exception:
            import traceback
            self.logger.error(traceback.format_exc())
            self.logger.error(traceback.format_stack())
self.logger.error(traceback.format_stack())
| wxnacy/wpy | wpy/completion/command.py | command.py | py | 3,161 | python | en | code | 0 | github-code | 13 |
21586109661 | """
Hash Function
-------------
In data structure Hash, hash function is used to convert a string(or any other
type) into an integer smaller than hash size and bigger or equal to zero.
The objective of designing a hash function is to "hash" the key as
unreasonable as possible. A good hash function can avoid collision as less as
possible. A widely used hash function algorithm is using a magic number 33,
consider any string as a 33 based big integer like follow:
hashcode("abcd")
    = (ascii(a) * 33^3 + ascii(b) * 33^2 + ascii(c) * 33 + ascii(d)) % HASH_SIZE
    = (97 * 33^3 + 98 * 33^2 + 99 * 33 + 100) % HASH_SIZE
= 3595978 % HASH_SIZE
here HASH_SIZE is the capacity of the hash table (you can assume a hash table
is like an array with index 0 ~ HASH_SIZE-1).
Given a string as a key and the size of hash table, return the hash value of
this key.
Clarification
For this problem, you are not necessary to design your own hash algorithm
or consider any collision issue, you just need to implement the algorithm
as described.
Example 1:
    - Input: key="abcd", size = 1000
- Output: 978
- Explanation: (97*33^3 + 98*33^2 + 99*33 + 100*1)%1000 = 978
Example 2:
- Input: key="abcd", size = 100
- Output: 78
- Explanation: (97*33^3 + 98*33^2 + 99*33 + 100*1)%100 = 78
Reference:
- https://algorithm.yuanbin.me/zh-hans/math_and_bit_manipulation/hash_function.html
- https://www.lintcode.com/problem/hash-function/description
"""
import unittest
def hash_code(key, HASH_SIZE):
    """
    Return the hash value of given key and the size of hash table

    Treats the key as a base-33 number (Horner's method), reducing modulo
    HASH_SIZE after every step so intermediate values stay small.

    :param key: given key
    :type key: str
    :param HASH_SIZE: size of hash table
    :type HASH_SIZE: int
    :return: hash value
    :rtype: int
    """
    value = 0
    for ch in key:
        # hash(abc) = (33 * (33 * (33 * 0 + a) + b) + c) % HASH_SIZE,
        # with the modulus applied at each step.
        value = (value * 33 + ord(ch)) % HASH_SIZE
    return value
class TestHashFunction(unittest.TestCase):
    """Checks hash_code against the worked examples in the module docstring."""
    def test_hash_function(self):
        # (97*33^3 + 98*33^2 + 99*33 + 100) = 3595978 -> % 1000 and % 100
        self.assertEqual(978, hash_code('abcd', 1000))
        self.assertEqual(78, hash_code('abcd', 100))
if __name__ == '__main__':
unittest.main()
| corenel/lintcode | algorithms/128_hash_code.py | 128_hash_code.py | py | 2,291 | python | en | code | 1 | github-code | 13 |
6786162662 | #!/usr/bin/env python
# coding: utf-8
# In[7]:
# Notebook-exported plotting demo, cells In[7], In[20], In[26].
import numpy as np
import matplotlib.pyplot as plt

# In[7]: one straight segment from (a[0], a[1]) to (b[0], b[1]).
# BUG FIX: the original used plt here before importing matplotlib (the
# import only appeared in a later cell), raising NameError.
a = np.arange(40, 50)
b = np.arange(50, 60)

x_values = [a[0], b[0]]
y_values = [a[1], b[1]]
plt.plot(x_values, y_values)

# In[20]: two salesmen's daily sales on one chart.
sales_1 = [160, 150, 140, 145, 175, 165, 180]
sales_2 = [70, 90, 160, 150, 140, 145, 175]
# BUG FIX: the original plotted undefined names `sales1`/`sales2` (the lists
# are named sales_1/sales_2) and paired range(1, 6) — five x-values — with
# seven y-values; plot all seven days.
line_chart1 = plt.plot(range(1, 8), sales_1, '--')
line_chart2 = plt.plot(range(1, 8), sales_2, ':')
plt.title('Daily sales of Salesman1 and Salesman2')
plt.xlabel('Days')
plt.ylabel('Sales')
plt.legend(['Sales of salesman 1', 'Sales of salesman 2'], loc=4)
plt.show()

# In[26]: two figures of stacked subplots (3 axes, then 2 axes).
x = [1, 2, 3, 4]
y1 = [4, 3, 2, 1]
y2 = [10, 20, 30, 40]
y3 = [40, 30, 20, 10]
y4 = [1, 2, 1, 2]
y5 = [40, 70, 90, 70]
fig, ax = plt.subplots(3)
fig, ax1 = plt.subplots(2)
ax[0].plot(x, y1)
ax[1].plot(x, y2)
ax[2].plot(x, y3)
ax1[0].plot(x, y4)
ax1[1].plot(x, y5)
| RAJASOORYA/Data-Visualization-using-python | Day 1 Assignment.py | Day 1 Assignment.py | py | 871 | python | en | code | 0 | github-code | 13 |
23549556614 | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 4 13:02:53 2020
@author: tmuza
"""
# Importing Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# NOTE(review): sklearn.preprocessing.Imputer and the categorical_features
# argument of OneHotEncoder were removed in scikit-learn 0.22 — this script
# requires an old scikit-learn; modern code would use sklearn.impute.SimpleImputer
# and a ColumnTransformer.
from sklearn.preprocessing import Imputer, LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split

# Load the dataset: all but the last column are features, column 3 is the target.
dataset = pd.read_csv('Data.csv') # read the data set
x = dataset.iloc[:, :-1].values # independent variables
y = dataset.iloc[:, 3].values # dependent variable

# Fill missing numeric values (columns 1-2) with the column mean.
imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
imputer = imputer.fit(x[:, 1:3]) # fitting the data with missing values from index 1-2
x[:, 1:3] = imputer.transform(x[:, 1:3])

# Encode the categorical country column (index 0) and the target labels.
labelencoder_x = LabelEncoder()
x[:, 0] = labelencoder_x.fit_transform(x[:, 0]) # assigning the countries to encoded values
onehotencoder = OneHotEncoder(categorical_features=[0]) # specify which column you want to encode
x = onehotencoder.fit_transform(x).toarray()
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y) # encode y values

# Splitting the dataset into the train set and test set
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)

# Feature scaling: fit on train only, then apply the same transform to test.
scale_x = StandardScaler()
x_train = scale_x.fit_transform(x_train)
x_test = scale_x.transform(x_test)
| tmuzanenhamo/Machine-Learning-Algorithms | Data Preprocessing/Data Preprocessing.py | Data Preprocessing.py | py | 1,374 | python | en | code | 0 | github-code | 13 |
27188481113 | import sys
from collections import deque
input = sys.stdin.readline
N, M = map(int, input().split())
def bfsW(s):
    """BFS flood-fill of the white ('W') region containing cell s = [i, j];
    marks the module-level `visited` grid and returns (region size) ** 2."""
    offsets = ((0, 1), (1, 0), (0, -1), (-1, 0))
    frontier = deque([s])
    visited[s[0]][s[1]] = 1
    size = 1
    while frontier:
        ci, cj = frontier.popleft()
        for di, dj in offsets:
            ni, nj = ci + di, cj + dj
            if 0 <= ni < M and 0 <= nj < N and arr[ni][nj] == 'W' and visited[ni][nj] == 0:
                frontier.append([ni, nj])
                visited[ni][nj] = 1
                size += 1
    return size ** 2
def bfsB(s):
    """BFS flood-fill of the blue ('B') region containing cell s = [i, j];
    marks the module-level `visited` grid and returns (region size) ** 2."""
    offsets = ((0, 1), (1, 0), (0, -1), (-1, 0))
    frontier = deque([s])
    visited[s[0]][s[1]] = 1
    size = 1
    while frontier:
        ci, cj = frontier.popleft()
        for di, dj in offsets:
            ni, nj = ci + di, cj + dj
            if 0 <= ni < M and 0 <= nj < N and arr[ni][nj] == 'B' and visited[ni][nj] == 0:
                frontier.append([ni, nj])
                visited[ni][nj] = 1
                size += 1
    return size ** 2
# Read the M x N battlefield, then sum squared component sizes per colour.
arr = [list(input().strip()) for _ in range(M)]
visited = [[0] * N for _ in range(M)]
cnt_w = 0
cnt_b = 0
# Each unvisited cell starts a new connected component of its colour.
for i in range(M):
    for j in range(N):
        if visited[i][j] == 0 and arr[i][j] == 'W':
            cnt_w += bfsW([i, j])
        elif visited[i][j] == 0 and arr[i][j] == 'B':
            cnt_b += bfsB([i, j])
print(cnt_w, cnt_b)
| Nam4o/Algorithm | 백준/Silver/1303. 전쟁 - 전투/전쟁 - 전투.py | 전쟁 - 전투.py | py | 1,462 | python | en | code | 1 | github-code | 13 |
3023952966 | import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from pymongo import MongoClient # pymongo를 임포트 하기(패키지 인스톨 먼저 해야겠죠?)
client = MongoClient('mongodb://test:test@localhost', 27017) # mongoDB는 27017 포트로 돌아갑니다.
db = client.dbschool # 'dbreview'라는 이름의 db를 만들거나 사용합니다.
# Disable flag warning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
"""
# 시도 / 시군구 정보 가져오기
sido_sigungu_list = [
{
'sido_name': '서울특별시',
'sido_code': '1100000000',
'sigungu_list': [
{
'sigungu_name': '강남구',
'sigungu_code': 1168000000
},
{
'sigungu_name': '강동구',
'sigungu_code': 1174000000
},
{
'sigungu_name': '강북구',
'sigungu_code': 1130500000
},
...
]
},
...
]
"""
def get_sido_sigungu_list():
    """Scrape the school-info site and return every province (sido) with its
    districts (sigungu) as a list of
    {'sido_name', 'sido_code', 'sigungu_list': [{'sigungu_name', 'sigungu_code'}, ...]}.
    """
    # Launch a headless Chrome browser with a desktop user agent.
    options = webdriver.ChromeOptions()
    options.add_argument('headless')
    options.add_argument(
        "user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")
    driver = webdriver.Chrome("./chromedriver", options=options)
    driver.get("https://www.schoolinfo.go.kr/ei/ss/pneiss_a05_s0.do")
    # Parse the rendered page source.
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    side_options = soup.select('#sidoCode option')
    result = []
    for sido_option in side_options:
        if sido_option['value'] != '':
            # Province name and code from the <option>.
            sido_name = sido_option.text
            sido_code = sido_option['value']
            # Click the province so the district <select> gets populated.
            sido_option = driver.find_element_by_css_selector(f'#sidoCode > option[value="{sido_code}"]')
            sido_option.click()
            driver.implicitly_wait(1)
            # Collect the districts now shown for the selected province.
            soup = BeautifulSoup(driver.page_source, 'html.parser')
            sigungu_options = soup.select('#sigunguCode option')
            sigungu_list = []
            for sigungu_option in sigungu_options:
                if sigungu_option['value'] != '':
                    sigungu = sigungu_option.text
                    sigungu_code = sigungu_option['value']
                    sigungu_list.append({
                        'sigungu_name': sigungu,
                        'sigungu_code': sigungu_code
                    })
            result.append({
                'sido_name': sido_name,
                'sido_code': sido_code,
                'sigungu_list': sigungu_list
            })
    # Shut the browser down.
    driver.close()
    return result
"""
# 학교 정보 가져오기
{
"schoolList": [
{
"SCHUL_RDNMA": "서울특별시 강남구 개포로 402",
"FOND_SC_CODE": "2",
"USER_TELNO_SW": "02-576-3333",
"ZIP_CODE": "135240",
"SCHUL_KND_SC_CODE": "04",
"DTLAD_BRKDN": "173번지",
"USER_TELNO": "02-576-3333",
"JU_ATPT_OFCDC_CODE": "B100000001",
"LCTN_NM": "서울",
"PERC_FAXNO": "02-571-6560",
"HS_KND_SC_CODE": "01",
"USER_TELNO_GA": "02-576-3334",
"HMPG_ADRES": "http://gaepo.sen.hs.kr",
"ADRES_BRKDN": "서울특별시 강남구 개포동",
"SCHUL_CODE": "B100000373",
"ADRCD_ID": "1168010300",
"JU_DSTRT_OFCDC_CODE": "B100000001",
"SCHUL_NM": "개포고등학교"
},
...
]
}
"""
def get_school(sido_code, sigungu_code):
    """Return the raw high-school list for one province/district pair."""
    url = 'https://www.schoolinfo.go.kr/ei/ss/pneiss_a05_s0/selectSchoolListLocation.do'
    # NOTE(review): 'SULRIP_GB' appears three times — duplicate dict keys
    # collapse, so only '3' is actually sent; if all three establishment
    # types must be posted, use a list of (key, value) tuples instead.
    data = {
        'HG_JONGRYU_GB': '04', # school level (04 = high school)
        'SIDO_CODE': sido_code, # province code (e.g. Seoul = '1100000000')
        'SIGUNGU_CODE': sigungu_code, # district code (e.g. Gangnam-gu = '1168000000')
        'SULRIP_GB': '1',
        'SULRIP_GB': '2',
        'SULRIP_GB': '3',
        'GS_HANGMOK_CD': '06', # disclosure item (graduates' career status = 06)
        'PBAN_YR': '2020', # disclosure year
        'JG_HANGMOK_CD': '52', # disclosure item code
    }
    data = requests.post(url, data=data, verify=False)
    school_data = data.json()
    return school_data['schoolList']
# 3. 학교 정보
# 데이터가 없을 수도 있음 (ex. 서울특별시 강남구 단국대학교부속소프트웨어고등학교 - B100000373)
def get_school_info(sido, sigungu, school_name, school_code):
    """Fetch one school's 4-year-university entrance rate and store it in MongoDB.

    BUG FIX: when the page has no data (`univ_entrance is None`) the original
    printed a "no data" message but then still evaluated `univ_entrance.text`
    while building `doc`, raising AttributeError; the school is now skipped.
    """
    url = 'https://www.schoolinfo.go.kr/ei/pp/Pneipp_b06_s0p.do?'
    # NOTE(review): 'LOAD_TYPE' appears twice — duplicate dict keys collapse
    # to one entry; confirm the server only needs it once.
    params = {
        'GS_HANGMOK_CD': '06',
        'GS_HANGMOK_NO': '13-%EB%8B%A4',
        'GS_HANGMOK_NM': '%EC%A1%B8%EC%97%85%EC%83%9D%EC%9D%98%20%EC%A7%84%EB%A1%9C%20%ED%98%84%ED%99%A9',
        'GS_BURYU_CD': 'JG040',
        'JG_BURYU_CD': 'JG130',
        'JG_HANGMOK_CD': '52',
        'JG_GUBUN': '1',
        'JG_YEAR2': '2020',
        'HG_NM': '%EA%B2%BD%EA%B8%B0%EA%B3%A0%EB%93%B1%ED%95%99%EA%B5%90',
        'HG_CD': school_code,
        'GS_TYPE': 'Y',
        'JG_YEAR': '2020',
        'CHOSEN_JG_YEAR': '2020',
        'PRE_JG_YEAR': '2020',
        'LOAD_TYPE': 'single',
        'LOAD_TYPE': 'single'
    }
    data = requests.get(url, params=params, verify=False)
    soup = BeautifulSoup(data.text, 'html.parser')
    univ_entrance = soup.select_one(
        '#excel > div.table_wrap > div.schoolinfo_table.graytable > table > tbody > tr:nth-child(4) > td:nth-child(3)')
    if univ_entrance is None:
        print(f'{school_name} 대학교 진학자률 : 😭 정보가 없습니다')
        return
    print(f'{school_name} 대학교 진학자률 : {univ_entrance.text}%')
    doc = {
        '시도': sido,
        '시군구': sigungu,
        '고등학교': school_name,
        '4년제 진학률': univ_entrance.text  # stored as the raw scraped string
    }
    db.dbschool.insert_one(doc)
def run():
    """Walk every province -> district -> school and store each school's
    entrance rate via get_school_info."""
    # Fetch the full province/district tree once.
    sido_sigungu_list = get_sido_sigungu_list()
    for sido_sigungu in sido_sigungu_list:
        sido_name = sido_sigungu['sido_name']
        sido_code = sido_sigungu['sido_code']
        print(f'================ {sido_name} ================')
        for sigungu in sido_sigungu['sigungu_list']:
            sigungu_name = sigungu['sigungu_name']
            sigungu_code = sigungu['sigungu_code']
            # Schools belonging to this province/district pair.
            school_list = get_school(sido_code, sigungu_code)
            for school in school_list:
                school_name = school['SCHUL_NM']
                school_code = school['SCHUL_CODE']
                get_school_info(sido_name, sigungu_name, school_name, school_code)
                # break
            # break
        # break
run()
| baek0001/my_project | school.py | school.py | py | 7,317 | python | ko | code | 0 | github-code | 13 |
24956679472 | #!/usr/bin/env python
import wsgiref.handlers
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
class Geek(db.Model):
    """Datastore entity for one guestbook message."""
    message = db.StringProperty(required=True)
    # Creation timestamp, set automatically on first put().
    when = db.DateTimeProperty(auto_now_add=True)
    who = db.StringProperty()
class MyHandler(webapp.RequestHandler):
    """Renders the message list on GET and stores a new message on POST."""
    def get(self, *groups):
        # Newest messages first; groups[0] is the path captured by the route.
        geeks = db.GqlQuery('SELECT * FROM Geek '
                            'ORDER BY when DESC')
        values = { 'geeks': geeks }
        self.response.out.write(
            template.render('main.html', values))
        self.response.out.write('You accessed this page at the URL: /' + groups[0])
    def post(self, *groups):
        # Persist the submitted message, then redirect back to the list.
        geek = Geek(message=self.request.get('message'),
                    who=self.request.get('who'))
        geek.put()
        # self.response.out.write('posted!')
        self.redirect('/')
def main():
    """Route every URL to MyHandler and serve the app under CGI."""
    app = webapp.WSGIApplication([
        (r'/(.*)', MyHandler)], debug=True)
    wsgiref.handlers.CGIHandler().run(app)
if __name__ == '__main__':
main() | bjthinks/grapher | geekouttest/main.py | main.py | py | 972 | python | en | code | 1 | github-code | 13 |
class Solution(object):
    def exist(self, board, word):
        """Return True if `word` can be traced in `board` by moving between
        4-adjacent cells, using each cell at most once (LeetCode 79).

        Rewritten from the original, which (a) accumulated its result in a
        module-level `flag` that was never reset between calls, so a later
        call could report a stale True, (b) copied the entire visited matrix
        on every step (O(n*m) work per move), and (c) printed debug output.

        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        if not word:
            return True
        if not board or not board[0]:
            return False
        rows, cols = len(board), len(board[0])

        def dfs(i, j, k):
            # Try to match word[k:] starting at cell (i, j).
            if board[i][j] != word[k]:
                return False
            if k == len(word) - 1:
                return True
            saved = board[i][j]
            board[i][j] = None  # mark visited; restored before returning
            try:
                for di, dj in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                    ni, nj = i + di, j + dj
                    if 0 <= ni < rows and 0 <= nj < cols and dfs(ni, nj, k + 1):
                        return True
                return False
            finally:
                board[i][j] = saved

        return any(dfs(i, j, 0) for i in range(rows) for j in range(cols))
s = Solution()
BOOL = s.exist([["B","C","E"],["F","E","S"],["D","E","E"]],"ESEEEC")
print(BOOL) | kyx2333/Analysis_Algorithm | leetcode/79.py | 79.py | py | 1,610 | python | en | code | 0 | github-code | 13 |
33760013924 | import json
from channels.generic.websocket import WebsocketConsumer, AsyncWebsocketConsumer
from asgiref.sync import async_to_sync
class ChatConsumer(WebsocketConsumer):
    """Single-room chat consumer: every received message is fanned out to
    all members of the fixed 'chat_room' channel-layer group."""
    def connect(self):
        self.room_name = "chat"
        self.room_group_name = "chat_room"
        # Join the group before accepting the websocket handshake.
        async_to_sync(self.channel_layer.group_add)(
            self.room_group_name,
            self.channel_name
        )
        self.accept()
    def disconnect(self, close_code):
        # Leave the group on any close, regardless of close_code.
        async_to_sync(self.channel_layer.group_discard)(
            self.room_group_name,
            self.channel_name
        )
    def receive(self, *args, **kwargs):
        # Incoming client frame; the payload arrives as kwargs['text_data'].
        print(f"in receive {args} {kwargs}")
        message = kwargs['text_data']
        # Send message to room group; 'type' selects the chat_message handler.
        async_to_sync(self.channel_layer.group_send)(
            self.room_group_name,
            {
                'type': 'chat_message',
                'message': message
            }
        )
    def chat_message(self, event):
        # Group-event handler: relay the message to this consumer's socket.
        print(f"chat_message {event}")
        message = event['message']
        # Send message to WebSocket
        self.send(text_data=json.dumps({
            'message': message
        }))
    def send_notification(self, *args, **kwargs):
        # Group-event handler for 'send.notification' events; forwards
        # the event's 'data' payload verbatim to the client.
        print("in send notification")
        print(f"args {args} kwargs {kwargs}")
        self.send(text_data=json.dumps(args[0]['data']))
| tarunnar/chatproject | chatapp/consumers.py | consumers.py | py | 1,379 | python | en | code | 0 | github-code | 13 |
38821176188 | import threading
import time
def thread1():
    """Print a heartbeat ten times, pausing 3 seconds between prints."""
    remaining = 10
    while remaining:
        print('thread 1- running')
        time.sleep(3)
        remaining -= 1
def thread2():
    """Print a heartbeat ten times, pausing 3 seconds between prints."""
    remaining = 10
    while remaining:
        print('thread 2- running')
        time.sleep(3)
        remaining -= 1
t1 = threading.Thread(target=thread1)
t2 = threading.Thread(target=thread2)
# MODERNIZED: threading.currentThread() and isDaemon() are deprecated
# aliases; current_thread() / the .daemon attribute behave identically.
tt = threading.current_thread().daemon
print(tt)
t1.start()
# NOTE(review): join() here serializes the threads — t2 only starts after
# t1 finishes all ten iterations. Confirm that is intentional.
t1.join()
t2.start() | Bhaskar100/DBTest | thread-test.py | thread-test.py | py | 385 | python | en | code | 0 | github-code | 13 |
1139683286 | textoTotal1 = 0
textoTotal2 = 0
textoTotal3 = []
# Read both source files fully into memory (lines keep their '\n').
with open('C:\\Curso01\\seccao13\\arquivos_de_texto\\ex07.txt') as arquivo1:
    textoTotal1 = arquivo1.readlines()
with open('C:\\Curso01\\seccao13\\arquivos_de_texto\\ex08.txt') as arquivo2:
    textoTotal2 = arquivo2.readlines()
# Build: file1's lines, a separating newline, then file2's lines.
textoTotal3.append(textoTotal1)
textoTotal3.append('\n')
textoTotal3.append(textoTotal2)
# Append everything to ex09.txt. NOTE(review): mode 'a' grows the output on
# every run — confirm append (vs overwrite) is intended.
with open('C:\\Curso01\\seccao13\\arquivos_de_texto\\ex09.txt','a') as arquivo3:
    for l in range(0,len(textoTotal3)):
        for i in range(0,len(textoTotal3[l])):
            linha = textoTotal3[l][i]
arquivo3.write(linha) | Sancheslipe/atividades_python_basico_ao_avancado_geral | seccao13/ex09.py | ex09.py | py | 611 | python | en | code | 0 | github-code | 13 |
4457946145 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class Book(models.Model):
    """A book sold by the store (model ``book_store.book``)."""
    _name = 'book_store.book'

    name = fields.Char("名称", help='书名')          # title
    author = fields.Char('作者', help='作者')        # author
    date = fields.Datetime("出版日期", help="日期")  # publication date
    price = fields.Float("定价", help='定价')        # list price

    # NOTE(review): the first constraint references a `description` column
    # that this model does not declare — confirm it exists (e.g. inherited),
    # otherwise table creation will fail.
    _sql_constraints = [
        ('name_description_check',
         'CHECK(name!=description)',
         "The title of the course should not be the description"
         ),
        ('name_unique',
         'UNIQUE(name)',
         "The course title must be unique"
         ),
    ]

    @api.one
    def btn_test(self):
        """Create a demo publisher with two signed authors.

        BUG FIX: the one2many field on book_store.publisher is declared as
        ``signer_authors``; the original passed ``signed_authors``, which
        raises an invalid-field error on create.
        """
        self.env['book_store.publisher'].sudo().create({
            "name": "超新星出版社",
            "signer_authors": [(0, 0, {'name': '本杰明 巴顿', 'age': '90'}), (0, 0, {'name': '刘天然', 'age': 28})]
        })
class Author(models.Model):
    """An author signed to a publisher (model ``book_store.author``)."""
    _name = "book_store.author"

    name=fields.Char('名称',help='作者名称')  # author name
    age=fields.Integer('年龄')               # age
    # Inverse of Publisher.signer_authors; deletion of the publisher is blocked.
    publisher_id=fields.Many2one(
        'book_store.publisher',string='签约出版商',ondelete='no action',required=True
    )
class Publisher(models.Model):
    """A publishing house (model ``book_store.publisher``)."""
    _name = 'book_store.publisher'

    name=fields.Char('名称',help='出版社名称')  # publisher name
    # All authors whose publisher_id points at this record.
    signer_authors=fields.One2many(
        'book_store.author','publisher_id',string='签约作者'
    )
# @api.one
# def btn_test(self):
# '''测试方法'''
#
# self.env['book_store.publisher'].sudo().create({
# "name":"超新星出版社",
# "signed_authors":[(0,0,{'name':'本杰明 巴顿','age':'90'}),(0,0, {'name': '刘天然', 'age': 28})]
# }) | 2232408653/model_test | book_store/models/book.py | book.py | py | 1,724 | python | en | code | 0 | github-code | 13 |
31931306095 | from Dataset import *
from tqdm import tqdm
import os
import torch
import torchvision
import matplotlib.pyplot as plt
import torch.backends.cudnn as cudnn
from Model import Model
def plot_graph(train_loss_curve):
    """Plot training loss versus epoch, save to Loss_curves.jpg, and show it.

    CLEANUP: removed the stale commented-out plot of hard-coded loss values
    that shadowed the real curve.
    """
    plt.plot(range(len(train_loss_curve)), train_loss_curve, label="Train Loss")
    plt.title("Epoch Vs Loss")
    plt.legend()
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.savefig("Loss_curves.jpg")
    plt.show()
def OrientationLoss(orient_batch, orientGT_batch, confGT_batch):
    """Negative mean cosine similarity between predicted and ground-truth
    orientation angles, evaluated only in each sample's ground-truth bin.

    Each orientation entry is a (cos, sin)-style pair stored as
    (col 0, col 1); the angle is recovered with atan2(col 1, col 0).
    Returns a scalar in [-1, 1]; -1 means perfect agreement.
    """
    n = orient_batch.size(0)
    # Ground-truth bin index per sample (argmax over the confidence one-hot).
    bin_idx = torch.max(confGT_batch, dim=1)[1]
    rows = torch.arange(n)
    gt_sel = orientGT_batch[rows, bin_idx]
    pred_sel = orient_batch[rows, bin_idx]
    gt_theta = torch.atan2(gt_sel[:, 1], gt_sel[:, 0])
    pred_theta = torch.atan2(pred_sel[:, 1], pred_sel[:, 0])
    return -torch.cos(gt_theta - pred_theta).mean()
def calc_loss(orient, conf, dim, labels, device):
    """Combined 3D-box loss: alpha * dimension MSE + (confidence CE + w * orientation loss).

    `labels` must provide 'Orientation', 'Confidence' (one-hot) and
    'Dimensions' tensors; all are moved to `device` before use.
    """
    gt_orient = labels['Orientation'].float().to(device)
    gt_conf = labels['Confidence'].long().to(device)
    gt_dim = labels['Dimensions'].float().to(device)

    alpha = 0.6  # weight of the dimension term
    w = 0.4      # weight of the orientation term inside loss_theta
    orient_loss = OrientationLoss(orient, gt_orient, gt_conf)
    dim_loss = torch.nn.functional.mse_loss(dim, gt_dim)
    # One-hot ground-truth confidence -> class index for cross-entropy.
    gt_conf = torch.max(gt_conf, dim=1)[1]
    conf_loss = torch.nn.functional.cross_entropy(conf, gt_conf)
    loss_theta = conf_loss + w * orient_loss
    return alpha * dim_loss + loss_theta
def train_test_model(epochs, device, use_saved=False):
    """Train the VGG19-backed 3D-box model on KITTI for `epochs` epochs.

    Optionally resumes from trained_models/model_epoch_last.pth; saves a
    checkpoint after every epoch and returns the per-epoch mean loss curve.
    """
    root = os.path.abspath(os.path.dirname(__file__))
    train_path = root + '/Kitti/training'
    save_dir = root + '/trained_models/'
    model_path = root + '/trained_models/model_epoch_last.pth'
    dataset = Dataset(train_path)
    print("Obtained Training Data")
    trainloader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True, num_workers=6)
    # Frozen-shape VGG19-BN feature extractor feeding the custom head.
    backbone = torchvision.models.vgg.vgg19_bn(pretrained=True)
    model = Model(features=backbone.features).to(device)
    opt_SGD = torch.optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)
    start = 0
    if use_saved:
        # Resume weights and epoch counter from the last checkpoint.
        checkpoint = torch.load(model_path)
        model.load_state_dict(checkpoint['model_state_dict'])
        start = checkpoint['epoch']
        print('Found previous checkpoint at epoch ', start)
        print('Resuming training....')
    train_loss_curve = []
    for epoch in range(start, start+epochs):
        training_loss = 0
        for i, (data, labels) in enumerate(tqdm(trainloader)):
            data=data.float().to(device)
            orient, conf, dim = model(data)
            loss = calc_loss(orient, conf, dim, labels, device)
            training_loss += loss.item()
            opt_SGD.zero_grad()
            loss.backward()
            opt_SGD.step()
        train_loss_curve.append(training_loss/len(trainloader))
        print("Epoch: ",epoch+1," Training Loss:", training_loss/len(trainloader))
        # Save Model after each epoch (overwrites the same checkpoint file).
        name = save_dir + 'model_epoch_last.pth'
        torch.save({'epoch': epoch+1, 'model_state_dict': model.state_dict()}, name)
    return train_loss_curve
if __name__=='__main__':
    # Entry point: train for 10 epochs from scratch, then plot the loss curve.
    epochs =10
    use_saved = False
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    if torch.cuda.is_available():
        cudnn.benchmark = True  # let cuDNN pick the fastest kernels for fixed input sizes
    train_loss_curve= train_test_model(epochs, device, use_saved)
    plot_graph(train_loss_curve)
| Ruchi-Gupte/3D-Bounding-Box-with-Tracking | Train_3D_Features/Train.py | Train.py | py | 3,724 | python | en | code | 1 | github-code | 13 |
16451340634 |
# Key-press handling overview:
# - if stun-lock is active, a key press does nothing
# - if a cast timer is already running, any key resets it, except '2'
# - pressing '2' starts the cast timer
# - when the cast timer completes, the stun-lock phase begins
# This only sets a window title -> the .ahk script uses it to close this process once I have closed Oblivion!
from os import system
system("title fastpy")
import threading
import pythoncom, pyWinhook, sys, logging
import time
from pynput.keyboard import Key, Controller
import ctypes
import winsound
keySending = False
thread_cast = None
thread_stun = None
SendInput = ctypes.windll.user32.SendInput
# C struct redefinitions
PUL = ctypes.POINTER(ctypes.c_ulong)
# ctypes mirrors of the Win32 INPUT family of structures consumed by SendInput.
class KeyBdInput(ctypes.Structure):
    # KEYBDINPUT: virtual-key code, scan code, flags, timestamp, extra info.
    _fields_ = [("wVk", ctypes.c_ushort),
                ("wScan", ctypes.c_ushort),
                ("dwFlags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", PUL)]

class HardwareInput(ctypes.Structure):
    # HARDWAREINPUT: message plus low/high word parameters.
    _fields_ = [("uMsg", ctypes.c_ulong),
                ("wParamL", ctypes.c_short),
                ("wParamH", ctypes.c_ushort)]

class MouseInput(ctypes.Structure):
    # MOUSEINPUT: relative/absolute coordinates, wheel data, flags, timestamp.
    _fields_ = [("dx", ctypes.c_long),
                ("dy", ctypes.c_long),
                ("mouseData", ctypes.c_ulong),
                ("dwFlags", ctypes.c_ulong),
                ("time",ctypes.c_ulong),
                ("dwExtraInfo", PUL)]

class Input_I(ctypes.Union):
    # The INPUT union: exactly one of keyboard / mouse / hardware input.
    _fields_ = [("ki", KeyBdInput),
                ("mi", MouseInput),
                ("hi", HardwareInput)]

class Input(ctypes.Structure):
    # INPUT: type discriminator (0 = mouse, 1 = keyboard, 2 = hardware) + union.
    _fields_ = [("type", ctypes.c_ulong),
                ("ii", Input_I)]
# Actuals Functions
def PressKey(hexKeyCode):
    """Send a key-down event for the given hardware scan code via SendInput."""
    extra = ctypes.c_ulong(0)
    ii_ = Input_I()
    # 0x0008 = KEYEVENTF_SCANCODE: interpret wScan as a scan code (wVk = 0).
    ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra) )
    x = Input( ctypes.c_ulong(1), ii_ )
    ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
def ReleaseKey(hexKeyCode):
    """Send a key-up event for the given hardware scan code via SendInput."""
    extra = ctypes.c_ulong(0)
    ii_ = Input_I()
    # KEYEVENTF_SCANCODE (0x0008) | KEYEVENTF_KEYUP (0x0002).
    ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008 | 0x0002, 0, ctypes.pointer(extra) )
    x = Input( ctypes.c_ulong(1), ii_ )
    ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
# szakdolgozatom:
def Finished_stun():
    """Timer callback: the stun-lock phase ends; keys are accepted again."""
    global thread_stun
    winsound.Beep(700, 150)
    print("- stunlock finished -")
    thread_stun = None
def Finished_casting():
    """Timer callback: the cast completed — start a 4 s stun-lock timer and
    synthesize the suppressed key press as a hardware scan code."""
    global thread_cast, thread_stun, keySending
    print("- casting finished -")
    keySending = True  # lets the synthetic press below pass through the hook
    thread_cast = None
    thread_stun = threading.Timer(interval = 4, function = Finished_stun)
    thread_stun.start()
    print("- stunlock starts -")
    keyboard = Controller()  # NOTE(review): unused — the send uses PressKey below
    PressKey(0x03) # DirectX scan code for the '2' key
    time.sleep(0.1)
    ReleaseKey(0x03) # DirectX scan code for the '2' key
def OnKeyboardEvent(event):
    """Keyboard hook: gate the '2' key (KeyID 50) behind a 3 s cast timer
    and a 4 s stun-lock.

    Returning True passes the key on to other applications; returning False
    swallows it.
    """
    global thread_cast, thread_stun, keySending
    print("Key:", chr(event.Ascii), " (KeyID:", event.KeyID, ")")
    # if chr(event.Ascii) == '2' and keySending == True:
    if event.KeyID == 50 and keySending == True: # KeyID so the numpad '2' also matches
        # This is our own synthetic press from Finished_casting — let it through.
        print("- key sending -")
        keySending = False
        return True
    elif thread_stun is not None:
        # Stun-lock active: swallow everything.
        print("- under stunlock -")
        return False
    # elif chr(event.Ascii) == '2':
    elif event.KeyID == 50:
        if thread_cast is None:
            # Start the 3 s cast timer; the real key press is suppressed.
            print("- casting starts -")
            thread_cast = threading.Timer(interval = 3, function = Finished_casting)
            thread_cast.start()
            winsound.Beep(400, 150)
            return False
        else:
            print("- already casting, relax bro -")
            return False
    # elif chr(event.Ascii) != '2' and thread_cast is not None:
    elif event.KeyID != 50 and thread_cast is not None:
        # Any other key cancels an in-progress cast.
        print("- casting canceled -")
        thread_cast.cancel()
        thread_cast = None
        winsound.Beep(200, 150)
        return True
# If we return False, the key is swallowed for every application except Python
# -> if stuck, exit with ctrl + alt + del and the mouse.
def OnMouseEvent(event):
    """Mouse-wheel hook: logs scroll direction; the key-forwarding is
    currently disabled (commented out). Always passes the event through."""
    if event.Wheel == 1:
        print('ScrollUp')
        # PressKey(0x09) # DirectX scan code
        # time.sleep(0.1)
        # ReleaseKey(0x09) # DirectX scan code
    elif event.Wheel == -1:
        print('ScrollDown')
        # PressKey(0x0A) # DirectX scan code
        # time.sleep(0.1)
        # ReleaseKey(0x0A) # DirectX scan code
    return True
# Create the hook manager.
hm = pyWinhook.HookManager()
# Register the two callbacks.
hm.KeyDown = OnKeyboardEvent
hm.MouseWheel = OnMouseEvent
# Hook into the mouse and keyboard events.
hm.HookKeyboard()
hm.HookMouse()
pythoncom.PumpMessages() # message pump keeps the process alive so the hooks stay installed
| chrishor29/OblivionCastTimer | fast.py | fast.py | py | 4,662 | python | en | code | 0 | github-code | 13 |
37284841870 | import matplotlib.pyplot as plt
from language_analysis.FILE_PATHS import LANGUAGE_TEXT_PATH
from ASCII_art import LOGO
from language_analysis.fonctions import parse_file, get_most_used_char
# pip install matplotlib OR pip3 install matplotlib
TEXT_PATH = 'text.txt'
NUM_MOST_FREQ_CHAR = 5
SHOW_HIST = True
if __name__ == '__main__':
    print(LOGO, end='\n\n\n')

    # Collect per-language character frequencies from the reference texts.
    stats = {}
    for language in LANGUAGE_TEXT_PATH:
        for file in LANGUAGE_TEXT_PATH[language]:
            stats[language] = get_most_used_char(parse_file(file), NUM_MOST_FREQ_CHAR)

    # Display either as histograms or on the console.
    if SHOW_HIST:
        for language in stats:
            plt.title(f'{language}')
            plt.ylabel('Occurences des caractères (en %)')
            plt.xlabel(f'Les {NUM_MOST_FREQ_CHAR} caractères les plus fréquents')
            plt.bar(stats[language].keys(), stats[language].values(), color='lightskyblue')
            plt.show()
    else:
        print('Occurences des caractères dans chaques langues :')
        print('═' * 256)
        for language in stats:
            print(f'\t• {language} -> ', end='')
            for char in stats[language]:
                print(f'{char} : {stats[language][char]}%', end=' | ')
            print('\n' + '═' * 256)

    # Character frequencies of the text under analysis.
    text = get_most_used_char(parse_file(TEXT_PATH), NUM_MOST_FREQ_CHAR)

    # Display either as a histogram or on the console.
    if SHOW_HIST:
        plt.title(f'Texte entré')
        plt.ylabel('Occurences des caractères (en %)')
        plt.xlabel(f'Les {NUM_MOST_FREQ_CHAR} caractères les plus fréquents')
        plt.bar(text.keys(), text.values(), color='lightskyblue')
        plt.show()
    else:
        print('Occurences des caractères dans le texte entré : \n\t', end='')
        for char in text:
            print(f'| {char} : {text[char]}', end=' | ')
        print('\n')

    print('Comparaison avec les fichiers de réferences : ')
    # Compare the input text's frequencies against each reference language.
    # NOTE(review): the per-character ratio text% / reference% is not clamped,
    # so a single over-represented character can push a "match" above 100% —
    # confirm this scoring is intended.
    pourcent_match = {elt: 0 for elt in stats}
    for language in stats:
        pourcent = []
        for char in text:
            # Proximity ratio between the input text and this reference.
            if stats[language].get(char) is not None:
                pourcent.append(text[char] * 100 / stats[language].get(char))
            else:
                # Character absent from the reference -> contributes 0%.
                pourcent.append(0)
        somme = 0
        # Average the per-character ratios into the final percentage.
        for elt in pourcent:
            somme += elt
        # Store the result for this language.
        pourcent_match[language] = somme / len(pourcent)
        print(f'\t• Le texte entré ressemble à {(somme / len(pourcent)):.2f}% à du {language}')

    maximum_value = 0
    maximum_key = ''
    # Pick the language with the highest match percentage.
    for language in pourcent_match:
        if pourcent_match[language] > maximum_value:
            maximum_value = pourcent_match[language]
            maximum_key = language

    # Final report.
    print('\n\n═ Résultat ' + '═' * 245)
    print(f'La langue detecté pour le texte entré est : {maximum_key} ({maximum_value:.2f}%)')
| romainflcht/APP3 | main.py | main.py | py | 3,515 | python | fr | code | 0 | github-code | 13 |
72943547218 | # def variant_one(team, side, user):
# if side not in team:
# team[side] = []
# team[side].append(user)
# return team
#
#
# def variant_two(team, side, user):
# if side not in team:
# team[side] = []
# team[side].append(user)
# else:
# for key, value in teams.items():
# for i in value:
# if i == user:
# value.remove(i)
#
# team[side].append(user)
# print(f"{user} joins the {side} side!")
# return team
def add_user(team, side, user):
    """Enlist `user` on `side` in `team` ({side: [users]}) and return `team`.

    A user already enrolled on any side is left untouched.

    BUG FIX: the original iterated the module-level `teams` dict instead of
    the `team` parameter, so the function silently depended on a global and
    broke when called with any other mapping.
    """
    for members in team.values():
        if user in members:
            return team
    if side not in team:
        team[side] = [user]
    else:
        team[side].append(user)
    return team
# - 30 точки от тук
def change_side(team, side, user):
    """Move *user* to *side*: remove them from their current side (if any),
    then delegate the insertion to add_user().

    Returns:
        dict: the updated team mapping.
    """
    # BUG FIX: operate on the `team` parameter instead of the global `teams`.
    for key, members in team.items():
        if user in members:
            members.remove(user)
            return add_user(team, side, user)
    # User was not enrolled anywhere yet: just add them.
    return add_user(team, side, user)
# - 30 points lost from here
# Read events until the terminator "Lumpawaroo":
#   "<side> | <user>"  -> enroll the user on that side (if not enrolled yet)
#   "<user> -> <side>" -> move the user to that side
teams = {}
command = input()
while not command == "Lumpawaroo":
    if "|" in command:
        force_side, force_user = command.split(" | ")
        add_user(teams, force_side, force_user)
        # NOTE(review): no join message is printed for the "|" event,
        # unlike the "->" event below — confirm against the task spec.
    elif "->" in command:
        force_user, force_side = command.split(" -> ")
        change_side(teams, force_side, force_user)
        print(f"{force_user} joins the {force_side} side!")
    command = input()
# Final report: every non-empty side with its member count and members.
for key, value in teams.items():
    if len(value) > 0:
        print(f"Side: {key}, Members: {len(value)}")
        for i in value:
            print(f"! {i}")
| Andon-ov/Python-Fundamentals | 20_dictionaries_exercise/force_book.py | force_book.py | py | 1,619 | python | en | code | 0 | github-code | 13 |
23721773295 | # Supporting code to download git repo.
import os
import subprocess
import configparser

# Read the repository URL from pyconfig.ini ([DEFAULT] GIT_REPO).
config = configparser.ConfigParser()
config.read('pyconfig.ini')
GIT_REPO = config['DEFAULT']['GIT_REPO']

# NOTE(review): GIT_REPO is read from config but a hard-coded SSH URL is
# cloned instead (the commented-out line suggests the config was intended).
args = ['git', 'clone', '--depth=1', 'git@github.com:pramitmitra/ReadingNotes.git']
#args = ['git', 'clone', '--depth=1', GIT_REPO]
# Shallow-clone and capture stdout; communicate() waits for completion.
res = subprocess.Popen(args, stdout=subprocess.PIPE)
output, _error = res.communicate()

# Copy the one file we need out of the clone, then delete the clone.
# NOTE(review): absolute user-specific paths make this machine-dependent.
os.system('cp /Users/prammitr/Documents/my_projects/python/ReadingNotes/input_plSql.sql /Users/prammitr/Documents/my_projects/python/input')
os.system('rm -rf /Users/prammitr/Documents/my_projects/python/ReadingNotes')

# Report either the captured output or the error.
if not _error:
    print(output)
else:
    print(_error)
10725724153 | codigo = 0
# Per-fuel sale counters (Beecrowd 1134): 1 = alcohol, 2 = gas, 3 = diesel.
# Relies on `codigo = 0` initialized just above this block.
alc = 0
gas = 0
dies = 0
# Read fuel codes until the terminator code 4 is entered.
while codigo != 4:
    if 1 <= codigo < 4:
        if codigo == 1:
            alc += 1
        if codigo == 2:
            gas += 1
        if codigo == 3:
            dies += 1
    codigo = int(input())
# Final tally in the format required by the judge.
print("MUITO OBRIGADO")
print(f"Alcool: {alc}")
print(f"Gasolina: {gas}")
print(f"Diesel: {dies}")
class Solution:
    """LeetCode 321 "Create Maximum Number".

    Given two digit arrays, pick k digits total (preserving relative order
    within each array) so that the resulting number is as large as possible.
    (Also repairs the class header, which had a dataset id fused onto it.)
    """

    def maxNumber(self, nums1, nums2, k):
        """Try every feasible split of k between the two arrays; keep the best merge.

        Returns the maximal k-digit result as a list of digits.
        """
        m, n = len(nums1), len(nums2)
        # `i` digits come from nums1, `k - i` from nums2; both must be feasible.
        start, end = max(0, k - n), min(k, m)
        candidates = [
            self.merge(self.getMaxSubsequence(nums1, i),
                       self.getMaxSubsequence(nums2, k - i))
            for i in range(start, end + 1)
        ]
        return max(candidates)

    def getMaxSubsequence(self, nums, k):
        """Lexicographically largest length-k subsequence via a monotonic stack."""
        stack = []
        remain = len(nums) - k  # how many digits we may still discard
        for num in nums:
            while stack and stack[-1] < num and remain > 0:
                remain -= 1
                stack.pop()
            stack.append(num)
        return stack[0:k]

    def merge(self, subsequence1, subsequence2):
        """Greedy merge: always take from the lexicographically larger remainder."""
        ans = []
        while subsequence1 or subsequence2:
            # List comparison is lexicographic, which is exactly the tie-break
            # needed to decide which remainder to consume next.
            bigger = subsequence1 if subsequence1 > subsequence2 else subsequence2
            ans.append(bigger.pop(0))
        return ans
| HourunLi/Leetcode | SourceCode/MonotonousStack/0321_Create_Maximum_Number.py | 0321_Create_Maximum_Number.py | py | 818 | python | en | code | 1 | github-code | 13 |
20418558112 | import os
import matplotlib.pyplot as plt
import datetime
import requests
import json
import urllib.request
from deep_translator import GoogleTranslator
class Engine:
    """Weather facade around weatherapi.com for a single town.

    Downloads a 3-day forecast into data.json, caches it, and exposes
    getters plus matplotlib figures for a GUI layer. For all getters,
    day=0 means "current conditions"; day 1..2 index forecast days.
    On any network/API failure, `no_connection` is set and getters
    return the placeholder string "bd." (presumably Polish "brak danych",
    i.e. no data — confirm with the UI code).
    """

    def __init__(self, town):
        self.town = town
        # Parsed JSON payload from the API (None until first update).
        self.info = None
        # True when the most recent download attempt failed.
        self.no_connection = False
        self.download_data()
        self.update_data()

    def open_data(self):
        """Load and return the cached API payload from data.json."""
        with open('data.json', 'r') as file:
            return json.load(file)

    def download_data(self):
        """Fetch the forecast into data.json; set no_connection on failure."""
        # NOTE(review): API key is hard-coded into the URL.
        link = f"http://api.weatherapi.com/v1/forecast.json?key=7bfc6f3c5fec47a484d194203230505&q={self.town}&days=3&aqi=no&alerts=no"
        try:
            response = requests.get(link)
            response.raise_for_status()
            with open('data.json', 'w') as file:
                json.dump(response.json(), file)
            self.no_connection = False
        except requests.exceptions.HTTPError as e:
            # 4xx/5xx (e.g. unknown town) is treated the same as being offline.
            if e.response.status_code >= 400:
                self.no_connection = True
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            self.no_connection = True

    def update_data(self):
        """Re-download and, if successful, reload the cached payload."""
        self.download_data()
        if not self.no_connection:
            self.info = self.open_data()

    def plot(self, day=0):
        """Return a figure of hourly temperature for the given forecast day.

        When offline, returns an empty styled figure.
        """
        fig, ax = plt.subplots(dpi=120)
        fig.set_facecolor('#f0f0f0')
        ax.set_facecolor("#f0f0f0")
        # Frameless look: hide all four spines.
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
        ax.spines['left'].set_visible(False)
        if self.no_connection:
            return fig
        temps = []
        dates = []
        # One sample per hour; 'time' ends with "HH:MM".
        for data in self.info['forecast']['forecastday'][day]['hour']:
            temps.append(data["temp_c"])
            dates.append(data['time'][-5:])
        ax.plot(dates, temps, c='yellow')
        ax.grid(axis='y')
        ax.set_ylabel('Temperatura [°C]')
        ax.fill_between(dates, temps, facecolor='yellow', alpha=0.45)
        # Show every third hour label to avoid crowding.
        ax.set_xticks(range(0, len(dates), 3))
        ax.set_xticklabels(dates[::3])
        ax.set_xlim((0, 23))
        ax.set_ylim(bottom=min(temps) - 1)
        return fig

    def get_date(self, day=0):
        """Return the last-update timestamp (day=0) or the forecast date."""
        if self.no_connection:
            return "bd."
        if day == 0:
            return self.info["current"]["last_updated"]
        else:
            return self.info['forecast']['forecastday'][day]["date"]

    def get_img(self, day=0):
        """Download and return the condition icon as raw image bytes."""
        if self.no_connection:
            return "bd."
        if day == 0:
            url2 = str(self.info["current"]["condition"]["icon"])
        else:
            url2 = self.info['forecast']['forecastday'][day]["day"]["condition"]["icon"]
        # Swap the API's 64x64 icon path for the higher-resolution 128x128 one:
        # keep everything after ".../64x64" and prepend the 128x128 base URL.
        url = "https://cdn.weatherapi.com/weather/128x128"
        i = url2.find('x64/')
        url2 = url2[i + 3::]
        url = url + url2
        data = urllib.request.urlopen(url).read()
        return data

    def get_temp(self, day=0):
        """Return current temperature (day=0) or the forecast day's max, in °C."""
        if self.no_connection:
            return "bd."
        if day == 0:
            return self.info["current"]["temp_c"]
        else:
            return self.info['forecast']['forecastday'][day]["day"]["maxtemp_c"]

    def get_wind(self, day=0):
        """Return current wind speed (day=0) or the forecast day's max, in km/h."""
        if self.no_connection:
            return "bd."
        if day == 0:
            return self.info["current"]["wind_kph"]
        else:
            return self.info['forecast']['forecastday'][day]["day"]["maxwind_kph"]

    def get_humidity(self, day=0):
        """Return humidity in %. For forecast days, the 5 AM hourly sample is used."""
        if self.no_connection:
            return "bd."
        if day == 0:
            return self.info["current"]["humidity"]
        else:
            return self.info['forecast']['forecastday'][day]['hour'][5]["humidity"]

    def get_pressure(self, day=0):
        """Return pressure in mbar. For forecast days, the 5 AM hourly sample is used."""
        if self.no_connection:
            return "bd."
        if day == 0:
            return self.info["current"]["pressure_mb"]
        else:
            return self.info['forecast']['forecastday'][day]['hour'][5]["pressure_mb"]

    def get_desc(self, day=0):
        """Return the weather condition text, machine-translated to Polish."""
        if self.no_connection:
            return "bd."
        if day == 0:
            text = self.info["current"]["condition"]["text"]
        else:
            text = self.info['forecast']['forecastday'][day]["day"]["condition"]["text"]
        return GoogleTranslator(source='auto', target='pl').translate(text)

    def get_city(self):
        """Return the resolved city name, machine-translated to Polish."""
        if self.no_connection:
            return "bd."
        text = self.info["location"]["name"]
        return GoogleTranslator(source='auto', target='pl').translate(text)

    def rain_plot(self, day=0):
        """Return a step-style figure of hourly chance-of-rain for the given day."""
        fig, ax = plt.subplots(dpi=120)
        fig.set_facecolor('#f0f0f0')
        ax.set_facecolor("#f0f0f0")
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
        ax.spines['left'].set_visible(False)
        if self.no_connection:
            return fig
        chances = []
        dates = []
        for data in self.info['forecast']['forecastday'][day]['hour']:
            chances.append(data["chance_of_rain"])
            dates.append(data['time'][-5:])
        # Draw each hour's chance as a horizontal filled segment (step chart).
        for i in range(0, len(chances) - 1):
            ax.plot([dates[i], dates[i + 1]], [chances[i], chances[i]], c='blue')
            ax.fill_between([dates[i], dates[i + 1]], [chances[i], chances[i]], facecolor='blue', alpha=0.35)
        ax.grid(axis='y')
        ax.set_ylabel('Szansa opadów [%]')
        ax.set_xticks(range(0, len(dates), 3))
        ax.set_xticklabels(dates[::3])
        ax.set_xlim((0, 23))
        ax.set_ylim((-0.1, 100))
        return fig
if __name__ == "__main__":
    # Manual smoke test.
    # NOTE(review): "Lasdsadon" looks like a junk town name — presumably this
    # exercises the no_connection/empty-figure path; verify the intent.
    b = Engine("Lasdsadon")
    fig = b.rain_plot()
    plt.show()
| przemek-dul/Weatherapp | engine.py | engine.py | py | 5,667 | python | en | code | 0 | github-code | 13 |
13617659853 | # -*- coding:utf-8 -*-
# 多线程 下载 豆瓣 刘诗诗 图片
import requests
import json
import os
import random
from selenium import webdriver
from queue import Queue
from lxml import etree
from fake_useragent import UserAgent
from threading import Thread
from time import time, sleep
# Output folder for downloaded pictures.
PIC_PATH = "shishi"

ua = UserAgent()
# HTTP request headers; the User-Agent is randomized via fake_useragent.
headers = {
    'accept': 'text/html, application/xhtml+xml, application/xml; q=0.9, image/webp, */*; q=0.8',
    'user-agent': ua.random
}
# Crawler thread class.
class CrawlInfo(Thread):
    """Worker thread that drains page URLs from url_queue and pushes the
    fetched HTML into html_queue. Uses the module-level `headers` dict."""

    def __init__(self, url_queue, html_queue):
        super().__init__()
        self.url_queue = url_queue
        self.html_queue = html_queue

    def run(self):
        # Keep pulling until the shared URL queue is exhausted.
        while not self.url_queue.empty():
            page_url = self.url_queue.get()
            response = requests.get(page_url, headers=headers)
            if response.status_code != 200:
                print("请求失败,{}".format(str(response.status_code)))
                continue
            self.html_queue.put(response.text)
            # Polite random pause (0–3 s) between requests.
            sleep(random.randint(0, 2) + random.random())
# Image-list parser thread class.
class ParseImageList(Thread):
    """Worker thread that turns downloaded pages into image URLs.

    Pops raw HTML from html_queue, extracts every poster <img> src with
    XPath, and feeds the image URLs back into url_queue for downloading.
    """

    def __init__(self, url_queue, html_queue):
        super().__init__()
        self.url_queue = url_queue
        self.html_queue = html_queue

    def run(self):
        while not self.html_queue.empty():
            page_source = self.html_queue.get()
            tree = etree.HTML(page_source)
            image_urls = tree.xpath(
                "//ul[@class='poster-col3 clearfix']/li/div[@class='cover']/a/img/@src")
            for image_url in image_urls:
                self.url_queue.put(image_url)
def crawl_func(url_q, html_q):
    """Run 6 CrawlInfo download threads and block until all have finished."""
    workers = [CrawlInfo(url_q, html_q) for _ in range(6)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def parse_func(url_q, html_q):
    """Run 6 ParseImageList parser threads and block until all have finished."""
    workers = [ParseImageList(url_q, html_q) for _ in range(6)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
# Download a picture.
def download_info(image):
    """Download one image URL into `base_path`, named after the URL basename.

    Relies on module globals `base_path` and `headers`.
    """
    dir_name = base_path + image.split("/")[-1].split(".")[0] + ".jpg"
    try:
        pic = requests.get(image, timeout=12, headers=headers)
        with open(dir_name, "wb") as f:
            f.write(pic.content)
    # FIX: RequestException also covers read timeouts (timeout=12) and other
    # request failures that the narrower ConnectionError did not catch.
    except requests.exceptions.RequestException:
        print("连接失败,图片无法下载")
if __name__ == '__main__':
    start_time = time()
    # Shared queues: page/image URLs in one, raw HTML in the other.
    url_queue = Queue()
    html_queue = Queue()
    # Create the picture folder; exist_ok=True makes this a no-op if present.
    os.makedirs(PIC_PATH, exist_ok=True)
    base_path = os.getcwd() + "/" + PIC_PATH + "/"
    # Enqueue every gallery page URL (30 photos per page).
    for i in range(0, 2791, 30):
        url = "https://movie.douban.com/celebrity/1274533/photos/?type=C&start={}&sortby=like&size=a&subtype=a".format(str(i))
        url_queue.put(url)
    crawl_func(url_queue, html_queue)
    parse_func(url_queue, html_queue)
    # BUG FIX: the original did `for image in url_queue.get():`, which iterates
    # over the characters of a single URL string. Drain the queue instead.
    while not url_queue.empty():
        image = url_queue.get()
        print(image)
        download_info(image)
    end_time = time()
    print(end_time-start_time)
| pyl-10/web_crawler | cecilia_liu_pictures.py | cecilia_liu_pictures.py | py | 3,315 | python | en | code | 0 | github-code | 13 |
24915075936 | #
# Turn the data scraped (by copy paste) from
# https://www.gbmaps.com/4-digit-postcode-maps/free-uk-postcode-district-maps.htm
# into a CSV.
#
# Manually fixed typos / inconsistencies in input:
# - SSwansea -> Swansea
# - London N -> N-London
# - added E-London to London
# - added HS-Outer Hebrides to Scotland
# - added KA-Kilmarnock to Scotland
# - added LN-Lincoln to East Midlands
# - added ZE-Lerwick to Scotland
# - SS-Southend was listed twice, once in Midlands and once in South East.
# I've kept it in South East, because visually it was there.
#
import csv
import re
with open('postcode-regions.txt', 'r') as file:
lines = file.readlines()
AREA_RX = re.compile(r'([A-Z]{1,2})-')
with open('postcode-regions.csv', 'w') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['region', 'area', 'area_name'])
while lines:
region = lines.pop(0).rstrip().title()
areas_line = lines.pop(0).rstrip()
areas = AREA_RX.split(areas_line)
assert(areas.pop(0) == '')
assert(len(areas) % 2 == 0)
for i in range(len(areas) // 2):
csv_writer.writerow([
region,
areas[2 * i].strip(),
areas[2 * i + 1].strip()
])
if lines:
lines.pop(0)
| TechForUK/my_eu | prototype/data/postcode-regions/postcode-regions.py | postcode-regions.py | py | 1,309 | python | en | code | 18 | github-code | 13 |
def draw_star(k):
    """Print a k-by-k grid of '*' with a blank at every cell whose row and
    column index are both congruent to 1 modulo 3.
    (Also repairs the def line, which had a dataset id fused onto it.)
    """
    for row in range(k):
        for col in range(k):
            if row % 3 == 1 and col % 3 == 1:
                print(' ', end='')
            else:
                print('*', end='')
        print()
# Read the grid size from stdin and render the pattern.
num = int(input(''))
draw_star(num)
72101940817 | import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch import nn
class OrderedLoss(_Loss):
    """Cross-entropy plus an ordinal penalty: a confident prediction that is
    far (in class index) from the target costs more than a near miss.
    """

    def __init__(self, alpha=1, beta=0.5):
        super().__init__()
        self.criterion = nn.CrossEntropyLoss()
        self.alpha = alpha    # weight of the plain cross-entropy term
        self.beta = beta      # weight of the ordinal distance term
        self.epsilon = 0.01   # keeps log() away from zero

    def forward(self, x, target):
        # Standard cross-entropy on the raw logits.
        ce_term = self.criterion(x, target)
        probs = F.softmax(x, dim=1)
        # Squared class-index distance between prediction and target ...
        distance_sq = (probs.argmax(1).float() - target.float()) ** 2
        # ... scaled up the more confident the (possibly wrong) prediction is.
        confidence = probs.max(1)[0]
        ordinal_term = -distance_sq * torch.log(1 - confidence + self.epsilon)
        return self.alpha * ce_term + self.beta * ordinal_term.mean()
7834568730 | from io import BytesIO
from typing import IO, Optional
from flask import wrappers
from secure_tempfile import SecureTemporaryFile
from werkzeug.formparser import FormDataParser
class RequestThatSecuresFileUploads(wrappers.Request):
    """Flask request subclass that buffers large uploads in encrypted temp files."""

    def _secure_file_stream(
        self,
        total_content_length: Optional[int],
        content_type: Optional[str],
        filename: Optional[str] = None,
        content_length: Optional[int] = None,
    ) -> IO[bytes]:
        """Return the stream werkzeug writes uploaded file data into.

        Small payloads (<= 512 KiB) stay in memory; larger or unknown-size
        payloads are buffered on disk encrypted with an ephemeral key, to
        mitigate forensic recovery of the plaintext.
        """
        small_enough = (
            total_content_length is not None and total_content_length <= 1024 * 512
        )
        if small_enough:
            return BytesIO()
        # /tmp rather than config.TEMP_DIR: that directory is exposed via
        # X-Send-File, and these buffers must not be publicly reachable.
        # /tmp also has the benefit of being cleared automatically on reboot.
        return SecureTemporaryFile("/tmp")  # noqa: S108

    def make_form_data_parser(self) -> FormDataParser:
        """Build werkzeug's form parser wired to the secure stream factory."""
        return self.form_data_parser_class(
            self._secure_file_stream,
            self.charset,
            self.encoding_errors,
            self.max_form_memory_size,
            self.max_content_length,
            self.parameter_storage_class,
        )
| freedomofpress/securedrop | securedrop/request_that_secures_file_uploads.py | request_that_secures_file_uploads.py | py | 1,655 | python | en | code | 3,509 | github-code | 13 |
36196613321 | import re
# Read the scraped HTML listing; `with` guarantees the handle is closed even
# if reading fails (the original open()/read()/close() leaked on error).
with open('html_songs_list.txt') as file:
    content = file.read()
# Raw string fixes the invalid "\/" escapes (DeprecationWarning on Python 3);
# the pattern value is unchanged — '/' needs no escaping in a regex.
songs_matches = re.findall(r'/tracks">(?:.*)(?=</a>)', content)
songs = list()
for song_match in songs_matches:
    # The text after the anchor tag's closing '>' is the song title.
    song_name = song_match.split('>')[1] + ' - Lil Wayne'
    # Strip apostrophes so downstream tools don't choke on them.
    if '\'' in song_name:
        song_name = song_name.replace('\'', '')
    songs.append(song_name)
    # songs.append(song_name + ' Clean')
# Write one song per line.
with open('songs.txt', 'w') as f:
    for item in songs:
        f.write("%s\n" % item)
21359678914 | # coding: utf-8
import json
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options
def get_json_list( url ):
    """Scrape the Google results page at *url* using the module-level
    `chrome` driver and dump the hits to json_parse.json as
    {objectN: {Name, Url, Description}}.

    NOTE(review): selectors and line offsets are tied to Google's
    (Russian-locale) result page layout — fragile by nature.
    """
    chrome.get(url)
    elem = []
    object_dict = {}
    elem = chrome.find_elements_by_class_name("rc") # Find result elements by class name
    obj = 1
    for i in elem:
        name_object = i.text.split('\n')[0] # First text line is the result title
        url_object = i.find_element_by_tag_name('a').get_attribute('href') # Link to the result
        description_object = i.text.split('\n')
        # A "translate this page" line shifts the snippet down one row.
        if description_object.count('Перевести эту страницу'):
            description_object = description_object[3]
        else:
            description_object = description_object[2]
        # Strip the leading date prefix ("... г. — ") when present;
        # str.count() here counts substring occurrences.
        if description_object.count('г. —'):
            description_object = description_object.split('г. — ')[1] # Keep only the description part
        object_dict["object" + str(obj)] = { # Accumulate the result record
            'Name' : name_object,
            'Url' : url_object,
            'Description' : description_object
        }
        obj += 1
    with open('json_parse.json','w', encoding='utf-8') as file: # Write the JSON file
        json.dump(object_dict, file, ensure_ascii=False)
if __name__ == '__main__':
    url = 'https://www.google.com/search?q=scrapy' # URL to scrape
    webdriver = '/usr/local/bin/chromedriver' # Path to the chromedriver binary for selenium
    opts = Options()
    opts.set_headless()
    assert opts.headless # Disable the graphical interface
    # `chrome` is read as a module-level global by get_json_list().
    chrome = Chrome(webdriver, options=opts)
    get_json_list( url )
35552355632 | """
Implémantation du module Tag.
"""
import discord
from discord.ext import commands
import random
import pymysql
import asyncio
from pymysql import cursors
from Classes import MarianneException, GestionnaireResources
from Classes.GestionnaireResources import GestionnaireResources
from Fonctions import Message, Erreur
class Tag(commands.Cog):
    """Command group letting users save named text snippets ("tags") and
    retrieve them later by keyword. Tags are persisted in MySQL via pymysql.
    """

    def __init__(self, gestRes : GestionnaireResources):
        self.client = gestRes.client
        self.connectionBD = gestRes.connectionBD

    @commands.group()
    async def tag(self, ctx):
        """Parent command of the tag group: lets a user pair a keyword with
        a text; the text can be retrieved later with the keyword.

        Runs before every subcommand and verifies that the invoking Discord
        user is registered in the database.

        Raises:
            MarianneException.NonEnregDiscord: if the user is not registered.
        """
        with self.connectionBD.cursor(cursors.Cursor) as cur:
            requete = "SELECT (EXISTS (SELECT * FROM utilisateur WHERE utilisateur.discord_id=%s));"
            cur.execute(requete, ctx.message.author.id)
            if cur.fetchone()[0] == 0:
                raise MarianneException.NonEnregDiscord

    @tag.command()
    async def set(self, ctx, p_tagNom: str = "", *, p_tagText: str = ""):
        """Save a tag, asking the user before overwriting an existing one.

        Args:
            ctx: The command context.
            p_tagNom (str): The tag name.
            p_tagText (str): The tag text.
        """
        # Both parameters are required; the defaults only exist so we can
        # emit a friendly error instead of a raw MissingRequiredArgument.
        if p_tagNom == "" or p_tagText == "":
            return await ctx.send("Missing parameters. You have to give me a tag name and a text to remember. e.g. 'm/tag set joke a really funny joke'...")
        with self.connectionBD.cursor(cursors.Cursor) as cur:
            # Does this user already own a tag with that name?
            requete = "SELECT EXISTS(SELECT * FROM tag WHERE tag.utilisateur_discord_id=%s AND tag.tag_nom=%s);"
            cur.execute(requete, (ctx.message.author.id, p_tagNom))
            if cur.fetchone()[0] == 0:
                # New tag: plain insert.
                requete = "INSERT INTO tag VALUES (%s, %s, %s);"
                cur.execute(requete, (ctx.message.author.id, p_tagNom, p_tagText))
                return await ctx.send(f"Tag was saved successfully. You can get it again with 'm/tag get {p_tagNom}'.")
            else:
                # Existing tag: ask whether to overwrite or keep the old one.
                try:
                    choix = await Message.demanderEntree(ctx, self.client, None, f"You've already registered {p_tagNom} as a tag. Should I overwrite the old one? Type (y/n)...")
                except asyncio.TimeoutError:
                    # BUG FIX: ctx.send() was called with no content, which
                    # raises in discord.py; send an explicit notice instead.
                    return await ctx.send("You didn't answer in time, so I canceled the command.")
                # Act on the user's choice.
                if choix == "y":
                    requete = "UPDATE tag SET tag.tag_text=%s WHERE tag.utilisateur_discord_id=%s AND tag.tag_nom=%s;"
                    cur.execute(requete, (p_tagText, ctx.message.author.id, p_tagNom))
                elif choix=="n":
                    return await ctx.send("Alright, I canceled the command.")
                else:
                    return await ctx.send("Invalid input. I canceled the command just to be safe.")
                return await ctx.send(f"Tag was successfully saved. You can print the message again with 'm/tag get {p_tagNom}'.")

    @tag.command()
    async def get(self, ctx, tagNom: str):
        """Send back the text stored under *tagNom* for the invoking user.

        Args:
            ctx: The command context.
            tagNom (str): The requested tag name.
        """
        with self.connectionBD.cursor() as cur:
            requete = "SELECT * FROM tag WHERE tag.tag_nom=%s AND utilisateur_discord_id=%s;"
            cur.execute(requete, (tagNom, ctx.message.author.id))
            # NOTE(review): the row is indexed by column name below, so the
            # connection's default cursor is presumably a DictCursor — verify.
            resultat = cur.fetchone()
            if resultat is None:
                return await ctx.send("I couldn't find the tag your requested. You can see a list of your registered tags with 'm/tag mytags'.")
            else:
                return await ctx.send(f"**{tagNom}:**\n{resultat['tag_text']}")

    @tag.command()
    async def mytags(self, ctx):
        """List every tag name registered by the invoking user.

        Args:
            ctx: The command context.
        """
        with self.connectionBD.cursor(cursors.Cursor) as cur:
            requete = "SELECT t.tag_nom FROM tag t WHERE t.utilisateur_discord_id=%s ORDER BY t.tag_nom;"
            cur.execute(requete, ctx.message.author.id)
            resultat = cur.fetchall()
            # BUG FIX: fetchall() returns an empty tuple (never None) when
            # there are no rows, so test for emptiness, not identity.
            if not resultat:
                return await ctx.send("You don't have any registered tags.")
            else:
                # Format the tag names inside a code block.
                message = f"List of all tags for {ctx.message.author.mention}\n```\n"
                for tagNom in resultat:
                    message += f"{tagNom[0]}\n"
                message += "```"
                return await ctx.send(message)

    @tag.command(aliases=["del"])
    async def delete(self, ctx, tagNom: str = ""):
        """Delete one of the invoking user's saved tags.

        Args:
            ctx: The command context.
            tagNom (str, optional): Name of the tag to delete. Defaults to "".
        """
        if tagNom == "":
            return await ctx.send("You must pass the name of the tag you wish to delete.")
        with self.connectionBD.cursor() as cur:
            # NOTE(review): no explicit commit after DELETE/INSERT/UPDATE in
            # this cog; assumes the connection has autocommit enabled — verify.
            requete = "DELETE FROM tag WHERE tag.tag_nom=%s AND tag.utilisateur_discord_id=%s;"
            cur.execute(requete, (tagNom, ctx.message.author.id))
            # rowcount == 0 means no row matched, i.e. no such tag.
            if cur.rowcount == 0:
                return await ctx.send(f"I couldn't find a tag named {tagNom}")
            return await ctx.send("The tag was successfully deleted.")
| aleclev/marianne-bot | Commandes/Tag.py | Tag.py | py | 6,528 | python | fr | code | 1 | github-code | 13 |
23200031466 | from setuptools import setup, find_packages
from spso.version import __version__, __author__, __email__, __license__
import os
# Short description reused for both summary and long description.
desc = "Simple particle swarm optimizer in python"

# Package metadata; version/author/email/license come from spso.version.
setup( name = 'spso',
       version = __version__,
       description = desc,
       long_description = desc,
       long_description_content_type = 'text/plain',
       author = __author__,
       author_email = __email__,
       url = 'http://www.github.com/nicochidt/pythonspo',
       packages = find_packages(),
       scripts = [],
       license = __license__,
       classifiers = [
        'Programming Language :: Python :: 3',
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Environment :: Console',
    ],
)
| nicochidt/pythonspo | setup.py | setup.py | py | 935 | python | en | code | 1 | github-code | 13 |
73917840657 | """
- poll a website (based on 'onlinetermine.zollsoft.de') that references available doses of vaccination
- if one slot is available, fill the registration form very quickly (faster that humans) and let the user validate
"""
import datetime
import time
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
def apply_keys(link, sleep_time=0.2):
    """Open *link* in a visible Chrome window and fill the registration form
    by replaying a fixed TAB/SPACE key sequence.

    The sequence is specific to this page's tab order but easy to adapt.
    Placeholder personal data is typed in; the final submit keypress is left
    to the human user. The function never returns (busy-waits at the end).
    """
    with webdriver.Chrome(ChromeDriverManager().install()) as driver:
        driver.get(link)
        time.sleep(1)
        from selenium.webdriver.common.keys import Keys
        from selenium.webdriver.common.action_chains import ActionChains
        # Small helpers: one synthetic keypress, then a pause so the page
        # can react before the next key.
        def tab(t):
            ActionChains(driver).send_keys(Keys.TAB).perform()
            time.sleep(t)
        def space(t):
            ActionChains(driver).send_keys(Keys.SPACE).perform()
            time.sleep(t)
        def shift_tab(t):
            a = ActionChains(driver)
            a.key_down(Keys.SHIFT).send_keys(Keys.TAB).key_up(Keys.SHIFT)
            a.perform()
            time.sleep(t)
        # first page
        tab(sleep_time)
        tab(sleep_time)
        # tab(sleep_time) # for Johnson
        space(sleep_time)
        tab(sleep_time)
        space(sleep_time)
        tab(sleep_time)
        space(sleep_time)
        # second page
        shift_tab(sleep_time)
        shift_tab(sleep_time)
        space(sleep_time)
        tab(sleep_time)
        tab(sleep_time)
        space(sleep_time)
        # third page
        shift_tab(sleep_time)
        shift_tab(sleep_time)
        shift_tab(sleep_time)
        space(sleep_time)
        # Jump back 6 fields quickly to reach the first text input.
        for _ in range(6):
            shift_tab(sleep_time / 10)
        # Placeholder personal details, one field per TAB.
        for string in [
            "firstname",
            "lastname",
            "dd.mm.yyyy",
            "0123456789",
            "my_address@gmail.com"
        ]:
            ActionChains(driver).send_keys(string).perform()
            time.sleep(sleep_time)
            tab(sleep_time)
        for _ in range(4):
            tab(sleep_time)
        # at the point the form is filled. The user should make final 'ENTER' or 'SPACE'
        # space(sleep_time)
        # Busy-wait keeps the browser window open for the user to review
        # and submit manually (leaving the `with` would close the driver).
        while True:
            pass
def main():
    """Poll the appointment page every ~10 s in headless Chrome; when the
    Johnson & Johnson option appears, launch apply_keys() to pre-fill the
    registration form faster than a human could."""
    link = "https://onlinetermine.zollsoft.de/patientenTermine.php?uniqueident=6087dd08bd763"
    i = 0
    while True:
        # adapt the frequency
        time.sleep(10)
        print(i)
        print(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
        i += 1
        chrome_options = Options()
        chrome_options.headless = True
        try:
            with webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options) as driver:
                driver.get(link)
                time.sleep(1)
                # check the content
                content = driver.page_source
                # in this particular case, a first option for AstraZeneca is always present
                # (its absence means the page did not load correctly)
                if "Impfung mit AstraZeneca" not in content:
                    print('error: no [Impfung mit AstraZeneca]')
                    continue
                # trigger: in this particular case, I am looking for the J&J option
                if "Johnson" in content:
                    apply_keys(link=link)
        except Exception as e:
            # Best-effort polling loop: log and retry on any failure.
            print(e)
            time.sleep(1)
# Entry point: start the polling loop.
if __name__ == '__main__':
    main()
| chauvinSimon/appointment_bot | main.py | main.py | py | 3,499 | python | en | code | 1 | github-code | 13 |
72829503379 | import bpy
from bpy.props import *
from mathutils import Vector
from ... base_types import AnimationNode
from ... events import propertyChanged
class rollerNode(bpy.types.Node, AnimationNode):
    """Animation Nodes node that reads the parent object's keyframed XYZ
    location at the current frame (the "datum") and outputs the distance
    between the object's live location and that datum."""

    bl_idname = "an_rollerNode"
    bl_label = "Roller Node"
    bl_width_default = 200

    # Datum location sampled from the object's location F-curves.
    val_x = FloatProperty(name = "Datum X", default = 0, precision = 2)
    val_y = FloatProperty(name = "Datum Y", default = 0, precision = 2)
    val_z = FloatProperty(name = "Datum Z", default = 0, precision = 2)
    # Accumulated distance, reset at the start of the timeline.
    cum_d = FloatProperty(name = "Cum Dist", default = 0, precision = 2)
    # UI error text shown when prerequisites aren't met.
    message1 = StringProperty("")

    def create(self):
        """Declare the node's sockets: one object input, one float output."""
        self.newInput("Object","Parent","obj")
        self.newOutput("Float","Delta Offset","dist")

    def draw(self,layout):
        """Draw the datum values, cumulative distance, and any error message."""
        layout.prop(self, "val_x")
        layout.prop(self, "val_y")
        layout.prop(self, "val_z")
        layout.prop(self, "cum_d")
        if (self.message1 != ""):
            layout.label(self.message1, icon = "ERROR")

    def execute(self,obj):
        """Sample the object's location keyframes at the current frame and
        return the distance from the object's current location to that datum.

        Returns None when no object is connected or keyframes are missing.
        """
        self.use_custom_color = True
        self.useNetworkColor = False
        self.color = (0.8,0.9,1)
        if obj is None:
            return
        # Need at least the three location F-curves (X, Y, Z).
        if len(obj.animation_data.action.fcurves) < 3:
            self.message1 = 'Too few Keyframes XY&Z'
            return
        frm = bpy.context.scene.frame_current
        # Reset state at (or before) the first frame.
        if frm <= 1:
            self.message1 = ''
            self.cum_d = 0
        # F-curves 0/1/2 are assumed to be location X/Y/Z; a keyframe's
        # co is (frame, value).
        for i in obj.animation_data.action.fcurves[0].keyframe_points:
            key = i.co
            if key[0] == frm:
                self.val_x = key[1]
        for i in obj.animation_data.action.fcurves[1].keyframe_points:
            key = i.co
            if key[0] == frm:
                self.val_y = key[1]
        for i in obj.animation_data.action.fcurves[2].keyframe_points:
            key = i.co
            if key[0] == frm:
                self.val_z = key[1]
        return (obj.location - Vector((self.val_x,self.val_y,self.val_z)) ).length
| Clockmender/My-AN-Nodes | nodes/general/roller.py | roller.py | py | 1,981 | python | en | code | 16 | github-code | 13 |
27190856278 | from zad3testy import runtests
# gorsza złożoność, po prostu odczytuje wartości z drzewa
# i wstawiam do tablicy
def maxim( T, C ):
    """Flatten binary tree *T* into a heap-style array (root at index 1,
    children of i at 2i and 2i+1) and return the largest key among the
    array positions listed in *C*.

    NOTE(review): the array is sized from the rightmost spine only, so it
    is large enough only when no left path is deeper — verify against the
    tree shapes this task guarantees.
    """
    def spine_height(node):
        # Length of the path obtained by always descending to the right.
        depth = 0
        while node:
            depth += 1
            node = node.right
        return depth

    def fill(node, pos):
        # Store the key at its heap index, then recurse into the children.
        slots[pos] = node.key
        if node.left:
            fill(node.left, 2 * pos)
        if node.right:
            fill(node.right, 2 * pos + 1)

    slots = [None] * (2 ** spine_height(T))
    fill(T, 1)
    return max(slots[i] for i in C)
# Run the course-provided test harness against the solution.
runtests( maxim )
| JakubWorek/algorithms_and_data_structures_course | TREES/maxin/zad3.py | zad3.py | py | 557 | python | pl | code | 0 | github-code | 13 |
36704816195 | """Decoder builds the decoder network on a given latent variable."""
import tensorflow as tf
from tensorflow import distributions as ds
def decoder(latent, img_size, units):
    """Build the decoder network on top of the given latent tensor.

    Args:
        latent (tf.Tensor): sample_size x batch_size x latent_size latent tensor.
        img_size (int): dimensionality of the flattened output image.
        units (int): width of the hidden dense layer.

    Returns:
        (tf.distributions.Normal): batch_shape = (sample x batch x img)
        normal distributions representing the sampled image likelihoods.
    """
    dense_h = tf.layers.dense(latent, units)
    mean = tf.layers.dense(dense_h, img_size)
    stddev = tf.layers.dense(dense_h, img_size)
    return ds.Normal(mean, stddev)
| cshenton/auto-encoding-variational-bayes | vae/decoder.py | decoder.py | py | 682 | python | en | code | 19 | github-code | 13 |
35677389432 | # ADXL-345 program for the client device
# Will act as an anti-tampering sensor
import time
import board
import busio
import adafruit_adxl34x
from api_call import DeliveryDetectorBox
def run_adxl(box_num, names):
    """Poll the ADXL345 accelerometer forever; on detected motion, send a
    tamper alert for box *box_num* to every user in *names*.

    Args:
        box_num: identifier passed to DeliveryDetectorBox.
        names: iterable of user names to alert.
    """
    box = DeliveryDetectorBox(box_num)
    # I2C bus on the board's default SCL/SDA pins.
    i2c = busio.I2C(board.SCL, board.SDA)
    adxl = adafruit_adxl34x.ADXL345(i2c)
    adxl.enable_motion_detection(threshold=18)
    while True:
        val = adxl.events['motion']
        if (val == True):
            # send an alert to each user assigned to the box
            for name in names:
                box.send_tamper_alert(name, 'move')
        # Poll twice per second.
        time.sleep(0.5)
if __name__ == '__main__':
    box = DeliveryDetectorBox(1)
    # NOTE(review): run_adxl expects a box *number* (it constructs its own
    # DeliveryDetectorBox from it), but a DeliveryDetectorBox instance is
    # passed here — likely should be run_adxl(1, ['johng']); verify.
    run_adxl(box, ['johng'])
| Capstone-Projects-2022-Spring/project-delivery-detector | ClientDevice/adxl.py | adxl.py | py | 720 | python | en | code | 2 | github-code | 13 |
26573240350 | import socket
import threading
from queue import Queue
# Serializes print() calls across scanner threads.
print_lock = threading.Lock()

# Host to scan (hostname or IP).
target = "www.google.com"
def port_scan(port):
    """Try a TCP connection to (target, port); report the port if it is open.

    Relies on the module globals `target` and `print_lock`.
    """
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # BUG FIX: socket.connect() returns None, so the old code's
        # `connection.close()` raised AttributeError (silently swallowed by
        # a bare except) and leaked the socket on every open port.
        soc.connect((target, port))
        with print_lock:
            print(f'Port {port} is open!')
    except OSError:
        # Closed/filtered port (refused, timed out, unreachable, ...).
        pass
    finally:
        soc.close()
def threader():
    """Daemon worker loop: repeatedly pull a port number from the shared
    queue `q`, scan it, and mark the queue task as done."""
    while True:
        port_number = q.get()
        port_scan(port_number)
        q.task_done()
if __name__ == '__main__':
    q = Queue()
    # Spawn 50 daemon worker threads; daemon=True lets the process exit
    # even though each worker loops forever.
    for worker in range(50):
        t = threading.Thread(target = threader)
        t.daemon = True
        t.start()
    # Enqueue ports 1-100, then wait for all of them to be scanned.
    for port in range(1, 101):
        q.put(port)
    q.join()
| Mathanraj-Sharma/python_socket_programming | 03_threaded_port_scanner.py | 03_threaded_port_scanner.py | py | 679 | python | en | code | 0 | github-code | 13 |
13402451589 | #!/usr/bin/python
"""
--- Day 4: Security Through Obscurity ---
Finally, you come across an information kiosk with a list of rooms. Of
course, the list is encrypted and full of decoy data, but the
instructions to decode the list are barely hidden nearby. Better remove
the decoy data first.
Each room consists of an encrypted name (lowercase letters separated by
dashes) followed by a dash, a sector ID, and a checksum in square
brackets.
A room is real (not a decoy) if the checksum is the five most common
letters in the encrypted name, in order, with ties broken by
alphabetization. For example:
- aaaaa-bbb-z-y-x-123[abxyz] is a real room because the most common letters
are a (5), b (3), and then a tie between x, y, and z, which are listed
alphabetically.
- a-b-c-d-e-f-g-h-987[abcde] is a real room because although the letters
are all tied (1 of each), the first five are listed alphabetically.
- not-a-real-room-404[oarel] is a real room.
- totally-real-room-200[decoy] is not.
Of the real rooms from the list above, the sum of their sector IDs is 1514.
What is the sum of the sector IDs of the real rooms?
--- Part Two ---
With all the decoy data out of the way, it's time to decrypt this list
and get moving.
The room names are encrypted by a state-of-the-art shift cipher, which is
nearly unbreakable without the right software. However, the information
kiosk designers at Easter Bunny HQ were not expecting to deal with a
master cryptographer like yourself.
To decrypt a room name, rotate each letter forward through the alphabet a
number of times equal to the room's sector ID. A becomes B, B becomes C,
Z becomes A, and so on. Dashes become spaces.
For example, the real name for qzmt-zixmtkozy-ivhz-343 is "very encrypted
name".
What is the sector ID of the room where North Pole objects are stored?
"""
import re
import sys
def get_data(name):
    """Return all lines (newlines preserved) of the file at *name*.

    Uses a context manager so the handle is closed even on error; the
    original opened the file and never closed it.
    """
    with open(name, 'r') as f:
        return f.readlines()
def main():
    """Solve Advent of Code 2016 day 4 for the file named in argv[1].

    Star 1: sum the sector IDs of rooms whose checksum is valid.
    Star 2: decrypt each real room name and report the sector ID of
    'northpole object storage'.
    """
    if len(sys.argv) < 2:
        print("Usage: %s <input_file>" % sys.argv[0])
        sys.exit(1)

    data = get_data(sys.argv[1])

    sector_sum = 0
    storage_id = 0

    for line in data:
        name, sector_id, checksum = _parse_room(line.strip())

        if _checksum_valid(name, checksum):
            sector_sum += sector_id

            if _decrypt(name, sector_id) == 'northpole object storage':
                storage_id = sector_id

    print("[Star 1] Sum: %d" % sector_sum)
    print("[Star 2] Sector ID: %d" % storage_id)


def _parse_room(line):
    """Split one entry into (encrypted name, sector ID, checksum).

    Raw string avoids the invalid-escape warning the original
    '^(.+)-(\\d+)...' pattern triggers on modern Python.
    """
    m = re.search(r'^(.+)-(\d+)\[(.+)\]$', line)
    return m.group(1), int(m.group(2)), m.group(3)


def _checksum_valid(name, checksum):
    """True if *checksum* lists the most common letters of *name*
    (dashes excluded), most frequent first, ties broken alphabetically."""
    from collections import Counter
    counts = Counter(ch for ch in name if ch != '-')
    ranked = sorted(counts.items(), key=lambda x: (-x[1], x[0]))
    return all(ch == ranked[i][0] for i, ch in enumerate(checksum))


def _decrypt(name, sector_id):
    """Apply the shift cipher: rotate each letter forward by sector_id
    (mod 26); dashes become spaces.  Equivalent to the original
    one-step-at-a-time loop with the 'z' -> '`' wrap trick."""
    shift = sector_id % 26
    return ''.join(
        ' ' if ch == '-' else chr((ord(ch) - ord('a') + shift) % 26 + ord('a'))
        for ch in name)


if __name__ == '__main__':
    main()
| jtyr/advent-of-code-2016 | 04.py | 04.py | py | 3,363 | python | en | code | 0 | github-code | 13 |
24814262995 | import sys
import os
class SystemInfo:
    """Platform-dependent runtime configuration and tracking state."""

    def __init__(self):
        # Nothing is running or tracking until the main loop starts.
        self.isRunning = False
        self.isRaspberryPi = "linux" in sys.platform
        if self.isRaspberryPi:
            # On the Pi the boards appear as tty devices, and a window
            # can only be shown when an X display is attached.
            self.arduinoPort, self.bluetoothPort = "/dev/ttyACM0", "/dev/rfcomm0"
            self.enableWindow = "DISPLAY" in os.environ
        else:
            # Desktop (Windows) development defaults.
            self.arduinoPort, self.bluetoothPort = "COM3", "COM4"
            self.enableWindow = True
        self.isTracking = False
        # One placeholder rectangle/direction so consumers always see a list.
        self.trackedRects = [(0, 0, 0, 0)]
        self.trackedDirections = [0]
15639294293 | """Дополнительные классы для настройки основных классов приложения."""
from django.db.models import Model, Q
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer
from core.constants import Methods
class AddDelView:
    """
    Adds extra methods to a ViewSet.

    Provides a method that creates/removes a Many-to-Many link
    object between models.

    Requires the `add_serializer` attribute to be defined.  Intended to
    be mixed into a ViewSet: it reads `self.queryset` and `self.request`
    from the host class.
    """

    # Serializer used to render the linked object in the response.
    add_serializer: ModelSerializer | None = None

    def _add_del_obj(
        self,
        obj_id: int | str,
        m_to_m_model: Model,
        q: Q,
    ) -> Response:
        """Create/remove a `many to many` link.

        Args:
            obj_id:
                `id` of the object to link to / unlink from.
            m_to_m_model:
                The M2M model managing the required link.
            q:
                Filter condition selecting the existing link rows.

        Returns:
            Response: A status confirming/rejecting the action.
        """
        obj = get_object_or_404(self.queryset, id=obj_id)
        serializer: ModelSerializer = self.add_serializer(obj)
        # Existing link rows between the current user and the object.
        m2m_obj = m_to_m_model.objects.filter(q & Q(user=self.request.user))
        # Create the link on GET/POST when it does not exist yet.
        # NOTE(review): the M2M row is built positionally
        # (pk=None, obj.id, user.id) -- confirm the field order matches
        # the model definition.
        if (
            self.request.method in Methods.GET_POST_METHODS
        ) and not m2m_obj.exists():
            m_to_m_model(None, obj.id, self.request.user.id).save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        # Remove the link on DELETE-style methods when it exists.
        if (self.request.method in Methods.DEL_METHODS) and m2m_obj.exists():
            m2m_obj[0].delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        if not m2m_obj.exists():
            return Response(
                {'errors': f'Вы не подписаны на {obj.username}!'},
                status=status.HTTP_400_BAD_REQUEST,
            )
        # NOTE(review): a create-request for an already existing link falls
        # through and implicitly returns None -- confirm this is intended.
| Yohimbe227/The-Social-Recipe-Network | backend/core/classes.py | classes.py | py | 2,297 | python | ru | code | 0 | github-code | 13 |
9023398094 | from itertools import permutations
# Read a string and print: its characters, all ordered index pairs of
# characters (including a character paired with itself), and finally the
# sorted 2-permutations of the characters.
x = input()
y = []
k = []
# Split the input string into a list of single characters.
for a in x:
    y.append(a)
print(*y)
# Build every ordered pair y[n] + y[j]; the manual j/n increments below
# are redundant because range() already advances the loop variables.
for n in range(len(y)):
    j=0
    for j in range(len(y)):
        c = y[n] + y[j]
        k.append(c)
        j += 1
    n += 1
print(*k)
# Ordered pairs of *distinct* positions, sorted; note this loop rebinds
# `k` from the pair list to the loop variable.
a = sorted(list(permutations(y,2)))
for k in a:
    print ("".join(k))
| animeshmod/python-practice | stirng_break.py | stirng_break.py | py | 314 | python | en | code | 0 | github-code | 13 |
47917483924 | """migration
Revision ID: d1f28027789c
Revises:
Create Date: 2021-02-01 22:04:46.812707
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd1f28027789c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: user, follows, messages, post,
    comments, likes and notifications tables (Alembic auto-generated)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=15), nullable=False),
    sa.Column('email', sa.String(length=15), nullable=False),
    sa.Column('password', sa.String(length=80), nullable=True),
    sa.Column('user_image', sa.Text(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # Self-referential follower relation between users.
    op.create_table('follows',
    sa.Column('follow_id', sa.Integer(), nullable=False),
    sa.Column('followed_id', sa.Integer(), nullable=True),
    sa.Column('followed_by_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['followed_by_id'], ['user.id'], ),
    sa.ForeignKeyConstraint(['followed_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('follow_id')
    )
    op.create_table('messages',
    sa.Column('message_id', sa.Integer(), nullable=False),
    sa.Column('message_body', sa.String(length=250), nullable=False),
    sa.Column('sender_id', sa.Integer(), nullable=True),
    sa.Column('recepient_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['recepient_id'], ['user.id'], ),
    sa.ForeignKeyConstraint(['sender_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('message_id')
    )
    op.create_table('post',
    sa.Column('post_id', sa.Integer(), nullable=False),
    sa.Column('post_body', sa.String(length=250), nullable=False),
    sa.Column('post_image', sa.Text(), nullable=False),
    sa.Column('post_date', sa.DateTime(timezone=True), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('post_id')
    )
    op.create_table('comments',
    sa.Column('comment_id', sa.Integer(), nullable=False),
    sa.Column('comment_body', sa.String(length=250), nullable=False),
    sa.Column('author_id', sa.Integer(), nullable=True),
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['author_id'], ['user.id'], ),
    sa.ForeignKeyConstraint(['post_id'], ['post.post_id'], ),
    sa.PrimaryKeyConstraint('comment_id')
    )
    op.create_table('likes',
    sa.Column('like_id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['post_id'], ['post.post_id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('like_id')
    )
    # Notification fan-out rows; is_like/is_comment/is_follow act as
    # integer flags describing the triggering action.
    op.create_table('notifications',
    sa.Column('notification_id', sa.Integer(), nullable=False),
    sa.Column('to_be_notified_id', sa.Integer(), nullable=True),
    sa.Column('action_by_id', sa.Integer(), nullable=True),
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.Column('is_like', sa.Integer(), nullable=True),
    sa.Column('is_comment', sa.Integer(), nullable=True),
    sa.Column('is_follow', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['action_by_id'], ['user.id'], ),
    sa.ForeignKeyConstraint(['post_id'], ['post.post_id'], ),
    sa.ForeignKeyConstraint(['to_be_notified_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('notification_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by :func:`upgrade`.

    Tables are dropped in reverse creation order so foreign-key
    dependents go before the tables they reference.
    """
    for table_name in (
        'notifications',
        'likes',
        'comments',
        'post',
        'messages',
        'follows',
        'user',
    ):
        op.drop_table(table_name)
| Shahid313/social-media-website | migrations/versions/d1f28027789c_migration.py | d1f28027789c_migration.py | py | 3,839 | python | en | code | 0 | github-code | 13 |
1232765265 | import argparse
import shutil
import os
import subprocess
import sys
import re
import csv
from bench_utils import *
import accel_conf
# Benchmark API flavours mapped to the command-line aliases that select them.
APIS = {"HighLevel" : ["HighLevel", "high", "hl"],
        "MiddleLayer" : ["MiddleLayer", "middle", "ml"],
        "JobApi" : ["JobApi", "job"]}

# Column layout of the processed CSV report.  The column-name constants
# (CONFIG, TIMING, ...) come from bench_utils; values are placeholders
# that process_results() overwrites per row.
FIELDS_BASE = {CONFIG : '',
               TIMING : '',
               SRC_MEM : '',
               DST_MEM : '',
               API : '',
               PATH : '',
               EXEC : '',
               OPERATION : '',
               THREADS : '',
               QDEPTH : '',
               BATCH_SIZE : '',
               DATA : '',
               SIZE : '',
               BLOCK_SIZE : '',
               BLOCK : '',
               COMP_PARAMS : '',
               FILT_PARAMS : '',
               RATIO : ''}

# Full report layout: the base descriptors plus the measured metrics.
FIELDS = {**FIELDS_BASE,
          THROUGHPUT : '',
          LATENCY : ''}
def get_filter(op, path, api, latency, size, batch, depth, threads):
    """Build a --benchmark_filter regex selecting one benchmark family.

    Falsy size/depth/threads leave that dimension unconstrained.
    batch semantics: 1 selects the non-batched ("<Batch") cases,
    0 pins batch:1, and values > 1 pin that exact batch size.
    `op` is accepted for call compatibility but does not affect the
    resulting pattern.
    """
    parts = ["^Latency" if latency else "^Throughput"]
    if batch == 1:
        parts.append(".*<Batch")
    parts.append(".*" + api)
    path_tokens = {"hw": ".*Hardware", "sw": ".*Software", "auto": ".*Auto"}
    if path in path_tokens:
        parts.append(path_tokens[path])
    if size:
        parts.append(".*size:{}/".format(size))
    if batch == 0:
        parts.append(".*batch:1/")
    elif batch > 1:
        parts.append(".*batch:{}/".format(batch))
    if depth:
        parts.append(".*depth:{}/".format(depth))
    if threads:
        parts.append(".*threads:{}$".format(threads))
    return "".join(parts)
def run_generic(cmd, args, test=False):
    """Run one benchmark binary pinned to NUMA node 0 at high priority.

    :param cmd: path of the benchmark executable.
    :param args: extra command-line arguments for the benchmark.
    :param test: when True, shrink --benchmark_min_time to 0 so a run
        finishes almost instantly.  Defaults to False: the original
        signature had no default, which made existing two-argument calls
        (see run_dml) raise TypeError.
    """
    cmd = ["nice", "-n", "-20", "numactl", "--cpunodebind=0", "--membind=0", cmd] + args + ['--benchmark_out_format=csv', '--benchmark_counters_tabular=true', '--benchmark_min_time=0.3' if not test else '--benchmark_min_time=0']
    # Echo the full command line before running it.
    for arg in cmd:
        print(arg + " ", end='')
    print("")
    p = subprocess.run(cmd, universal_newlines=True)
    if p.returncode:
        print(cmd[0] + " - error")
def run_dml(bin_path, results_path, filter, test=False):
    """Run the DML benchmark binary with a case filter.

    :param bin_path: path of the benchmark executable.
    :param results_path: CSV file the benchmark writes its report to.
    :param filter: regex passed as --benchmark_filter.
    :param test: forwarded to run_generic (fast test run).  The original
        call omitted run_generic's required `test` argument and raised
        TypeError on every invocation; the new keyword keeps existing
        three-argument callers working.
    """
    case_filter = "--benchmark_filter=" + filter
    run_generic(cmd=bin_path,
                args=['--benchmark_out=' + results_path, case_filter],
                test=test)
def process_results(raw_res_path, res_path, config_str):
    """Convert one raw benchmark CSV into rows of the aggregate report.

    Skips the benchmark's preamble up to the CSV header, parses every
    case name into the FIELDS columns (via find_pattern/find_param from
    bench_utils), appends the rows to *res_path* and deletes the raw file.
    """
    result_file = open(res_path, "a")
    result = csv.DictWriter(result_file, fieldnames=FIELDS.keys(), delimiter=';')
    # Only write the header when the aggregate file is new/empty.
    if result_file.tell() == 0:
        result.writeheader()
    print("Processing report " + raw_res_path)
    raw_result_file = open(raw_res_path, "r")
    pos = 0
    line = raw_result_file.readline()
    # Scan forward to the "name,..." CSV header, then rewind to it so
    # DictReader sees the header as its first line.
    while line:
        if "name," in line:
            raw_result_file.seek(pos)
            break
        pos = raw_result_file.tell()
        line = raw_result_file.readline()
    raw_report = list(csv.DictReader(raw_result_file))
    raw_result_file.close()
    for line in raw_report:
        # NOTE(review): dict.fromkeys returns a *new* dict, so this line
        # does not reset FIELDS -- stale values from the previous row may
        # leak into columns not overwritten below; confirm intent.
        FIELDS.fromkeys(FIELDS, "")
        FIELDS[CONFIG] = config_str
        FIELDS[OPERATION] = find_pattern(line["name"], "^(.+?)/", required=True)
        FIELDS[THREADS] = find_pattern(line["name"], ".*/threads:(.+?)$", required=False, empty=1)
        FIELDS[TIMING] = find_param(line["name"], "timer", required=True)
        FIELDS[SRC_MEM] = find_param(line["name"], "in_mem", required=True)
        FIELDS[DST_MEM] = find_param(line["name"], "out_mem", required=True)
        FIELDS[API] = find_param(line["name"], "api", required=True)
        FIELDS[PATH] = find_param(line["name"], "path", required=True)
        FIELDS[EXEC] = find_param(line["name"], "exec", required=True)
        FIELDS[QDEPTH] = find_param(line["name"], "qsize", required=True)
        FIELDS[BATCH_SIZE] = find_param(line["name"], "batch")
        FIELDS[DATA] = find_param(line["name"], "data")
        FIELDS[SIZE] = find_param(line["name"], "size")
        FIELDS[BLOCK_SIZE] = find_param(line["name"], "data:.+?/", delim="")
        FIELDS[BLOCK] = find_param(line["name"], "block")
        # Compression parameters are concatenated into a single column.
        FIELDS[COMP_PARAMS] = find_param(line["name"], "gen_path", set=True) +\
                              find_param(line["name"], "huffman", set=True) +\
                              find_param(line["name"], "stat_int", set=True) +\
                              find_param(line["name"], "dict", set=True) +\
                              find_param(line["name"], "lvl", set=True, last=True)
        FIELDS[FILT_PARAMS] = "n/a"
        FIELDS[API] = "qpl_" + FIELDS[API]
        # Convert metrics; any missing/invalid value marks the row ERROR.
        try:
            if line["Throughput"] == "" or line["Latency/Op"] == "" or line["Ratio"] == "":
                raise
            FIELDS[RATIO] = float(line["Ratio"])
            FIELDS[THROUGHPUT] = float(line["Throughput"])/1000000000 # GB/s
            FIELDS[LATENCY] = float(line["Latency/Op"])*1000000000 # ns
        except:
            FIELDS[RATIO] = "ERROR"
            FIELDS[THROUGHPUT] = "ERROR"
            FIELDS[LATENCY] = "ERROR"
        result.writerow(FIELDS)
    result_file.close()
    os.remove(raw_res_path)
def run_case(args, config_str, res_postfix, res_path):
    """Run either the user-supplied filter or the full case matrix.

    Iterates over operations, APIs, latency/throughput mode and
    batched/non-batched mode, honouring the command-line restrictions in
    *args*, runs each selection and folds the raw CSVs into *res_path*.
    """
    if args.filter:
        raw_res_path = args.res_path + "dml_raw_" + config_str + res_postfix + ".csv"
        run_dml(args.bin_path, raw_res_path, filter=args.filter)
        # Bug fix: this previously passed the undefined name `config`
        # (NameError); the accelerator config string is what
        # process_results records in the CONFIG column.
        process_results(raw_res_path, res_path, config_str)
        return
    for op in OP_CODES:
        if not args.case in op:
            continue
        for api in APIS:
            if not find_alias(args.api, APIS[api]):
                continue
            for latency in [0, 1]:
                # args.latency: -1 = both, 0 = throughput only, >=1 = latency only.
                if args.latency == 0 and latency == 1:
                    continue
                if args.latency >= 1 and latency == 0:
                    continue
                for batch in [0, 1]:
                    # args.batch: -1 = both, 0 = non-batched only, >=1 = batched only.
                    if args.batch == 0 and batch == 1:
                        continue
                    if args.batch >= 1 and batch == 0:
                        continue
                    if batch:
                        # Batched run: 1 means "any batch size" for get_filter.
                        batch_arg = 1 if args.batch < 0 else args.batch
                    else:
                        batch_arg = 0
                    res_prefix = op + "_" + api + "_"
                    res_prefix += "latency_" if latency else "throughput_"
                    res_prefix += "b_" if batch else "nb_"
                    raw_res_path = args.res_path + "dml_raw_" + res_prefix + config_str + res_postfix + ".csv"
                    print("")
                    run_dml(args.bin_path, raw_res_path,
                            filter=get_filter(op, args.path, api, latency,
                                              args.size, batch_arg,
                                              args.depth, args.threads))
                    process_results(raw_res_path, res_path, config_str)
def run_bench(args):
    """Run the benchmark matrix and post-process results.

    Reads the DSA accelerator topology to build the config string used
    in result file names, repeats the run ``args.repetitions`` times and,
    when repeating, writes an aggregate CSV that keeps the
    lowest-latency row per case.

    :return: path of the final (aggregated) CSV file.
    """
    config = {NUMAS:0, DEVICES:0, WQS:0, ENGINES:0}
    config[NUMAS], config[DEVICES], config[WQS], config[ENGINES] = accel_conf.get_aggregated(dev_filter="dsa")
    config_str = str(config[NUMAS]) + "n" + str(config[DEVICES]) + "d" + str(config[ENGINES]) + "e" + str(config[WQS]) + "w"
    if args.clean and os.path.exists(args.res_path):
        shutil.rmtree(args.res_path)
    readers = []
    for rep in range(1, args.repetitions+1):
        res_postfix = ""
        if args.repetitions > 1:
            res_postfix = "_run_" + str(rep)
        print("Run " + str(rep))
        res_path = args.res_path + "dml_" + config_str + res_postfix + ".csv"
        print("Results path: ", res_path)
        if not os.path.exists(args.res_path):
            os.makedirs(args.res_path)
        else:
            if os.path.exists(res_path):
                os.remove(res_path)
        run_case(args, config_str, res_postfix, res_path)
        # Keep each run's parsed rows for the aggregation step below.
        if args.repetitions > 1:
            result_file = open(res_path, "r")
            readers.append(list(csv.DictReader(result_file, delimiter=';')));
            result_file.close()
    if args.repetitions > 1:
        # Aggregate: per case keep the row with the lowest latency;
        # rows whose metrics are "ERROR" are replaced when a later run
        # produced a numeric value.
        res_path = args.res_path + "dml_" + config_str + ".csv"
        result_file = open(res_path, "w+")
        result = csv.DictWriter(result_file, fieldnames=FIELDS.keys(), delimiter=';')
        result.writeheader()
        for row in range(len(readers[0])):
            min = dict(readers[0][row])
            for reader in range(1, args.repetitions):
                second = readers[reader][row]
                try:
                    if float(min[LATENCY]) > float(second[LATENCY]):
                        min[LATENCY] = second[LATENCY]
                        min[THROUGHPUT] = second[THROUGHPUT]
                except:
                    if not isinstance(min[LATENCY], float):
                        min[LATENCY] = second[LATENCY]
                        min[THROUGHPUT] = second[THROUGHPUT]
                    if isinstance(min[LATENCY], float) or isinstance(second[LATENCY], float):
                        print("Warning: unexpected error in run " + reader)
                        print(second)
            result.writerow(min)
        result_file.close()
    return res_path
if __name__ == "__main__":
    # Command-line entry point: parse options and run the benchmark matrix.
    # Fixed typos in user-visible help text ("runnner", "Number if threads").
    parser = argparse.ArgumentParser(description='Benchmark runner for DML')
    parser.add_argument('--bin-path', default='./dml_benchmarks', metavar='BIN_PATH', help='Path to benchmark')
    parser.add_argument('--res-path', default='./results/', metavar='RES_PATH', help='Path to results')
    parser.add_argument('--repetitions', default=1, type=int, metavar='REPETITIONS', help='Number of tests repetitions')
    parser.add_argument('--path', default="hw", metavar='PATH', help='Execution path: hw, sw, auto, all. May be not applicable to some APIs')
    parser.add_argument('--api', default="", metavar='API', help='API to run: hl or high, ml or mid, job')
    parser.add_argument('--case', default="", metavar='CASE', help='Case to run: MemMove, Fill, Compare, ComparePattern, '\
                                                                   'CreateDelta, ApplyDelta, CopyDualcast, CRC, CopyCRC, '\
                                                                   'DIFCheck, DIFInsert, DIFStrip, DIFUpdate')
    parser.add_argument('--threads', default=0, type=int, metavar='THREADS', help='Number of threads to use for submission')
    parser.add_argument('--latency', default=-1, type=int, metavar='LATENCY', help='Test latency: 0 - do not test, 1 - test latency only')
    parser.add_argument('--size', default=0, type=int, metavar='SIZE', help='Specific size to test')
    parser.add_argument('--batch', default=-1, type=int, metavar='BATCH_SIZE', help='Specific batch size to test: 0 - do not test; 1 - test batch only;')
    parser.add_argument('--depth', default=0, type=int, metavar='QDEPTH', help='Specific queue depth to test')
    parser.add_argument('--filter', default="", metavar='CASE', help='Custom filter for benchmark, overrides keys: api, case, latency, size, batch, depth')
    parser.add_argument('--clean', default=False, action='store_true', help='Clean results folder before measurements')
    parser.add_argument('--test', default=False, action='store_true', help='Test run with fastest options')
    args = parser.parse_args()
    print("Benchmark path: ", args.bin_path)
    run_bench(args)
| intel/DML | tools/benchmarks/scripts/run_dml_benchmarks.py | run_dml_benchmarks.py | py | 11,921 | python | en | code | 62 | github-code | 13 |
44025457321 | import random
import numpy as np
from pysc2.lib import actions, units
from collections import defaultdict
from Models.BuildOrders.ActionSingleton import ActionSingleton
from Models.HelperClass.HelperClass import HelperClass
class State:
    def __init__(self, bot_obj):
        """Initialise state tracking for the agent *bot_obj*.

        Counts start at the standard Terran opening for both players
        (12 SCVs, 1 Command Center); resources/supply likewise match a
        fresh game.
        """
        # Game state
        self.units_amount = defaultdict(lambda: 0)  # Amount of each unit. Set to 0 by default
        self.units_amount[units.Terran.SCV] = 12
        self.units_amount[units.Terran.CommandCenter] = 1
        self.enemy_units_amount = defaultdict(lambda: 0)
        self.enemy_units_amount[units.Terran.SCV] = 12
        self.enemy_units_amount[units.Terran.CommandCenter] = 1
        # Starting resources and supply.
        self.minerals = 50
        self.vespene = 0
        self.food_used = 12
        self.food_cap = 15
        self.idle_workers = 0
        # Score/reward bookkeeping used by get_state_now().
        self.oldscore = 1000
        self.reward = 0
        self.last_attacked = 0
        self.units_attacked = 0
        self.bot_obj = bot_obj
    def get_state(self):
        """
        :return: A list containing all the tuples (minerals, vespene, unit_amount, action_issued, bot_obj.steps)
            since the start of the game
        """
        # NOTE(review): `self.state_tuple` is never assigned anywhere in
        # this class, so calling this raises AttributeError unless some
        # external collaborator sets the attribute -- confirm and fix.
        return self.state_tuple
    def update_state(self, bot_obj, obs):
        """
        Updates the state and adds up to 1 production facility to control group. Always takes 4 steps to execute.

        bot_obj.reqSteps acts as a countdown state machine:
        0 -> recall control group 9; 3 -> move camera to an unselected
        production building; 2 -> click it; 1 -> append it to group 9.

        :param bot_obj: The agent
        :param obs: The observation
        :return: Actions (published via ActionSingleton)
        """
        new_action = [actions.FUNCTIONS.no_op()]  # No action by default
        if bot_obj.reqSteps == 0:
            bot_obj.reqSteps = 3
            new_action = [actions.FUNCTIONS.select_control_group("recall", 9)]
        # Section for adding unselected production building to control group 9.
        # It only adds one building per state update to keep state update lengths consistent.
        # When at this stage, control group 9 should be selected.
        # This section should be ran even when the control group is correct.
        elif bot_obj.reqSteps == 3:
            unselected_production = self.get_unselected_production_buildings(obs, on_screen=False)
            if len(unselected_production) > 0:
                unit = random.choice(unselected_production)
                new_action = HelperClass.move_screen(obs, (unit.x, unit.y))
            bot_obj.reqSteps = 2
        elif bot_obj.reqSteps == 2:
            unselected_production = self.get_unselected_production_buildings(obs, on_screen=True)
            if len(unselected_production) > 0:
                unit = random.choice(unselected_production)
                # Small random offset so the click lands inside the building.
                new_action = [actions.FUNCTIONS.select_point(
                    "select",
                    (HelperClass.sigma(unit.x+random.randint(0, 3)),
                     HelperClass.sigma(unit.y+random.randint(0, 3))))]
            bot_obj.reqSteps = 1
        elif bot_obj.reqSteps == 1:
            # single_select is an array of zeros if nothing is selected.
            # The following line checks for when hp > 0 (i.e. a unit is actually selected)
            if obs.observation.single_select[0][2] > 0:
                if (obs.observation.single_select[0].unit_type == units.Terran.CommandCenter or
                    obs.observation.single_select[0].unit_type == units.Terran.Barracks or
                    obs.observation.single_select[0].unit_type == units.Terran.Factory or
                    obs.observation.single_select[0].unit_type == units.Terran.Starport):
                    new_action = [actions.FUNCTIONS.select_control_group("append", 9)]
            bot_obj.reqSteps = 0
        # Update the score and reward
        bot_obj.game_state_updated = True
        ActionSingleton().set_action(new_action)
    def get_state_now(self, obs):
        """Build the current (normalised) feature vector without issuing actions.

        Returns a tuple of (1 x 15 numpy feature array, previous score,
        minimap player_relative layer).  Most features are scaled to
        roughly [0, 1] by hand-picked caps (e.g. minerals / 3000).
        """
        # Update any state that doesn't require actions
        oldscore = self.oldscore
        score = obs.observation.score_cumulative.score
        if score != oldscore:
            self.reward = score - self.oldscore
        else:
            self.reward = 0
        # Clamp resources at 3000 before normalising.
        if obs.observation.player.minerals > 3000:
            minerals = 1
        else:
            minerals = obs.observation.player.minerals/3000
        if obs.observation.player.vespene > 3000:
            vespene = 1
        else:
            vespene = obs.observation.player.vespene/3000
        food_used = obs.observation.player.food_used/200
        food_cap = obs.observation.player.food_cap/200
        idle_workers = obs.observation.player.idle_worker_count/200
        self.oldscore = score
        # Filter out SCVs before updating units_amount because they disappear when they go into refineries
        own_units = [u for u in obs.observation.raw_units
                     if u.alliance == 1 and u.unit_type != units.Terran.SCV]
        # Quickly checks if the state has changed. Not sure if actually faster.
        units_amount = defaultdict(lambda: 0)
        own_unit_types = [u.unit_type for u in own_units]
        unit_types, unit_type_counts = np.unique(np.array(own_unit_types), return_counts=True)
        for (unit_type, unit_type_count) in zip(unit_types, unit_type_counts):
            units_amount[unit_type] = unit_type_count
        units_amount[units.Terran.SCV] = obs.observation.player.food_workers
        # Counts enemy units
        enemy_units_amount = defaultdict(lambda: 0)
        enemy_units = [u for u in obs.observation.raw_units
                       if u.alliance == 4]
        enemy_unit_types = [u.unit_type for u in enemy_units]
        unit_types, unit_type_counts = np.unique(np.array(enemy_unit_types), return_counts=True)
        for (unit_type, unit_type_count) in zip(unit_types, unit_type_counts):
            enemy_units_amount[unit_type] = unit_type_count
        # Visible enemy army (combat units + SCVs).
        enemy_army = len([u for u in enemy_units
                          if u.unit_type in [units.Terran.Marine,
                                             units.Terran.Marauder,
                                             units.Terran.Medivac,
                                             units.Terran.Reaper,
                                             units.Terran.Hellion,
                                             units.Terran.Hellbat,
                                             units.Terran.VikingFighter,
                                             units.Terran.VikingAssault,
                                             units.Terran.Thor,
                                             units.Terran.ThorHighImpactMode,
                                             units.Terran.SiegeTank,
                                             units.Terran.SiegeTankSieged,
                                             units.Terran.Cyclone,
                                             units.Terran.Raven,
                                             units.Terran.Ghost,
                                             units.Terran.Liberator,
                                             units.Terran.LiberatorAG,
                                             units.Terran.Battlecruiser,
                                             units.Terran.Banshee,
                                             units.Terran.WidowMine,
                                             units.Terran.WidowMineBurrowed,
                                             units.Terran.SCV
                                             ]])
        # Visible enemy structures.
        enemy_buildings = len([u for u in enemy_units
                               if u.unit_type in [units.Terran.CommandCenter,
                                                  units.Terran.CommandCenterFlying,
                                                  units.Terran.OrbitalCommand,
                                                  units.Terran.OrbitalCommandFlying,
                                                  units.Terran.PlanetaryFortress,
                                                  units.Terran.SupplyDepot,
                                                  units.Terran.SupplyDepotLowered,
                                                  units.Terran.Refinery,
                                                  units.Terran.Barracks,
                                                  units.Terran.BarracksFlying,
                                                  units.Terran.BarracksReactor,
                                                  units.Terran.BarracksTechLab,
                                                  units.Terran.EngineeringBay,
                                                  units.Terran.MissileTurret,
                                                  units.Terran.SensorTower,
                                                  units.Terran.Bunker,
                                                  units.Terran.Factory,
                                                  units.Terran.FactoryFlying,
                                                  units.Terran.FactoryReactor,
                                                  units.Terran.FactoryTechLab,
                                                  units.Terran.Armory,
                                                  units.Terran.GhostAcademy,
                                                  units.Terran.Starport,
                                                  units.Terran.StarportFlying,
                                                  units.Terran.StarportReactor,
                                                  units.Terran.StarportTechLab,
                                                  units.Terran.FusionCore
                                                  ]])
        return np.array([[minerals, vespene, food_used, food_cap, idle_workers,
                          units_amount[units.Terran.CommandCenter],
                          units_amount[units.Terran.SupplyDepot]/24,
                          units_amount[units.Terran.Barracks]/10,
                          units_amount[units.Terran.Marine]/200,
                          units_amount[units.Terran.SCV]/200,
                          enemy_army,
                          enemy_buildings,
                          self.last_attacked,
                          self.units_attacked / 200,
                          self.bot_obj.steps*8/30000]]), oldscore, obs.observation.feature_minimap.player_relative
@staticmethod
def get_unselected_production_buildings(obs, on_screen=False):
"""
This methods returns a list of production buildings (buildings capable of producing units) that aren't
in currently selected. Note that it doesn't count Barracks with tech labs.
:param obs:
:param on_screen: Whether or not the list should only contain units visible on the screen
:return:
"""
if on_screen:
return [u for u in obs.observation.feature_units
if u.alliance == 1 and not u.is_selected
and (
u.unit_type == units.Terran.CommandCenter or
u.unit_type == units.Terran.Barracks or
u.unit_type == units.Terran.Factory or
u.unit_type == units.Terran.Starport
)]
else:
return [u for u in obs.observation.raw_units
if u.alliance == 1 and not u.is_selected
and (
u.unit_type == units.Terran.CommandCenter or
u.unit_type == units.Terran.Barracks or
u.unit_type == units.Terran.Factory or
u.unit_type == units.Terran.Starport
)]
| DukeA/DAT02X-19-03-MachineLearning-Starcraft2 | Src/Models/BotFile/State.py | State.py | py | 11,603 | python | en | code | 0 | github-code | 13 |
43254290911 | N = int(input())
arr = list(map(int, input().split()))
# dp[j][i] = number of ways to combine the first j numbers with +/- so
# that the running total equals i; totals must stay within 0..20.
dp = [[0 for _ in range(21)] for _ in range(N + 1)]
dp[1][arr[0]] = 1
for j in range(1, N):
    for i in range(21):
        if dp[j][i] > 0:
            # Branch on subtracting or adding the next number, keeping
            # intermediate results inside the allowed 0..20 range.
            if 0 <= i - arr[j] <= 20:
                dp[j + 1][i - arr[j]] += (dp[j][i])
            if 0 <= i + arr[j] <= 20:
                dp[j + 1][i + arr[j]] += (dp[j][i])
# The last number is the required final result (the equation's RHS), so
# the answer counts combinations of the first N-1 numbers reaching it.
print(dp[N - 1][arr[N - 1]])
30605394521 | from django.db import models
#from RegisterUsers.models import Patient
#from RegisterUsers.models import Doctor
# Create your models here.
class ScheduleAppointment(models.Model):
    # An appointment booked with a doctor.  Doctor data is stored as
    # plain id/name fields rather than ForeignKeys (the commented-out
    # relations below suggest the Patient/Doctor models live in another
    # service/database).
    #patient = models.ForeignKey(Patient,on_delete=models.CASCADE)
    #doctor = models.ForeignKey(Doctor,on_delete=models.CASCADE)
    #using = 'appointmentdb'
    doctor_id = models.IntegerField(null=True)
    doctor_name = models.CharField(max_length=255, null=True)
    appointment_time = models.DateTimeField(null=True)
| atheeswaran/Scalable-Services | appointmentScheduling/AppointmentScheduling/models.py | models.py | py | 509 | python | en | code | 0 | github-code | 13 |
72722421458 | from typing import List, Tuple
class Config:
    """One configuration (state) of the 3x3 sliding-tile (8-)puzzle.

    `config` is a 3x3 grid of digits 0-8 where 0 marks the empty cell at
    (zero_x, zero_y).  `code` is the row-major string encoding used for
    fast comparison; the solved state encodes to "012345678".
    `parent` and `depth` let search algorithms reconstruct the path.
    """

    correct_config = "012345678"

    def __init__(self, config, zero_x: int, zero_y: int, parent=None, depth=-1):
        self.config = config
        self.zero_x = zero_x
        self.zero_y = zero_y
        self.depth = depth
        self.parent = parent
        self.code = self.encode(config)

    def encode(self, config) -> str:
        """Return the row-major string encoding of a 3x3 grid.

        Uses str.join instead of the original quadratic string
        concatenation loop.
        """
        return "".join(str(cell) for row in config for cell in row)

    def is_equal(self, config: str):
        """True if this state's encoding matches the given code string."""
        return config == self.code

    def correct(self):
        """True if this is the solved configuration."""
        return self.is_equal(Config.correct_config)

    # Verifies which directions the empty cell is able to move
    def variations(self) -> List[Tuple[int, int]]:
        """Return (dx, dy) moves of the empty cell that stay on the board.

        Order matches the original implementation (down, up, right, left
        in (x, y) terms) so search behaviour is unchanged.
        """
        variations_list = []
        if self.zero_x < 2:
            variations_list.append((1, 0))
        if self.zero_x > 0:
            variations_list.append((-1, 0))
        if self.zero_y < 2:
            variations_list.append((0, 1))
        if self.zero_y > 0:
            variations_list.append((0, -1))
        return variations_list

    def print(self):
        """Print the grid as three space-separated rows."""
        for row in self.config:
            print(" ".join(str(cell) for cell in row))
43230325606 | #!/usr/bin/env python
import os
from os import path
import sys
# Directory containing this program.
PROGDIR = path.dirname(path.realpath(__file__))
# For python_config.
sys.path.insert(0, path.join(PROGDIR, "..", "..", "..", "etc"))
# Use non-interactive backend.
import matplotlib
matplotlib.use("Agg")
import python_config
import sg
def main():
    """Render sequence graphs for each TCP variant from experiment data.

    Expects two CLI arguments: the experiment data directory (must
    exist) and the output directory (created if missing); delegates the
    actual graphing to sg.seq().
    """
    assert len(sys.argv) == 3, \
        "Expected one argument: experiment data dir, output dir"
    edr, odr = sys.argv[1:]
    if not path.isdir(edr):
        print("The first argument must be a directory, but is: {}".format(edr))
        sys.exit(-1)
    if path.exists(odr):
        if not path.isdir(odr):
            print("Output directory exists and is a file: {}".format(odr))
            sys.exit(-1)
    else:
        os.makedirs(odr)

    # Create a graph for each TCP variant.
    for var in ["cubic"]:
        sg.seq(
            name="seq-dyn-{}".format(var),
            edr=edr,
            odr=odr,
            ptn="*-QUEUE-True-*-{}-*click.txt".format(var),
            # Sort result files by their (TDF-scaled) timestamp field.
            key_fnc=lambda fn: int(round(float(fn.split("-")[6])
                                         / python_config.TDF)),
            dur=1200,
            # chunk_mode=100,
            msg_len=116,
            log_pos="after")
        # ins=((2780, 2960), (200, 340)),
        #flt=lambda idx, label: idx < 10)
        # flt=lambda idx, label: idx in [0, 1, 3, 4, 5, 6, 7, 8, 9, 10])


if __name__ == "__main__":
    main()
| mukerjee/etalon | experiments/buffers/sequence_graphs/sg_cc.py | sg_cc.py | py | 1,474 | python | en | code | 12 | github-code | 13 |
17037610184 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceEducateTuitioncodePlanruleSendModel(object):
    """API model for the tuition-code plan-rule send request.

    Plain data holder with one private attribute per wire field.  The
    property pairs preserve the public attribute interface of the
    generated SDK class, while (de)serialisation is driven by a shared
    field list instead of six copy-pasted per-field branches.
    """

    # Wire field names, in the original serialisation order.
    _FIELD_NAMES = ("allot_type", "execute_type", "out_biz_no",
                    "period", "setting_type", "smid")

    def __init__(self):
        self._allot_type = None
        self._execute_type = None
        self._out_biz_no = None
        self._period = None
        self._setting_type = None
        self._smid = None

    @property
    def allot_type(self):
        return self._allot_type

    @allot_type.setter
    def allot_type(self, value):
        self._allot_type = value

    @property
    def execute_type(self):
        return self._execute_type

    @execute_type.setter
    def execute_type(self, value):
        self._execute_type = value

    @property
    def out_biz_no(self):
        return self._out_biz_no

    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value

    @property
    def period(self):
        return self._period

    @period.setter
    def period(self, value):
        self._period = value

    @property
    def setting_type(self):
        return self._setting_type

    @setting_type.setter
    def setting_type(self, value):
        self._setting_type = value

    @property
    def smid(self):
        return self._smid

    @smid.setter
    def smid(self, value):
        self._smid = value

    def to_alipay_dict(self):
        """Serialise to a plain dict.

        Nested API models are converted recursively via their own
        to_alipay_dict; falsy fields are omitted (original behaviour).
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for empty/None input."""
        if not d:
            return None
        o = AlipayCommerceEducateTuitioncodePlanruleSendModel()
        for name in AlipayCommerceEducateTuitioncodePlanruleSendModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayCommerceEducateTuitioncodePlanruleSendModel.py | AlipayCommerceEducateTuitioncodePlanruleSendModel.py | py | 3,360 | python | en | code | 241 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.